Git Repo - J-linux.git/blob - drivers/net/tun.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  TUN - Universal TUN/TAP device driver.
4  *  Copyright (C) 1999-2002 Maxim Krasnyansky <[email protected]>
5  *
6  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
7  */
8
9 /*
10  *  Changes:
11  *
12  *  Mike Kershaw <[email protected]> 2005/08/14
13  *    Add TUNSETLINK ioctl to set the link encapsulation
14  *
15  *  Mark Smith <[email protected]>
16  *    Use eth_random_addr() for tap MAC address.
17  *
18  *  Harald Roelle <[email protected]>  2004/04/20
19  *    Fixes in packet dropping, queue length setting and queue wakeup.
20  *    Increased default tx queue length.
21  *    Added ethtool API.
22  *    Minor cleanups
23  *
24  *  Daniel Podlejski <[email protected]>
25  *    Modifications for 2.3.99-pre5 kernel.
26  */
27
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
30 #define DRV_NAME        "tun"
31 #define DRV_VERSION     "1.6"
32 #define DRV_DESCRIPTION "Universal TUN/TAP device driver"
33 #define DRV_COPYRIGHT   "(C) 1999-2004 Max Krasnyansky <[email protected]>"
34
35 #include <linux/module.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/sched/signal.h>
39 #include <linux/major.h>
40 #include <linux/slab.h>
41 #include <linux/poll.h>
42 #include <linux/fcntl.h>
43 #include <linux/init.h>
44 #include <linux/skbuff.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/miscdevice.h>
48 #include <linux/ethtool.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/compat.h>
51 #include <linux/if.h>
52 #include <linux/if_arp.h>
53 #include <linux/if_ether.h>
54 #include <linux/if_tun.h>
55 #include <linux/if_vlan.h>
56 #include <linux/crc32.h>
57 #include <linux/nsproxy.h>
58 #include <linux/virtio_net.h>
59 #include <linux/rcupdate.h>
60 #include <net/net_namespace.h>
61 #include <net/netns/generic.h>
62 #include <net/rtnetlink.h>
63 #include <net/sock.h>
64 #include <net/xdp.h>
65 #include <net/ip_tunnels.h>
66 #include <linux/seq_file.h>
67 #include <linux/uio.h>
68 #include <linux/skb_array.h>
69 #include <linux/bpf.h>
70 #include <linux/bpf_trace.h>
71 #include <linux/mutex.h>
72
73 #include <linux/uaccess.h>
74 #include <linux/proc_fs.h>
75
76 static void tun_default_link_ksettings(struct net_device *dev,
77                                        struct ethtool_link_ksettings *cmd);
78
79 #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
80
81 /* TUN device flags */
82
83 /* IFF_ATTACH_QUEUE is never stored in device flags,
84  * so overload it to mean fasync when stored there.
85  */
86 #define TUN_FASYNC      IFF_ATTACH_QUEUE
87 /* High bits in flags field are unused. */
88 #define TUN_VNET_LE     0x80000000
89 #define TUN_VNET_BE     0x40000000
90
91 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
92                       IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
93
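/* Illustrative sketch, not part of this file: how userspace requests the
 * TUN_FEATURES flags above through the TUNSETIFF ioctl. The device name
 * "tap0" is an assumption for the example; error handling is reduced to
 * -1 returns.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	static int tap_open_multiqueue(void)
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
 *		strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
 *		if (ioctl(fd, TUNSETIFF, &ifr) < 0)
 *			return -1;
 *		return fd;
 *	}
 */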
94 #define GOODCOPY_LEN 128
95
96 #define FLT_EXACT_COUNT 8
97 struct tap_filter {
98         unsigned int    count;    /* Number of addrs. Zero means disabled */
99         u32             mask[2];  /* Mask of the hashed addrs */
100         unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
101 };
102
103 /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
104  * to the max number of VCPUs in a guest. */
105 #define MAX_TAP_QUEUES 256
106 #define MAX_TAP_FLOWS  4096
107
108 #define TUN_FLOW_EXPIRE (3 * HZ)
109
110 /* A tun_file connects an open character device to a tuntap netdevice. It
111  * also contains all socket-related structures (except sock_fprog and tap_filter)
112  * so that it can serve as one transmit queue for the tuntap device. The sock_fprog
113  * and tap_filter are kept in tun_struct since they are used to filter for the
114  * netdevice, not for a specific queue (at least I didn't see a requirement for
115  * this).
116  *
117  * RCU usage:
118  * The tun_file and tun_struct are loosely coupled, the pointer from one to the
119  * other can only be read while rcu_read_lock or rtnl_lock is held.
120  */
121 struct tun_file {
122         struct sock sk;
123         struct socket socket;
124         struct tun_struct __rcu *tun;
125         struct fasync_struct *fasync;
126         /* only used for fasync */
127         unsigned int flags;
128         union {
129                 u16 queue_index;
130                 unsigned int ifindex;
131         };
132         struct napi_struct napi;
133         bool napi_enabled;
134         bool napi_frags_enabled;
135         struct mutex napi_mutex;        /* Protects access to the above napi */
136         struct list_head next;
137         struct tun_struct *detached;
138         struct ptr_ring tx_ring;
139         struct xdp_rxq_info xdp_rxq;
140 };
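/* A minimal sketch of the read-side pattern implied by the RCU comment
 * above (tun_get()/tun_put() further down are the real accessors); the
 * dev_hold() pins the netdev beyond the RCU section:
 *
 *	rcu_read_lock();
 *	tun = rcu_dereference(tfile->tun);
 *	if (tun)
 *		dev_hold(tun->dev);
 *	rcu_read_unlock();
 */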
141
142 struct tun_page {
143         struct page *page;
144         int count;
145 };
146
147 struct tun_flow_entry {
148         struct hlist_node hash_link;
149         struct rcu_head rcu;
150         struct tun_struct *tun;
151
152         u32 rxhash;
153         u32 rps_rxhash;
154         int queue_index;
155         unsigned long updated ____cacheline_aligned_in_smp;
156 };
157
158 #define TUN_NUM_FLOW_ENTRIES 1024
159 #define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
160
161 struct tun_prog {
162         struct rcu_head rcu;
163         struct bpf_prog *prog;
164 };
165
166 /* Since the socket was moved to tun_file, to preserve the behavior of a
167  * persistent device, the socket filter, sndbuf and vnet header size are
168  * restored when a file is attached to a persistent device.
169  */
170 struct tun_struct {
171         struct tun_file __rcu   *tfiles[MAX_TAP_QUEUES];
172         unsigned int            numqueues;
173         unsigned int            flags;
174         kuid_t                  owner;
175         kgid_t                  group;
176
177         struct net_device       *dev;
178         netdev_features_t       set_features;
179 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
180                           NETIF_F_TSO6)
181
182         int                     align;
183         int                     vnet_hdr_sz;
184         int                     sndbuf;
185         struct tap_filter       txflt;
186         struct sock_fprog       fprog;
187         /* protected by rtnl lock */
188         bool                    filter_attached;
189         u32                     msg_enable;
190         spinlock_t lock;
191         struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
192         struct timer_list flow_gc_timer;
193         unsigned long ageing_time;
194         unsigned int numdisabled;
195         struct list_head disabled;
196         void *security;
197         u32 flow_count;
198         u32 rx_batched;
199         atomic_long_t rx_frame_errors;
200         struct bpf_prog __rcu *xdp_prog;
201         struct tun_prog __rcu *steering_prog;
202         struct tun_prog __rcu *filter_prog;
203         struct ethtool_link_ksettings link_ksettings;
204 };
205
206 struct veth {
207         __be16 h_vlan_proto;
208         __be16 h_vlan_TCI;
209 };
210
211 static int tun_napi_receive(struct napi_struct *napi, int budget)
212 {
213         struct tun_file *tfile = container_of(napi, struct tun_file, napi);
214         struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
215         struct sk_buff_head process_queue;
216         struct sk_buff *skb;
217         int received = 0;
218
219         __skb_queue_head_init(&process_queue);
220
221         spin_lock(&queue->lock);
222         skb_queue_splice_tail_init(queue, &process_queue);
223         spin_unlock(&queue->lock);
224
225         while (received < budget && (skb = __skb_dequeue(&process_queue))) {
226                 napi_gro_receive(napi, skb);
227                 ++received;
228         }
229
230         if (!skb_queue_empty(&process_queue)) {
231                 spin_lock(&queue->lock);
232                 skb_queue_splice(&process_queue, queue);
233                 spin_unlock(&queue->lock);
234         }
235
236         return received;
237 }
238
239 static int tun_napi_poll(struct napi_struct *napi, int budget)
240 {
241         unsigned int received;
242
243         received = tun_napi_receive(napi, budget);
244
245         if (received < budget)
246                 napi_complete_done(napi, received);
247
248         return received;
249 }
250
251 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
252                           bool napi_en, bool napi_frags)
253 {
254         tfile->napi_enabled = napi_en;
255         tfile->napi_frags_enabled = napi_en && napi_frags;
256         if (napi_en) {
257                 netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
258                                   NAPI_POLL_WEIGHT);
259                 napi_enable(&tfile->napi);
260         }
261 }
262
263 static void tun_napi_disable(struct tun_file *tfile)
264 {
265         if (tfile->napi_enabled)
266                 napi_disable(&tfile->napi);
267 }
268
269 static void tun_napi_del(struct tun_file *tfile)
270 {
271         if (tfile->napi_enabled)
272                 netif_napi_del(&tfile->napi);
273 }
274
275 static bool tun_napi_frags_enabled(const struct tun_file *tfile)
276 {
277         return tfile->napi_frags_enabled;
278 }
279
280 #ifdef CONFIG_TUN_VNET_CROSS_LE
281 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
282 {
283         return tun->flags & TUN_VNET_BE ? false :
284                 virtio_legacy_is_little_endian();
285 }
286
287 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
288 {
289         int be = !!(tun->flags & TUN_VNET_BE);
290
291         if (put_user(be, argp))
292                 return -EFAULT;
293
294         return 0;
295 }
296
297 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
298 {
299         int be;
300
301         if (get_user(be, argp))
302                 return -EFAULT;
303
304         if (be)
305                 tun->flags |= TUN_VNET_BE;
306         else
307                 tun->flags &= ~TUN_VNET_BE;
308
309         return 0;
310 }
311 #else
312 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
313 {
314         return virtio_legacy_is_little_endian();
315 }
316
317 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
318 {
319         return -EINVAL;
320 }
321
322 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
323 {
324         return -EINVAL;
325 }
326 #endif /* CONFIG_TUN_VNET_CROSS_LE */
327
328 static inline bool tun_is_little_endian(struct tun_struct *tun)
329 {
330         return tun->flags & TUN_VNET_LE ||
331                 tun_legacy_is_little_endian(tun);
332 }
333
334 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
335 {
336         return __virtio16_to_cpu(tun_is_little_endian(tun), val);
337 }
338
339 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
340 {
341         return __cpu_to_virtio16(tun_is_little_endian(tun), val);
342 }
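/* Illustrative sketch, not part of this file: userspace can pin the vnet
 * header byte order seen by the helpers above with TUNSETVNETLE (and, when
 * CONFIG_TUN_VNET_CROSS_LE is set, TUNSETVNETBE); error handling omitted:
 *
 *	int le = 1;
 *
 *	ioctl(fd, TUNSETVNETLE, &le);
 */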
343
344 static inline u32 tun_hashfn(u32 rxhash)
345 {
346         return rxhash & TUN_MASK_FLOW_ENTRIES;
347 }
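/* Because TUN_NUM_FLOW_ENTRIES is a power of two, the mask above equals
 * rxhash % TUN_NUM_FLOW_ENTRIES without a divide, e.g.:
 *
 *	0x12345678 & TUN_MASK_FLOW_ENTRIES == 0x278	(bucket 632)
 */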
348
349 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
350 {
351         struct tun_flow_entry *e;
352
353         hlist_for_each_entry_rcu(e, head, hash_link) {
354                 if (e->rxhash == rxhash)
355                         return e;
356         }
357         return NULL;
358 }
359
360 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
361                                               struct hlist_head *head,
362                                               u32 rxhash, u16 queue_index)
363 {
364         struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
365
366         if (e) {
367                 netif_info(tun, tx_queued, tun->dev,
368                            "create flow: hash %u index %u\n",
369                            rxhash, queue_index);
370                 e->updated = jiffies;
371                 e->rxhash = rxhash;
372                 e->rps_rxhash = 0;
373                 e->queue_index = queue_index;
374                 e->tun = tun;
375                 hlist_add_head_rcu(&e->hash_link, head);
376                 ++tun->flow_count;
377         }
378         return e;
379 }
380
381 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
382 {
383         netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
384                    e->rxhash, e->queue_index);
385         hlist_del_rcu(&e->hash_link);
386         kfree_rcu(e, rcu);
387         --tun->flow_count;
388 }
389
390 static void tun_flow_flush(struct tun_struct *tun)
391 {
392         int i;
393
394         spin_lock_bh(&tun->lock);
395         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
396                 struct tun_flow_entry *e;
397                 struct hlist_node *n;
398
399                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
400                         tun_flow_delete(tun, e);
401         }
402         spin_unlock_bh(&tun->lock);
403 }
404
405 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
406 {
407         int i;
408
409         spin_lock_bh(&tun->lock);
410         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
411                 struct tun_flow_entry *e;
412                 struct hlist_node *n;
413
414                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
415                         if (e->queue_index == queue_index)
416                                 tun_flow_delete(tun, e);
417                 }
418         }
419         spin_unlock_bh(&tun->lock);
420 }
421
422 static void tun_flow_cleanup(struct timer_list *t)
423 {
424         struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
425         unsigned long delay = tun->ageing_time;
426         unsigned long next_timer = jiffies + delay;
427         unsigned long count = 0;
428         int i;
429
430         spin_lock(&tun->lock);
431         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
432                 struct tun_flow_entry *e;
433                 struct hlist_node *n;
434
435                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
436                         unsigned long this_timer;
437
438                         this_timer = e->updated + delay;
439                         if (time_before_eq(this_timer, jiffies)) {
440                                 tun_flow_delete(tun, e);
441                                 continue;
442                         }
443                         count++;
444                         if (time_before(this_timer, next_timer))
445                                 next_timer = this_timer;
446                 }
447         }
448
449         if (count)
450                 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
451         spin_unlock(&tun->lock);
452 }
453
454 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
455                             struct tun_file *tfile)
456 {
457         struct hlist_head *head;
458         struct tun_flow_entry *e;
459         unsigned long delay = tun->ageing_time;
460         u16 queue_index = tfile->queue_index;
461
462         head = &tun->flows[tun_hashfn(rxhash)];
463
464         rcu_read_lock();
465
466         e = tun_flow_find(head, rxhash);
467         if (likely(e)) {
468                 /* TODO: keep queueing to old queue until it's empty? */
469                 if (READ_ONCE(e->queue_index) != queue_index)
470                         WRITE_ONCE(e->queue_index, queue_index);
471                 if (e->updated != jiffies)
472                         e->updated = jiffies;
473                 sock_rps_record_flow_hash(e->rps_rxhash);
474         } else {
475                 spin_lock_bh(&tun->lock);
476                 if (!tun_flow_find(head, rxhash) &&
477                     tun->flow_count < MAX_TAP_FLOWS)
478                         tun_flow_create(tun, head, rxhash, queue_index);
479
480                 if (!timer_pending(&tun->flow_gc_timer))
481                         mod_timer(&tun->flow_gc_timer,
482                                   round_jiffies_up(jiffies + delay));
483                 spin_unlock_bh(&tun->lock);
484         }
485
486         rcu_read_unlock();
487 }
488
489 /* Save the hash received in the stack receive path and update the
490  * flow_hash table accordingly.
491  */
492 static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
493 {
494         if (unlikely(e->rps_rxhash != hash))
495                 e->rps_rxhash = hash;
496 }
497
498 /* We try to identify a flow through its rxhash. The reason that
499  * we do not check the rxq no. is that some cards (e.g. the 82599) choose
500  * the rxq based on the txq on which the last packet of the flow was sent. As
501  * the userspace application moves between processors, we may get a
502  * different rxq no. here.
503  */
504 static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
505 {
506         struct tun_flow_entry *e;
507         u32 txq = 0;
508         u32 numqueues = 0;
509
510         numqueues = READ_ONCE(tun->numqueues);
511
512         txq = __skb_get_hash_symmetric(skb);
513         e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
514         if (e) {
515                 tun_flow_save_rps_rxhash(e, txq);
516                 txq = e->queue_index;
517         } else {
518                 /* use multiply and shift instead of expensive divide */
519                 txq = ((u64)txq * numqueues) >> 32;
520         }
521
522         return txq;
523 }
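/* Worked example of the multiply-and-shift above: the 32-bit hash is
 * treated as a fraction of 2^32 and scaled into [0, numqueues), e.g. with
 * numqueues == 4:
 *
 *	txq = 0xC0000000;		(3/4 of the 32-bit range)
 *	((u64)txq * 4) >> 32 == 3	(the last queue)
 */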
524
525 static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
526 {
527         struct tun_prog *prog;
528         u32 numqueues;
529         u16 ret = 0;
530
531         numqueues = READ_ONCE(tun->numqueues);
532         if (!numqueues)
533                 return 0;
534
535         prog = rcu_dereference(tun->steering_prog);
536         if (prog)
537                 ret = bpf_prog_run_clear_cb(prog->prog, skb);
538
539         return ret % numqueues;
540 }
541
542 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
543                             struct net_device *sb_dev)
544 {
545         struct tun_struct *tun = netdev_priv(dev);
546         u16 ret;
547
548         rcu_read_lock();
549         if (rcu_dereference(tun->steering_prog))
550                 ret = tun_ebpf_select_queue(tun, skb);
551         else
552                 ret = tun_automq_select_queue(tun, skb);
553         rcu_read_unlock();
554
555         return ret;
556 }
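/* Illustrative sketch, not part of this file: the steering program checked
 * above is installed from userspace with TUNSETSTEERINGEBPF, passing the fd
 * of a loaded BPF_PROG_TYPE_SOCKET_FILTER program whose return value picks
 * the queue; prog_fd is assumed to come from a prior bpf(BPF_PROG_LOAD, ...):
 *
 *	ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd);
 */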
557
558 static inline bool tun_not_capable(struct tun_struct *tun)
559 {
560         const struct cred *cred = current_cred();
561         struct net *net = dev_net(tun->dev);
562
563         return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
564                   (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
565                 !ns_capable(net->user_ns, CAP_NET_ADMIN);
566 }
567
568 static void tun_set_real_num_queues(struct tun_struct *tun)
569 {
570         netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
571         netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
572 }
573
574 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
575 {
576         tfile->detached = tun;
577         list_add_tail(&tfile->next, &tun->disabled);
578         ++tun->numdisabled;
579 }
580
581 static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
582 {
583         struct tun_struct *tun = tfile->detached;
584
585         tfile->detached = NULL;
586         list_del_init(&tfile->next);
587         --tun->numdisabled;
588         return tun;
589 }
590
591 void tun_ptr_free(void *ptr)
592 {
593         if (!ptr)
594                 return;
595         if (tun_is_xdp_frame(ptr)) {
596                 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
597
598                 xdp_return_frame(xdpf);
599         } else {
600                 __skb_array_destroy_skb(ptr);
601         }
602 }
603 EXPORT_SYMBOL_GPL(tun_ptr_free);
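/* tun_is_xdp_frame()/tun_xdp_to_ptr()/tun_ptr_to_xdp() come from
 * <linux/if_tun.h>; roughly (a sketch, modulo the exact header contents),
 * they tag the pointer's lowest bit so one ptr_ring can carry both
 * sk_buffs and xdp_frames:
 *
 *	#define TUN_XDP_FLAG 0x1UL
 *
 *	bool tun_is_xdp_frame(void *ptr)
 *	{
 *		return (unsigned long)ptr & TUN_XDP_FLAG;
 *	}
 *
 *	void *tun_xdp_to_ptr(struct xdp_frame *xdp)
 *	{
 *		return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
 *	}
 *
 *	struct xdp_frame *tun_ptr_to_xdp(void *ptr)
 *	{
 *		return (struct xdp_frame *)((unsigned long)ptr & ~TUN_XDP_FLAG);
 *	}
 */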
604
605 static void tun_queue_purge(struct tun_file *tfile)
606 {
607         void *ptr;
608
609         while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
610                 tun_ptr_free(ptr);
611
612         skb_queue_purge(&tfile->sk.sk_write_queue);
613         skb_queue_purge(&tfile->sk.sk_error_queue);
614 }
615
616 static void __tun_detach(struct tun_file *tfile, bool clean)
617 {
618         struct tun_file *ntfile;
619         struct tun_struct *tun;
620
621         tun = rtnl_dereference(tfile->tun);
622
623         if (tun && clean) {
624                 tun_napi_disable(tfile);
625                 tun_napi_del(tfile);
626         }
627
628         if (tun && !tfile->detached) {
629                 u16 index = tfile->queue_index;
630                 BUG_ON(index >= tun->numqueues);
631
632                 rcu_assign_pointer(tun->tfiles[index],
633                                    tun->tfiles[tun->numqueues - 1]);
634                 ntfile = rtnl_dereference(tun->tfiles[index]);
635                 ntfile->queue_index = index;
636                 rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
637                                    NULL);
638
639                 --tun->numqueues;
640                 if (clean) {
641                         RCU_INIT_POINTER(tfile->tun, NULL);
642                         sock_put(&tfile->sk);
643                 } else
644                         tun_disable_queue(tun, tfile);
645
646                 synchronize_net();
647                 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
648                 /* Drop read queue */
649                 tun_queue_purge(tfile);
650                 tun_set_real_num_queues(tun);
651         } else if (tfile->detached && clean) {
652                 tun = tun_enable_queue(tfile);
653                 sock_put(&tfile->sk);
654         }
655
656         if (clean) {
657                 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
658                         netif_carrier_off(tun->dev);
659
660                         if (!(tun->flags & IFF_PERSIST) &&
661                             tun->dev->reg_state == NETREG_REGISTERED)
662                                 unregister_netdevice(tun->dev);
663                 }
664                 if (tun)
665                         xdp_rxq_info_unreg(&tfile->xdp_rxq);
666                 ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
667                 sock_put(&tfile->sk);
668         }
669 }
670
671 static void tun_detach(struct tun_file *tfile, bool clean)
672 {
673         struct tun_struct *tun;
674         struct net_device *dev;
675
676         rtnl_lock();
677         tun = rtnl_dereference(tfile->tun);
678         dev = tun ? tun->dev : NULL;
679         __tun_detach(tfile, clean);
680         if (dev)
681                 netdev_state_change(dev);
682         rtnl_unlock();
683 }
684
685 static void tun_detach_all(struct net_device *dev)
686 {
687         struct tun_struct *tun = netdev_priv(dev);
688         struct tun_file *tfile, *tmp;
689         int i, n = tun->numqueues;
690
691         for (i = 0; i < n; i++) {
692                 tfile = rtnl_dereference(tun->tfiles[i]);
693                 BUG_ON(!tfile);
694                 tun_napi_disable(tfile);
695                 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
696                 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
697                 RCU_INIT_POINTER(tfile->tun, NULL);
698                 --tun->numqueues;
699         }
700         list_for_each_entry(tfile, &tun->disabled, next) {
701                 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
702                 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
703                 RCU_INIT_POINTER(tfile->tun, NULL);
704         }
705         BUG_ON(tun->numqueues != 0);
706
707         synchronize_net();
708         for (i = 0; i < n; i++) {
709                 tfile = rtnl_dereference(tun->tfiles[i]);
710                 tun_napi_del(tfile);
711                 /* Drop read queue */
712                 tun_queue_purge(tfile);
713                 xdp_rxq_info_unreg(&tfile->xdp_rxq);
714                 sock_put(&tfile->sk);
715         }
716         list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
717                 tun_enable_queue(tfile);
718                 tun_queue_purge(tfile);
719                 xdp_rxq_info_unreg(&tfile->xdp_rxq);
720                 sock_put(&tfile->sk);
721         }
722         BUG_ON(tun->numdisabled != 0);
723
724         if (tun->flags & IFF_PERSIST)
725                 module_put(THIS_MODULE);
726 }
727
728 static int tun_attach(struct tun_struct *tun, struct file *file,
729                       bool skip_filter, bool napi, bool napi_frags,
730                       bool publish_tun)
731 {
732         struct tun_file *tfile = file->private_data;
733         struct net_device *dev = tun->dev;
734         int err;
735
736         err = security_tun_dev_attach(tfile->socket.sk, tun->security);
737         if (err < 0)
738                 goto out;
739
740         err = -EINVAL;
741         if (rtnl_dereference(tfile->tun) && !tfile->detached)
742                 goto out;
743
744         err = -EBUSY;
745         if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
746                 goto out;
747
748         err = -E2BIG;
749         if (!tfile->detached &&
750             tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
751                 goto out;
752
753         err = 0;
754
755         /* Re-attach the filter to the persistent device */
756         if (!skip_filter && (tun->filter_attached == true)) {
757                 lock_sock(tfile->socket.sk);
758                 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
759                 release_sock(tfile->socket.sk);
760                 if (err < 0)
761                         goto out;
762         }
763
764         if (!tfile->detached &&
765             ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
766                             GFP_KERNEL, tun_ptr_free)) {
767                 err = -ENOMEM;
768                 goto out;
769         }
770
771         tfile->queue_index = tun->numqueues;
772         tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
773
774         if (tfile->detached) {
775                 /* Re-attach detached tfile, updating XDP queue_index */
776                 WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
777
778                 if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
779                         tfile->xdp_rxq.queue_index = tfile->queue_index;
780         } else {
781                 /* Set up XDP RX-queue info for a new tfile being attached */
782                 err = xdp_rxq_info_reg(&tfile->xdp_rxq,
783                                        tun->dev, tfile->queue_index, 0);
784                 if (err < 0)
785                         goto out;
786                 err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
787                                                  MEM_TYPE_PAGE_SHARED, NULL);
788                 if (err < 0) {
789                         xdp_rxq_info_unreg(&tfile->xdp_rxq);
790                         goto out;
791                 }
792                 err = 0;
793         }
794
795         if (tfile->detached) {
796                 tun_enable_queue(tfile);
797         } else {
798                 sock_hold(&tfile->sk);
799                 tun_napi_init(tun, tfile, napi, napi_frags);
800         }
801
802         if (rtnl_dereference(tun->xdp_prog))
803                 sock_set_flag(&tfile->sk, SOCK_XDP);
804
805         /* device is allowed to go away first, so no need to hold extra
806          * refcnt.
807          */
808
809         /* Publish tfile->tun and tun->tfiles only after we've fully
810          * initialized tfile; otherwise we risk using a half-initialized
811          * object.
812          */
813         if (publish_tun)
814                 rcu_assign_pointer(tfile->tun, tun);
815         rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
816         tun->numqueues++;
817         tun_set_real_num_queues(tun);
818 out:
819         return err;
820 }
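/* Illustrative sketch, not part of this file: a queue opened with
 * IFF_MULTI_QUEUE can be parked and re-attached through TUNSETQUEUE, which
 * lands in __tun_detach()/tun_attach() above; error handling omitted:
 *
 *	struct ifreq ifr = { 0 };
 *
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(fd, TUNSETQUEUE, &ifr);
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(fd, TUNSETQUEUE, &ifr);
 */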
821
822 static struct tun_struct *tun_get(struct tun_file *tfile)
823 {
824         struct tun_struct *tun;
825
826         rcu_read_lock();
827         tun = rcu_dereference(tfile->tun);
828         if (tun)
829                 dev_hold(tun->dev);
830         rcu_read_unlock();
831
832         return tun;
833 }
834
835 static void tun_put(struct tun_struct *tun)
836 {
837         dev_put(tun->dev);
838 }
839
840 /* TAP filtering */
841 static void addr_hash_set(u32 *mask, const u8 *addr)
842 {
843         int n = ether_crc(ETH_ALEN, addr) >> 26;
844         mask[n >> 5] |= (1 << (n & 31));
845 }
846
847 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
848 {
849         int n = ether_crc(ETH_ALEN, addr) >> 26;
850         return mask[n >> 5] & (1 << (n & 31));
851 }
852
853 static int update_filter(struct tap_filter *filter, void __user *arg)
854 {
855         struct { u8 u[ETH_ALEN]; } *addr;
856         struct tun_filter uf;
857         int err, alen, n, nexact;
858
859         if (copy_from_user(&uf, arg, sizeof(uf)))
860                 return -EFAULT;
861
862         if (!uf.count) {
863                 /* Disabled */
864                 filter->count = 0;
865                 return 0;
866         }
867
868         alen = ETH_ALEN * uf.count;
869         addr = memdup_user(arg + sizeof(uf), alen);
870         if (IS_ERR(addr))
871                 return PTR_ERR(addr);
872
873         /* The filter is updated without holding any locks, which is
874          * perfectly safe: we disable it first, and in the worst
875          * case we'll accept a few undesired packets. */
876         filter->count = 0;
877         wmb();
878
879         /* Use first set of addresses as an exact filter */
880         for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
881                 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
882
883         nexact = n;
884
885         /* Remaining multicast addresses are hashed; a
886          * unicast address will leave the filter disabled. */
887         memset(filter->mask, 0, sizeof(filter->mask));
888         for (; n < uf.count; n++) {
889                 if (!is_multicast_ether_addr(addr[n].u)) {
890                         err = 0; /* no filter */
891                         goto free_addr;
892                 }
893                 addr_hash_set(filter->mask, addr[n].u);
894         }
895
896         /* For ALLMULTI just set the mask to all ones.
897          * This overrides the mask populated above. */
898         if ((uf.flags & TUN_FLT_ALLMULTI))
899                 memset(filter->mask, ~0, sizeof(filter->mask));
900
901         /* Now enable the filter */
902         wmb();
903         filter->count = nexact;
904
905         /* Return the number of exact filters */
906         err = nexact;
907 free_addr:
908         kfree(addr);
909         return err;
910 }
911
912 /* Returns: 0 - drop, !=0 - accept */
913 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
914 {
915         /* Cannot use eth_hdr(skb) here because the mac header has not
916          * been set at this point. */
917         struct ethhdr *eh = (struct ethhdr *) skb->data;
918         int i;
919
920         /* Exact match */
921         for (i = 0; i < filter->count; i++)
922                 if (ether_addr_equal(eh->h_dest, filter->addr[i]))
923                         return 1;
924
925         /* Inexact match (multicast only) */
926         if (is_multicast_ether_addr(eh->h_dest))
927                 return addr_hash_test(filter->mask, eh->h_dest);
928
929         return 0;
930 }
931
932 /*
933  * Checks whether the packet is accepted or not.
934  * Returns: 0 - drop, !=0 - accept
935  */
936 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
937 {
938         if (!filter->count)
939                 return 1;
940
941         return run_filter(filter, skb);
942 }
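/* Illustrative sketch, not part of this file: the tap filter consulted by
 * check_filter() is programmed with TUNSETTXFILTER, passing a struct
 * tun_filter followed by the address array (the addresses here are made up):
 *
 *	struct {
 *		struct tun_filter flt;
 *		unsigned char addrs[2][ETH_ALEN];
 *	} req = {
 *		.flt = { .flags = 0, .count = 2 },
 *		.addrs = {
 *			{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *			{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
 *		},
 *	};
 *
 *	ioctl(fd, TUNSETTXFILTER, &req);
 */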
943
944 /* Network device part of the driver */
945
946 static const struct ethtool_ops tun_ethtool_ops;
947
948 /* Net device detach from fd. */
949 static void tun_net_uninit(struct net_device *dev)
950 {
951         tun_detach_all(dev);
952 }
953
954 /* Net device open. */
955 static int tun_net_open(struct net_device *dev)
956 {
957         netif_tx_start_all_queues(dev);
958
959         return 0;
960 }
961
962 /* Net device close. */
963 static int tun_net_close(struct net_device *dev)
964 {
965         netif_tx_stop_all_queues(dev);
966         return 0;
967 }
968
969 /* Net device start xmit */
970 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
971 {
972 #ifdef CONFIG_RPS
973         if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
974                 /* Select queue was not called for the skbuff, so we extract the
975                  * RPS hash and save it into the flow_table here.
976                  */
977                 struct tun_flow_entry *e;
978                 __u32 rxhash;
979
980                 rxhash = __skb_get_hash_symmetric(skb);
981                 e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
982                 if (e)
983                         tun_flow_save_rps_rxhash(e, rxhash);
984         }
985 #endif
986 }
987
988 static unsigned int run_ebpf_filter(struct tun_struct *tun,
989                                     struct sk_buff *skb,
990                                     int len)
991 {
992         struct tun_prog *prog = rcu_dereference(tun->filter_prog);
993
994         if (prog)
995                 len = bpf_prog_run_clear_cb(prog->prog, skb);
996
997         return len;
998 }
999
1000 /* Net device start xmit */
1001 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1002 {
1003         struct tun_struct *tun = netdev_priv(dev);
1004         int txq = skb->queue_mapping;
1005         struct tun_file *tfile;
1006         int len = skb->len;
1007
1008         rcu_read_lock();
1009         tfile = rcu_dereference(tun->tfiles[txq]);
1010
1011         /* Drop packet if interface is not attached */
1012         if (!tfile)
1013                 goto drop;
1014
1015         if (!rcu_dereference(tun->steering_prog))
1016                 tun_automq_xmit(tun, skb);
1017
1018         netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
1019
1020         /* Drop if the filter does not like it.
1021          * This is a noop if the filter is disabled.
1022          * The filter can be enabled only for TAP devices. */
1023         if (!check_filter(&tun->txflt, skb))
1024                 goto drop;
1025
1026         if (tfile->socket.sk->sk_filter &&
1027             sk_filter(tfile->socket.sk, skb))
1028                 goto drop;
1029
1030         len = run_ebpf_filter(tun, skb, len);
1031         if (len == 0 || pskb_trim(skb, len))
1032                 goto drop;
1033
1034         if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1035                 goto drop;
1036
1037         skb_tx_timestamp(skb);
1038
1039         /* Orphan the skb - required as we might hang on to it
1040          * for an indefinite time.
1041          */
1042         skb_orphan(skb);
1043
1044         nf_reset_ct(skb);
1045
1046         if (ptr_ring_produce(&tfile->tx_ring, skb))
1047                 goto drop;
1048
1049         /* Notify and wake up reader process */
1050         if (tfile->flags & TUN_FASYNC)
1051                 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1052         tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1053
1054         rcu_read_unlock();
1055         return NETDEV_TX_OK;
1056
1057 drop:
1058         atomic_long_inc(&dev->tx_dropped);
1059         skb_tx_error(skb);
1060         kfree_skb(skb);
1061         rcu_read_unlock();
1062         return NET_XMIT_DROP;
1063 }
1064
1065 static void tun_net_mclist(struct net_device *dev)
1066 {
1067         /*
1068          * This callback is supposed to deal with the mc filter in the
1069          * _rx_ path and has nothing to do with the _tx_ path.
1070          * In the rx path we always accept everything userspace gives us.
1071          */
1072 }
1073
1074 static netdev_features_t tun_net_fix_features(struct net_device *dev,
1075         netdev_features_t features)
1076 {
1077         struct tun_struct *tun = netdev_priv(dev);
1078
1079         return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1080 }
1081
1082 static void tun_set_headroom(struct net_device *dev, int new_hr)
1083 {
1084         struct tun_struct *tun = netdev_priv(dev);
1085
1086         if (new_hr < NET_SKB_PAD)
1087                 new_hr = NET_SKB_PAD;
1088
1089         tun->align = new_hr;
1090 }
1091
1092 static void
1093 tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1094 {
1095         struct tun_struct *tun = netdev_priv(dev);
1096
1097         dev_get_tstats64(dev, stats);
1098
1099         stats->rx_frame_errors +=
1100                 (unsigned long)atomic_long_read(&tun->rx_frame_errors);
1101 }
1102
1103 static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1104                        struct netlink_ext_ack *extack)
1105 {
1106         struct tun_struct *tun = netdev_priv(dev);
1107         struct tun_file *tfile;
1108         struct bpf_prog *old_prog;
1109         int i;
1110
1111         old_prog = rtnl_dereference(tun->xdp_prog);
1112         rcu_assign_pointer(tun->xdp_prog, prog);
1113         if (old_prog)
1114                 bpf_prog_put(old_prog);
1115
1116         for (i = 0; i < tun->numqueues; i++) {
1117                 tfile = rtnl_dereference(tun->tfiles[i]);
1118                 if (prog)
1119                         sock_set_flag(&tfile->sk, SOCK_XDP);
1120                 else
1121                         sock_reset_flag(&tfile->sk, SOCK_XDP);
1122         }
1123         list_for_each_entry(tfile, &tun->disabled, next) {
1124                 if (prog)
1125                         sock_set_flag(&tfile->sk, SOCK_XDP);
1126                 else
1127                         sock_reset_flag(&tfile->sk, SOCK_XDP);
1128         }
1129
1130         return 0;
1131 }
1132
1133 static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1134 {
1135         switch (xdp->command) {
1136         case XDP_SETUP_PROG:
1137                 return tun_xdp_set(dev, xdp->prog, xdp->extack);
1138         default:
1139                 return -EINVAL;
1140         }
1141 }
1142
1143 static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
1144 {
1145         if (new_carrier) {
1146                 struct tun_struct *tun = netdev_priv(dev);
1147
1148                 if (!tun->numqueues)
1149                         return -EPERM;
1150
1151                 netif_carrier_on(dev);
1152         } else {
1153                 netif_carrier_off(dev);
1154         }
1155         return 0;
1156 }
1157
1158 static const struct net_device_ops tun_netdev_ops = {
1159         .ndo_uninit             = tun_net_uninit,
1160         .ndo_open               = tun_net_open,
1161         .ndo_stop               = tun_net_close,
1162         .ndo_start_xmit         = tun_net_xmit,
1163         .ndo_fix_features       = tun_net_fix_features,
1164         .ndo_select_queue       = tun_select_queue,
1165         .ndo_set_rx_headroom    = tun_set_headroom,
1166         .ndo_get_stats64        = tun_net_get_stats64,
1167         .ndo_change_carrier     = tun_net_change_carrier,
1168 };
1169
1170 static void __tun_xdp_flush_tfile(struct tun_file *tfile)
1171 {
1172         /* Notify and wake up reader process */
1173         if (tfile->flags & TUN_FASYNC)
1174                 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1175         tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1176 }
1177
1178 static int tun_xdp_xmit(struct net_device *dev, int n,
1179                         struct xdp_frame **frames, u32 flags)
1180 {
1181         struct tun_struct *tun = netdev_priv(dev);
1182         struct tun_file *tfile;
1183         u32 numqueues;
1184         int drops = 0;
1185         int cnt = n;
1186         int i;
1187
1188         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1189                 return -EINVAL;
1190
1191         rcu_read_lock();
1192
1193 resample:
1194         numqueues = READ_ONCE(tun->numqueues);
1195         if (!numqueues) {
1196                 rcu_read_unlock();
1197                 return -ENXIO; /* Caller will free/return all frames */
1198         }
1199
1200         tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1201                                             numqueues]);
1202         if (unlikely(!tfile))
1203                 goto resample;
1204
1205         spin_lock(&tfile->tx_ring.producer_lock);
1206         for (i = 0; i < n; i++) {
1207                 struct xdp_frame *xdp = frames[i];
1208                 /* Encode the XDP flag into the lowest bit so the consumer
1209                  * can tell an XDP frame apart from an sk_buff.
1210                  */
1211                 void *frame = tun_xdp_to_ptr(xdp);
1212
1213                 if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1214                         atomic_long_inc(&dev->tx_dropped);
1215                         xdp_return_frame_rx_napi(xdp);
1216                         drops++;
1217                 }
1218         }
1219         spin_unlock(&tfile->tx_ring.producer_lock);
1220
1221         if (flags & XDP_XMIT_FLUSH)
1222                 __tun_xdp_flush_tfile(tfile);
1223
1224         rcu_read_unlock();
1225         return cnt - drops;
1226 }
1227
1228 static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
1229 {
1230         struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
1231
1232         if (unlikely(!frame))
1233                 return -EOVERFLOW;
1234
1235         return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1236 }
1237
1238 static const struct net_device_ops tap_netdev_ops = {
1239         .ndo_uninit             = tun_net_uninit,
1240         .ndo_open               = tun_net_open,
1241         .ndo_stop               = tun_net_close,
1242         .ndo_start_xmit         = tun_net_xmit,
1243         .ndo_fix_features       = tun_net_fix_features,
1244         .ndo_set_rx_mode        = tun_net_mclist,
1245         .ndo_set_mac_address    = eth_mac_addr,
1246         .ndo_validate_addr      = eth_validate_addr,
1247         .ndo_select_queue       = tun_select_queue,
1248         .ndo_features_check     = passthru_features_check,
1249         .ndo_set_rx_headroom    = tun_set_headroom,
1250         .ndo_get_stats64        = dev_get_tstats64,
1251         .ndo_bpf                = tun_xdp,
1252         .ndo_xdp_xmit           = tun_xdp_xmit,
1253         .ndo_change_carrier     = tun_net_change_carrier,
1254 };
1255
1256 static void tun_flow_init(struct tun_struct *tun)
1257 {
1258         int i;
1259
1260         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
1261                 INIT_HLIST_HEAD(&tun->flows[i]);
1262
1263         tun->ageing_time = TUN_FLOW_EXPIRE;
1264         timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1265         mod_timer(&tun->flow_gc_timer,
1266                   round_jiffies_up(jiffies + tun->ageing_time));
1267 }
1268
1269 static void tun_flow_uninit(struct tun_struct *tun)
1270 {
1271         del_timer_sync(&tun->flow_gc_timer);
1272         tun_flow_flush(tun);
1273 }
1274
1275 #define MIN_MTU 68
1276 #define MAX_MTU 65535
1277
1278 /* Initialize net device. */
1279 static void tun_net_init(struct net_device *dev)
1280 {
1281         struct tun_struct *tun = netdev_priv(dev);
1282
1283         switch (tun->flags & TUN_TYPE_MASK) {
1284         case IFF_TUN:
1285                 dev->netdev_ops = &tun_netdev_ops;
1286                 dev->header_ops = &ip_tunnel_header_ops;
1287
1288                 /* Point-to-Point TUN Device */
1289                 dev->hard_header_len = 0;
1290                 dev->addr_len = 0;
1291                 dev->mtu = 1500;
1292
1293                 /* Zero header length */
1294                 dev->type = ARPHRD_NONE;
1295                 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1296                 break;
1297
1298         case IFF_TAP:
1299                 dev->netdev_ops = &tap_netdev_ops;
1300                 /* Ethernet TAP Device */
1301                 ether_setup(dev);
1302                 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1303                 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1304
1305                 eth_hw_addr_random(dev);
1306
1307                 break;
1308         }
1309
1310         dev->min_mtu = MIN_MTU;
1311         dev->max_mtu = MAX_MTU - dev->hard_header_len;
1312 }
1313
1314 static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1315 {
1316         struct sock *sk = tfile->socket.sk;
1317
1318         return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1319 }
1320
1321 /* Character device part */
1322
1323 /* Poll */
1324 static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1325 {
1326         struct tun_file *tfile = file->private_data;
1327         struct tun_struct *tun = tun_get(tfile);
1328         struct sock *sk;
1329         __poll_t mask = 0;
1330
1331         if (!tun)
1332                 return EPOLLERR;
1333
1334         sk = tfile->socket.sk;
1335
1336         poll_wait(file, sk_sleep(sk), wait);
1337
1338         if (!ptr_ring_empty(&tfile->tx_ring))
1339                 mask |= EPOLLIN | EPOLLRDNORM;
1340
1341         /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
1342          * guarantee that EPOLLOUT will be raised either here or by
1343          * tun_sock_write_space(), so the process can get a notification
1344          * after it writes to a down device and meets -EIO.
1345          */
1346         if (tun_sock_writeable(tun, tfile) ||
1347             (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1348              tun_sock_writeable(tun, tfile)))
1349                 mask |= EPOLLOUT | EPOLLWRNORM;
1350
1351         if (tun->dev->reg_state != NETREG_REGISTERED)
1352                 mask = EPOLLERR;
1353
1354         tun_put(tun);
1355         return mask;
1356 }
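/* Illustrative sketch, not part of this file: tun_chr_poll() is what makes
 * the usual readiness loop work on a tun fd; buffer sizing and error
 * handling are simplified:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[2048];
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN)
 *			read(fd, buf, sizeof(buf));
 *	}
 */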
1357
1358 static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
1359                                             size_t len,
1360                                             const struct iov_iter *it)
1361 {
1362         struct sk_buff *skb;
1363         size_t linear;
1364         int err;
1365         int i;
1366
1367         if (it->nr_segs > MAX_SKB_FRAGS + 1)
1368                 return ERR_PTR(-EMSGSIZE);
1369
1370         local_bh_disable();
1371         skb = napi_get_frags(&tfile->napi);
1372         local_bh_enable();
1373         if (!skb)
1374                 return ERR_PTR(-ENOMEM);
1375
1376         linear = iov_iter_single_seg_count(it);
1377         err = __skb_grow(skb, linear);
1378         if (err)
1379                 goto free;
1380
1381         skb->len = len;
1382         skb->data_len = len - linear;
1383         skb->truesize += skb->data_len;
1384
1385         for (i = 1; i < it->nr_segs; i++) {
1386                 size_t fragsz = it->iov[i].iov_len;
1387                 struct page *page;
1388                 void *frag;
1389
1390                 if (fragsz == 0 || fragsz > PAGE_SIZE) {
1391                         err = -EINVAL;
1392                         goto free;
1393                 }
1394                 frag = netdev_alloc_frag(fragsz);
1395                 if (!frag) {
1396                         err = -ENOMEM;
1397                         goto free;
1398                 }
1399                 page = virt_to_head_page(frag);
1400                 skb_fill_page_desc(skb, i - 1, page,
1401                                    frag - page_address(page), fragsz);
1402         }
1403
1404         return skb;
1405 free:
1406         /* frees skb and all frags allocated with napi_alloc_frag() */
1407         napi_free_frags(&tfile->napi);
1408         return ERR_PTR(err);
1409 }
1410
1411 /* prepad is the amount to reserve at front.  len is length after that.
1412  * linear is a hint as to how much to copy (usually headers). */
1413 static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1414                                      size_t prepad, size_t len,
1415                                      size_t linear, int noblock)
1416 {
1417         struct sock *sk = tfile->socket.sk;
1418         struct sk_buff *skb;
1419         int err;
1420
1421         /* Under a page?  Don't bother with paged skb. */
1422         if (prepad + len < PAGE_SIZE || !linear)
1423                 linear = len;
1424
1425         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1426                                    &err, 0);
1427         if (!skb)
1428                 return ERR_PTR(err);
1429
1430         skb_reserve(skb, prepad);
1431         skb_put(skb, linear);
1432         skb->data_len = len - linear;
1433         skb->len += len - linear;
1434
1435         return skb;
1436 }
1437
1438 static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1439                            struct sk_buff *skb, int more)
1440 {
1441         struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1442         struct sk_buff_head process_queue;
1443         u32 rx_batched = tun->rx_batched;
1444         bool rcv = false;
1445
1446         if (!rx_batched || (!more && skb_queue_empty(queue))) {
1447                 local_bh_disable();
1448                 skb_record_rx_queue(skb, tfile->queue_index);
1449                 netif_receive_skb(skb);
1450                 local_bh_enable();
1451                 return;
1452         }
1453
1454         spin_lock(&queue->lock);
1455         if (!more || skb_queue_len(queue) == rx_batched) {
1456                 __skb_queue_head_init(&process_queue);
1457                 skb_queue_splice_tail_init(queue, &process_queue);
1458                 rcv = true;
1459         } else {
1460                 __skb_queue_tail(queue, skb);
1461         }
1462         spin_unlock(&queue->lock);
1463
1464         if (rcv) {
1465                 struct sk_buff *nskb;
1466
1467                 local_bh_disable();
1468                 while ((nskb = __skb_dequeue(&process_queue))) {
1469                         skb_record_rx_queue(nskb, tfile->queue_index);
1470                         netif_receive_skb(nskb);
1471                 }
1472                 skb_record_rx_queue(skb, tfile->queue_index);
1473                 netif_receive_skb(skb);
1474                 local_bh_enable();
1475         }
1476 }
1477
1478 static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1479                               int len, int noblock, bool zerocopy)
1480 {
1481         if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
1482                 return false;
1483
1484         if (tfile->socket.sk->sk_sndbuf != INT_MAX)
1485                 return false;
1486
1487         if (!noblock)
1488                 return false;
1489
1490         if (zerocopy)
1491                 return false;
1492
1493         if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
1494             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
1495                 return false;
1496
1497         return true;
1498 }
1499
1500 static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
1501                                        struct page_frag *alloc_frag, char *buf,
1502                                        int buflen, int len, int pad)
1503 {
1504         struct sk_buff *skb = build_skb(buf, buflen);
1505
1506         if (!skb)
1507                 return ERR_PTR(-ENOMEM);
1508
1509         skb_reserve(skb, pad);
1510         skb_put(skb, len);
1511         skb_set_owner_w(skb, tfile->socket.sk);
1512
1513         get_page(alloc_frag->page);
1514         alloc_frag->offset += buflen;
1515
1516         return skb;
1517 }
1518
1519 static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
1520                        struct xdp_buff *xdp, u32 act)
1521 {
1522         int err;
1523
1524         switch (act) {
1525         case XDP_REDIRECT:
1526                 err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
1527                 if (err)
1528                         return err;
1529                 break;
1530         case XDP_TX:
1531                 err = tun_xdp_tx(tun->dev, xdp);
1532                 if (err < 0)
1533                         return err;
1534                 break;
1535         case XDP_PASS:
1536                 break;
1537         default:
1538                 bpf_warn_invalid_xdp_action(act);
1539                 fallthrough;
1540         case XDP_ABORTED:
1541                 trace_xdp_exception(tun->dev, xdp_prog, act);
1542                 fallthrough;
1543         case XDP_DROP:
1544                 atomic_long_inc(&tun->dev->rx_dropped);
1545                 break;
1546         }
1547
1548         return act;
1549 }
1550
1551 static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1552                                      struct tun_file *tfile,
1553                                      struct iov_iter *from,
1554                                      struct virtio_net_hdr *hdr,
1555                                      int len, int *skb_xdp)
1556 {
1557         struct page_frag *alloc_frag = &current->task_frag;
1558         struct bpf_prog *xdp_prog;
1559         int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1560         char *buf;
1561         size_t copied;
1562         int pad = TUN_RX_PAD;
1563         int err = 0;
1564
1565         rcu_read_lock();
1566         xdp_prog = rcu_dereference(tun->xdp_prog);
1567         if (xdp_prog)
1568                 pad += XDP_PACKET_HEADROOM;
1569         buflen += SKB_DATA_ALIGN(len + pad);
1570         rcu_read_unlock();
1571
1572         alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
1573         if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
1574                 return ERR_PTR(-ENOMEM);
1575
1576         buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1577         copied = copy_page_from_iter(alloc_frag->page,
1578                                      alloc_frag->offset + pad,
1579                                      len, from);
1580         if (copied != len)
1581                 return ERR_PTR(-EFAULT);
1582
1583         /* There's a small window in which an XDP program may be set after
1584          * the check of xdp_prog above; this should be rare, and for
1585          * simplicity we do XDP on the skb in case the headroom is not enough.
1586          */
1587         if (hdr->gso_type || !xdp_prog) {
1588                 *skb_xdp = 1;
1589                 return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
1590                                        pad);
1591         }
1592
1593         *skb_xdp = 0;
1594
1595         local_bh_disable();
1596         rcu_read_lock();
1597         xdp_prog = rcu_dereference(tun->xdp_prog);
1598         if (xdp_prog) {
1599                 struct xdp_buff xdp;
1600                 u32 act;
1601
1602                 xdp.data_hard_start = buf;
1603                 xdp.data = buf + pad;
1604                 xdp_set_data_meta_invalid(&xdp);
1605                 xdp.data_end = xdp.data + len;
1606                 xdp.rxq = &tfile->xdp_rxq;
1607                 xdp.frame_sz = buflen;
1608
1609                 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1610                 if (act == XDP_REDIRECT || act == XDP_TX) {
1611                         get_page(alloc_frag->page);
1612                         alloc_frag->offset += buflen;
1613                 }
1614                 err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1615                 if (err < 0) {
1616                         if (act == XDP_REDIRECT || act == XDP_TX)
1617                                 put_page(alloc_frag->page);
1618                         goto out;
1619                 }
1620
1621                 if (err == XDP_REDIRECT)
1622                         xdp_do_flush();
1623                 if (err != XDP_PASS)
1624                         goto out;
1625
1626                 pad = xdp.data - xdp.data_hard_start;
1627                 len = xdp.data_end - xdp.data;
1628         }
1629         rcu_read_unlock();
1630         local_bh_enable();
1631
1632         return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1633
1634 out:
1635         rcu_read_unlock();
1636         local_bh_enable();
1637         return NULL;
1638 }
1639
1640 /* Get packet from user space buffer */
1641 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1642                             void *msg_control, struct iov_iter *from,
1643                             int noblock, bool more)
1644 {
1645         struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1646         struct sk_buff *skb;
1647         size_t total_len = iov_iter_count(from);
1648         size_t len = total_len, align = tun->align, linear;
1649         struct virtio_net_hdr gso = { 0 };
1650         int good_linear;
1651         int copylen;
1652         bool zerocopy = false;
1653         int err;
1654         u32 rxhash = 0;
1655         int skb_xdp = 1;
1656         bool frags = tun_napi_frags_enabled(tfile);
1657
1658         if (!(tun->flags & IFF_NO_PI)) {
1659                 if (len < sizeof(pi))
1660                         return -EINVAL;
1661                 len -= sizeof(pi);
1662
1663                 if (!copy_from_iter_full(&pi, sizeof(pi), from))
1664                         return -EFAULT;
1665         }
1666
1667         if (tun->flags & IFF_VNET_HDR) {
1668                 int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1669
1670                 if (len < vnet_hdr_sz)
1671                         return -EINVAL;
1672                 len -= vnet_hdr_sz;
1673
1674                 if (!copy_from_iter_full(&gso, sizeof(gso), from))
1675                         return -EFAULT;
1676
1677                 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1678                     tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1679                         gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1680
1681                 if (tun16_to_cpu(tun, gso.hdr_len) > len)
1682                         return -EINVAL;
1683                 iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1684         }
1685
1686         if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1687                 align += NET_IP_ALIGN;
1688                 if (unlikely(len < ETH_HLEN ||
1689                              (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1690                         return -EINVAL;
1691         }
1692
1693         good_linear = SKB_MAX_HEAD(align);
1694
1695         if (msg_control) {
1696                 struct iov_iter i = *from;
1697
1698                 /* The first copylen bytes (gso.hdr_len, or GOODCOPY_LEN
1699                  * if unset) are copied into the skb head, leaving room to
1700                  * expand the head. The rest is mapped from userspace.
1701                  */
1702                 copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1703                 if (copylen > good_linear)
1704                         copylen = good_linear;
1705                 linear = copylen;
1706                 iov_iter_advance(&i, copylen);
1707                 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1708                         zerocopy = true;
1709         }
1710
1711         if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
1712                 /* Packets that are hard to process here (e.g. GSO or
1713                  * jumbo packets) are instead handled by the generic XDP
1714                  * routine after the skb has been created.
1715                  */
1716                 skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1717                 if (IS_ERR(skb)) {
1718                         atomic_long_inc(&tun->dev->rx_dropped);
1719                         return PTR_ERR(skb);
1720                 }
1721                 if (!skb)
1722                         return total_len;
1723         } else {
1724                 if (!zerocopy) {
1725                         copylen = len;
1726                         if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1727                                 linear = good_linear;
1728                         else
1729                                 linear = tun16_to_cpu(tun, gso.hdr_len);
1730                 }
1731
1732                 if (frags) {
1733                         mutex_lock(&tfile->napi_mutex);
1734                         skb = tun_napi_alloc_frags(tfile, copylen, from);
1735                         /* tun_napi_alloc_frags() enforces a layout for the skb.
1736                          * If zerocopy is enabled, then this layout will be
1737                          * overwritten by zerocopy_sg_from_iter().
1738                          */
1739                         zerocopy = false;
1740                 } else {
1741                         skb = tun_alloc_skb(tfile, align, copylen, linear,
1742                                             noblock);
1743                 }
1744
1745                 if (IS_ERR(skb)) {
1746                         if (PTR_ERR(skb) != -EAGAIN)
1747                                 atomic_long_inc(&tun->dev->rx_dropped);
1748                         if (frags)
1749                                 mutex_unlock(&tfile->napi_mutex);
1750                         return PTR_ERR(skb);
1751                 }
1752
1753                 if (zerocopy)
1754                         err = zerocopy_sg_from_iter(skb, from);
1755                 else
1756                         err = skb_copy_datagram_from_iter(skb, 0, from, len);
1757
1758                 if (err) {
1759                         err = -EFAULT;
1760 drop:
1761                         atomic_long_inc(&tun->dev->rx_dropped);
1762                         kfree_skb(skb);
1763                         if (frags) {
1764                                 tfile->napi.skb = NULL;
1765                                 mutex_unlock(&tfile->napi_mutex);
1766                         }
1767
1768                         return err;
1769                 }
1770         }
1771
1772         if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1773                 atomic_long_inc(&tun->rx_frame_errors);
1774                 kfree_skb(skb);
1775                 if (frags) {
1776                         tfile->napi.skb = NULL;
1777                         mutex_unlock(&tfile->napi_mutex);
1778                 }
1779
1780                 return -EINVAL;
1781         }
1782
1783         switch (tun->flags & TUN_TYPE_MASK) {
1784         case IFF_TUN:
1785                 if (tun->flags & IFF_NO_PI) {
1786                         u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1787
1788                         switch (ip_version) {
1789                         case 4:
1790                                 pi.proto = htons(ETH_P_IP);
1791                                 break;
1792                         case 6:
1793                                 pi.proto = htons(ETH_P_IPV6);
1794                                 break;
1795                         default:
1796                                 atomic_long_inc(&tun->dev->rx_dropped);
1797                                 kfree_skb(skb);
1798                                 return -EINVAL;
1799                         }
1800                 }
1801
1802                 skb_reset_mac_header(skb);
1803                 skb->protocol = pi.proto;
1804                 skb->dev = tun->dev;
1805                 break;
1806         case IFF_TAP:
1807                 if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
1808                         err = -ENOMEM;
1809                         goto drop;
1810                 }
1811                 skb->protocol = eth_type_trans(skb, tun->dev);
1812                 break;
1813         }
1814
1815         /* Hook up the ubuf_info callback only when the skb was built without error */
1816         if (zerocopy) {
1817                 skb_zcopy_init(skb, msg_control);
1818         } else if (msg_control) {
1819                 struct ubuf_info *uarg = msg_control;
1820                 uarg->callback(NULL, uarg, false);
1821         }
1822
1823         skb_reset_network_header(skb);
1824         skb_probe_transport_header(skb);
1825         skb_record_rx_queue(skb, tfile->queue_index);
1826
1827         if (skb_xdp) {
1828                 struct bpf_prog *xdp_prog;
1829                 int ret;
1830
1831                 local_bh_disable();
1832                 rcu_read_lock();
1833                 xdp_prog = rcu_dereference(tun->xdp_prog);
1834                 if (xdp_prog) {
1835                         ret = do_xdp_generic(xdp_prog, skb);
1836                         if (ret != XDP_PASS) {
1837                                 rcu_read_unlock();
1838                                 local_bh_enable();
1839                                 if (frags) {
1840                                         tfile->napi.skb = NULL;
1841                                         mutex_unlock(&tfile->napi_mutex);
1842                                 }
1843                                 return total_len;
1844                         }
1845                 }
1846                 rcu_read_unlock();
1847                 local_bh_enable();
1848         }
1849
1850         /* Compute the costly rx hash only if it is needed for flow
1851          * updates. There is a tiny chance of out-of-order delivery while
1852          * a steering program is being switched; not worth optimizing for.
1853          */
1854         if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1855             !tfile->detached)
1856                 rxhash = __skb_get_hash_symmetric(skb);
1857
1858         rcu_read_lock();
1859         if (unlikely(!(tun->dev->flags & IFF_UP))) {
1860                 err = -EIO;
1861                 rcu_read_unlock();
1862                 goto drop;
1863         }
1864
1865         if (frags) {
1866                 u32 headlen;
1867
1868                 /* Exercise flow dissector code path. */
1869                 skb_push(skb, ETH_HLEN);
1870                 headlen = eth_get_headlen(tun->dev, skb->data,
1871                                           skb_headlen(skb));
1872
1873                 if (unlikely(headlen > skb_headlen(skb))) {
1874                         atomic_long_inc(&tun->dev->rx_dropped);
1875                         napi_free_frags(&tfile->napi);
1876                         rcu_read_unlock();
1877                         mutex_unlock(&tfile->napi_mutex);
1878                         WARN_ON(1);
1879                         return -ENOMEM;
1880                 }
1881
1882                 local_bh_disable();
1883                 napi_gro_frags(&tfile->napi);
1884                 local_bh_enable();
1885                 mutex_unlock(&tfile->napi_mutex);
1886         } else if (tfile->napi_enabled) {
1887                 struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1888                 int queue_len;
1889
1890                 spin_lock_bh(&queue->lock);
1891                 __skb_queue_tail(queue, skb);
1892                 queue_len = skb_queue_len(queue);
1893                 spin_unlock(&queue->lock);
1894
1895                 if (!more || queue_len > NAPI_POLL_WEIGHT)
1896                         napi_schedule(&tfile->napi);
1897
1898                 local_bh_enable();
1899         } else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
1900                 tun_rx_batched(tun, tfile, skb, more);
1901         } else {
1902                 netif_rx_ni(skb);
1903         }
1904         rcu_read_unlock();
1905
1906         preempt_disable();
1907         dev_sw_netstats_rx_add(tun->dev, len);
1908         preempt_enable();
1909
1910         if (rxhash)
1911                 tun_flow_update(tun, rxhash, tfile);
1912
1913         return total_len;
1914 }
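
/* Editorial sketch (not driver code): the userspace side of the write path
 * above. Assuming "fd" is a tun fd configured via TUNSETIFF *without*
 * IFF_NO_PI, each write() must carry a struct tun_pi in front of the raw IP
 * packet (with IFF_NO_PI the bare packet is written instead). Error handling
 * is mostly elided; names here are illustrative only.
 */
#if 0   /* illustrative userspace example, never built with the driver */
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>

static ssize_t tun_write_packet(int fd, const void *pkt, size_t len)
{
        struct tun_pi pi = {
                .flags = 0,
                .proto = htons(ETH_P_IP),       /* an IPv4 payload follows */
        };
        char buf[2048];

        if (len > sizeof(buf) - sizeof(pi))
                return -1;
        memcpy(buf, &pi, sizeof(pi));
        memcpy(buf + sizeof(pi), pkt, len);
        /* One write() == one packet; it is parsed by tun_get_user(). */
        return write(fd, buf, sizeof(pi) + len);
}
#endif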
1915
1916 static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
1917 {
1918         struct file *file = iocb->ki_filp;
1919         struct tun_file *tfile = file->private_data;
1920         struct tun_struct *tun = tun_get(tfile);
1921         ssize_t result;
1922         int noblock = 0;
1923
1924         if (!tun)
1925                 return -EBADFD;
1926
1927         if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
1928                 noblock = 1;
1929
1930         result = tun_get_user(tun, tfile, NULL, from, noblock, false);
1931
1932         tun_put(tun);
1933         return result;
1934 }
1935
1936 static ssize_t tun_put_user_xdp(struct tun_struct *tun,
1937                                 struct tun_file *tfile,
1938                                 struct xdp_frame *xdp_frame,
1939                                 struct iov_iter *iter)
1940 {
1941         int vnet_hdr_sz = 0;
1942         size_t size = xdp_frame->len;
1943         size_t ret;
1944
1945         if (tun->flags & IFF_VNET_HDR) {
1946                 struct virtio_net_hdr gso = { 0 };
1947
1948                 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1949                 if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
1950                         return -EINVAL;
1951                 if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
1952                              sizeof(gso)))
1953                         return -EFAULT;
1954                 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
1955         }
1956
1957         ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
1958
1959         preempt_disable();
1960         dev_sw_netstats_tx_add(tun->dev, 1, ret);
1961         preempt_enable();
1962
1963         return ret;
1964 }
1965
1966 /* Put packet to the user space buffer */
1967 static ssize_t tun_put_user(struct tun_struct *tun,
1968                             struct tun_file *tfile,
1969                             struct sk_buff *skb,
1970                             struct iov_iter *iter)
1971 {
1972         struct tun_pi pi = { 0, skb->protocol };
1973         ssize_t total;
1974         int vlan_offset = 0;
1975         int vlan_hlen = 0;
1976         int vnet_hdr_sz = 0;
1977
1978         if (skb_vlan_tag_present(skb))
1979                 vlan_hlen = VLAN_HLEN;
1980
1981         if (tun->flags & IFF_VNET_HDR)
1982                 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1983
1984         total = skb->len + vlan_hlen + vnet_hdr_sz;
1985
1986         if (!(tun->flags & IFF_NO_PI)) {
1987                 if (iov_iter_count(iter) < sizeof(pi))
1988                         return -EINVAL;
1989
1990                 total += sizeof(pi);
1991                 if (iov_iter_count(iter) < total) {
1992                         /* Packet will be stripped (truncated to fit) */
1993                         pi.flags |= TUN_PKT_STRIP;
1994                 }
1995
1996                 if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
1997                         return -EFAULT;
1998         }
1999
2000         if (vnet_hdr_sz) {
2001                 struct virtio_net_hdr gso;
2002
2003                 if (iov_iter_count(iter) < vnet_hdr_sz)
2004                         return -EINVAL;
2005
2006                 if (virtio_net_hdr_from_skb(skb, &gso,
2007                                             tun_is_little_endian(tun), true,
2008                                             vlan_hlen)) {
2009                         struct skb_shared_info *sinfo = skb_shinfo(skb);
2010                         pr_err("unexpected GSO type: "
2011                                "0x%x, gso_size %d, hdr_len %d\n",
2012                                sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2013                                tun16_to_cpu(tun, gso.hdr_len));
2014                         print_hex_dump(KERN_ERR, "tun: ",
2015                                        DUMP_PREFIX_NONE,
2016                                        16, 1, skb->head,
2017                                        min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2018                         WARN_ON_ONCE(1);
2019                         return -EINVAL;
2020                 }
2021
2022                 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2023                         return -EFAULT;
2024
2025                 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2026         }
2027
2028         if (vlan_hlen) {
2029                 int ret;
2030                 struct veth veth;
2031
2032                 veth.h_vlan_proto = skb->vlan_proto;
2033                 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
2034
2035                 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
2036
2037                 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2038                 if (ret || !iov_iter_count(iter))
2039                         goto done;
2040
2041                 ret = copy_to_iter(&veth, sizeof(veth), iter);
2042                 if (ret != sizeof(veth) || !iov_iter_count(iter))
2043                         goto done;
2044         }
2045
2046         skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
2047
2048 done:
2049         /* Caller is in process context; pin the CPU for the per-CPU stats update. */
2050         preempt_disable();
2051         dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
2052         preempt_enable();
2053
2054         return total;
2055 }
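
/* Editorial sketch (not driver code): the matching userspace read path. Each
 * read() returns exactly one packet from tun_put_user(); without IFF_NO_PI a
 * struct tun_pi precedes the payload, and TUN_PKT_STRIP in pi.flags signals
 * that the packet was truncated to fit the buffer. Illustrative only.
 */
#if 0   /* illustrative userspace example, never built with the driver */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <linux/if_tun.h>

static void tun_read_one(int fd)
{
        char buf[2048];
        struct tun_pi *pi = (struct tun_pi *)buf;
        ssize_t n = read(fd, buf, sizeof(buf));

        if (n < (ssize_t)sizeof(*pi))
                return;
        if (pi->flags & TUN_PKT_STRIP)
                fprintf(stderr, "packet truncated\n");
        printf("proto 0x%04x, %zd payload bytes\n",
               ntohs(pi->proto), n - (ssize_t)sizeof(*pi));
}
#endif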
2056
2057 static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
2058 {
2059         DECLARE_WAITQUEUE(wait, current);
2060         void *ptr = NULL;
2061         int error = 0;
2062
2063         ptr = ptr_ring_consume(&tfile->tx_ring);
2064         if (ptr)
2065                 goto out;
2066         if (noblock) {
2067                 error = -EAGAIN;
2068                 goto out;
2069         }
2070
2071         add_wait_queue(&tfile->socket.wq.wait, &wait);
2072
2073         while (1) {
2074                 set_current_state(TASK_INTERRUPTIBLE);
2075                 ptr = ptr_ring_consume(&tfile->tx_ring);
2076                 if (ptr)
2077                         break;
2078                 if (signal_pending(current)) {
2079                         error = -ERESTARTSYS;
2080                         break;
2081                 }
2082                 if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2083                         error = -EFAULT;
2084                         break;
2085                 }
2086
2087                 schedule();
2088         }
2089
2090         __set_current_state(TASK_RUNNING);
2091         remove_wait_queue(&tfile->socket.wq.wait, &wait);
2092
2093 out:
2094         *err = error;
2095         return ptr;
2096 }
2097
2098 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2099                            struct iov_iter *to,
2100                            int noblock, void *ptr)
2101 {
2102         ssize_t ret;
2103         int err;
2104
2105         if (!iov_iter_count(to)) {
2106                 tun_ptr_free(ptr);
2107                 return 0;
2108         }
2109
2110         if (!ptr) {
2111                 /* Read frames from ring */
2112                 ptr = tun_ring_recv(tfile, noblock, &err);
2113                 if (!ptr)
2114                         return err;
2115         }
2116
2117         if (tun_is_xdp_frame(ptr)) {
2118                 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2119
2120                 ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2121                 xdp_return_frame(xdpf);
2122         } else {
2123                 struct sk_buff *skb = ptr;
2124
2125                 ret = tun_put_user(tun, tfile, skb, to);
2126                 if (unlikely(ret < 0))
2127                         kfree_skb(skb);
2128                 else
2129                         consume_skb(skb);
2130         }
2131
2132         return ret;
2133 }
2134
2135 static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
2136 {
2137         struct file *file = iocb->ki_filp;
2138         struct tun_file *tfile = file->private_data;
2139         struct tun_struct *tun = tun_get(tfile);
2140         ssize_t len = iov_iter_count(to), ret;
2141         int noblock = 0;
2142
2143         if (!tun)
2144                 return -EBADFD;
2145
2146         if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2147                 noblock = 1;
2148
2149         ret = tun_do_read(tun, tfile, to, noblock, NULL);
2150         ret = min_t(ssize_t, ret, len);
2151         if (ret > 0)
2152                 iocb->ki_pos = ret;
2153         tun_put(tun);
2154         return ret;
2155 }
2156
2157 static void tun_prog_free(struct rcu_head *rcu)
2158 {
2159         struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
2160
2161         bpf_prog_destroy(prog->prog);
2162         kfree(prog);
2163 }
2164
2165 static int __tun_set_ebpf(struct tun_struct *tun,
2166                           struct tun_prog __rcu **prog_p,
2167                           struct bpf_prog *prog)
2168 {
2169         struct tun_prog *old, *new = NULL;
2170
2171         if (prog) {
2172                 new = kmalloc(sizeof(*new), GFP_KERNEL);
2173                 if (!new)
2174                         return -ENOMEM;
2175                 new->prog = prog;
2176         }
2177
2178         spin_lock_bh(&tun->lock);
2179         old = rcu_dereference_protected(*prog_p,
2180                                         lockdep_is_held(&tun->lock));
2181         rcu_assign_pointer(*prog_p, new);
2182         spin_unlock_bh(&tun->lock);
2183
2184         if (old)
2185                 call_rcu(&old->rcu, tun_prog_free);
2186
2187         return 0;
2188 }
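
/* Editorial sketch (not driver code): __tun_set_ebpf() is reached from the
 * TUNSETSTEERINGEBPF and TUNSETFILTEREBPF ioctls via tun_set_ebpf() below.
 * From userspace the ioctl argument is a pointer to a BPF program fd, with
 * -1 detaching the current program:
 */
#if 0   /* illustrative userspace example, never built with the driver */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tun_set_steering_prog(int tun_fd, int bpf_prog_fd)
{
        /* bpf_prog_fd: a loaded BPF_PROG_TYPE_SOCKET_FILTER program
         * (obtained elsewhere, e.g. via bpf(BPF_PROG_LOAD, ...));
         * pass -1 to detach.
         */
        return ioctl(tun_fd, TUNSETSTEERINGEBPF, &bpf_prog_fd);
}
#endif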
2189
2190 static void tun_free_netdev(struct net_device *dev)
2191 {
2192         struct tun_struct *tun = netdev_priv(dev);
2193
2194         BUG_ON(!(list_empty(&tun->disabled)));
2195
2196         free_percpu(dev->tstats);
2197         /* We clear tstats so that tun_set_iff() can tell if
2198          * tun_free_netdev() has been called from register_netdevice().
2199          */
2200         dev->tstats = NULL;
2201
2202         tun_flow_uninit(tun);
2203         security_tun_dev_free_security(tun->security);
2204         __tun_set_ebpf(tun, &tun->steering_prog, NULL);
2205         __tun_set_ebpf(tun, &tun->filter_prog, NULL);
2206 }
2207
2208 static void tun_setup(struct net_device *dev)
2209 {
2210         struct tun_struct *tun = netdev_priv(dev);
2211
2212         tun->owner = INVALID_UID;
2213         tun->group = INVALID_GID;
2214         tun_default_link_ksettings(dev, &tun->link_ksettings);
2215
2216         dev->ethtool_ops = &tun_ethtool_ops;
2217         dev->needs_free_netdev = true;
2218         dev->priv_destructor = tun_free_netdev;
2219         /* We prefer our own queue length */
2220         dev->tx_queue_len = TUN_READQ_SIZE;
2221 }
2222
2223 /* Trivial set of netlink ops to allow deleting tun or tap
2224  * device with netlink.
2225  */
2226 static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2227                         struct netlink_ext_ack *extack)
2228 {
2229         NL_SET_ERR_MSG(extack,
2230                        "tun/tap creation via rtnetlink is not supported.");
2231         return -EOPNOTSUPP;
2232 }
2233
2234 static size_t tun_get_size(const struct net_device *dev)
2235 {
2236         BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2237         BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2238
2239         return nla_total_size(sizeof(uid_t)) + /* OWNER */
2240                nla_total_size(sizeof(gid_t)) + /* GROUP */
2241                nla_total_size(sizeof(u8)) + /* TYPE */
2242                nla_total_size(sizeof(u8)) + /* PI */
2243                nla_total_size(sizeof(u8)) + /* VNET_HDR */
2244                nla_total_size(sizeof(u8)) + /* PERSIST */
2245                nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2246                nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2247                nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2248                0;
2249 }
2250
2251 static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2252 {
2253         struct tun_struct *tun = netdev_priv(dev);
2254
2255         if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2256                 goto nla_put_failure;
2257         if (uid_valid(tun->owner) &&
2258             nla_put_u32(skb, IFLA_TUN_OWNER,
2259                         from_kuid_munged(current_user_ns(), tun->owner)))
2260                 goto nla_put_failure;
2261         if (gid_valid(tun->group) &&
2262             nla_put_u32(skb, IFLA_TUN_GROUP,
2263                         from_kgid_munged(current_user_ns(), tun->group)))
2264                 goto nla_put_failure;
2265         if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2266                 goto nla_put_failure;
2267         if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2268                 goto nla_put_failure;
2269         if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2270                 goto nla_put_failure;
2271         if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2272                        !!(tun->flags & IFF_MULTI_QUEUE)))
2273                 goto nla_put_failure;
2274         if (tun->flags & IFF_MULTI_QUEUE) {
2275                 if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2276                         goto nla_put_failure;
2277                 if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2278                                 tun->numdisabled))
2279                         goto nla_put_failure;
2280         }
2281
2282         return 0;
2283
2284 nla_put_failure:
2285         return -EMSGSIZE;
2286 }
2287
2288 static struct rtnl_link_ops tun_link_ops __read_mostly = {
2289         .kind           = DRV_NAME,
2290         .priv_size      = sizeof(struct tun_struct),
2291         .setup          = tun_setup,
2292         .validate       = tun_validate,
2293         .get_size       = tun_get_size,
2294         .fill_info      = tun_fill_info,
2295 };
2296
2297 static void tun_sock_write_space(struct sock *sk)
2298 {
2299         struct tun_file *tfile;
2300         wait_queue_head_t *wqueue;
2301
2302         if (!sock_writeable(sk))
2303                 return;
2304
2305         if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2306                 return;
2307
2308         wqueue = sk_sleep(sk);
2309         if (wqueue && waitqueue_active(wqueue))
2310                 wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2311                                                 EPOLLWRNORM | EPOLLWRBAND);
2312
2313         tfile = container_of(sk, struct tun_file, sk);
2314         kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2315 }
2316
2317 static void tun_put_page(struct tun_page *tpage)
2318 {
2319         if (tpage->page)
2320                 __page_frag_cache_drain(tpage->page, tpage->count);
2321 }
2322
2323 static int tun_xdp_one(struct tun_struct *tun,
2324                        struct tun_file *tfile,
2325                        struct xdp_buff *xdp, int *flush,
2326                        struct tun_page *tpage)
2327 {
2328         unsigned int datasize = xdp->data_end - xdp->data;
2329         struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2330         struct virtio_net_hdr *gso = &hdr->gso;
2331         struct bpf_prog *xdp_prog;
2332         struct sk_buff *skb = NULL;
2333         u32 rxhash = 0, act;
2334         int buflen = hdr->buflen;
2335         int err = 0;
2336         bool skb_xdp = false;
2337         struct page *page;
2338
2339         xdp_prog = rcu_dereference(tun->xdp_prog);
2340         if (xdp_prog) {
2341                 if (gso->gso_type) {
2342                         skb_xdp = true;
2343                         goto build;
2344                 }
2345                 xdp_set_data_meta_invalid(xdp);
2346                 xdp->rxq = &tfile->xdp_rxq;
2347                 xdp->frame_sz = buflen;
2348
2349                 act = bpf_prog_run_xdp(xdp_prog, xdp);
2350                 err = tun_xdp_act(tun, xdp_prog, xdp, act);
2351                 if (err < 0) {
2352                         put_page(virt_to_head_page(xdp->data));
2353                         return err;
2354                 }
2355
2356                 switch (err) {
2357                 case XDP_REDIRECT:
2358                         *flush = true;
2359                         fallthrough;
2360                 case XDP_TX:
2361                         return 0;
2362                 case XDP_PASS:
2363                         break;
2364                 default:
2365                         page = virt_to_head_page(xdp->data);
2366                         if (tpage->page == page) {
2367                                 ++tpage->count;
2368                         } else {
2369                                 tun_put_page(tpage);
2370                                 tpage->page = page;
2371                                 tpage->count = 1;
2372                         }
2373                         return 0;
2374                 }
2375         }
2376
2377 build:
2378         skb = build_skb(xdp->data_hard_start, buflen);
2379         if (!skb) {
2380                 err = -ENOMEM;
2381                 goto out;
2382         }
2383
2384         skb_reserve(skb, xdp->data - xdp->data_hard_start);
2385         skb_put(skb, xdp->data_end - xdp->data);
2386
2387         if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2388                 atomic_long_inc(&tun->rx_frame_errors);
2389                 kfree_skb(skb);
2390                 err = -EINVAL;
2391                 goto out;
2392         }
2393
2394         skb->protocol = eth_type_trans(skb, tun->dev);
2395         skb_reset_network_header(skb);
2396         skb_probe_transport_header(skb);
2397         skb_record_rx_queue(skb, tfile->queue_index);
2398
2399         if (skb_xdp) {
2400                 err = do_xdp_generic(xdp_prog, skb);
2401                 if (err != XDP_PASS)
2402                         goto out;
2403         }
2404
2405         if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2406             !tfile->detached)
2407                 rxhash = __skb_get_hash_symmetric(skb);
2408
2409         netif_receive_skb(skb);
2410
2411         /* No need to disable preemption here since this function is
2412          * always called with bh disabled
2413          */
2414         dev_sw_netstats_rx_add(tun->dev, datasize);
2415
2416         if (rxhash)
2417                 tun_flow_update(tun, rxhash, tfile);
2418
2419 out:
2420         return err;
2421 }
2422
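/* Editorial note: the TUN_MSG_PTR branch below is the batched-XDP path used
 * by vhost-net, which hands tun an array of ctl->num struct xdp_buff entries
 * in ctl->ptr; each is consumed by tun_xdp_one() above.
 */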
2423 static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
2424 {
2425         int ret, i;
2426         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2427         struct tun_struct *tun = tun_get(tfile);
2428         struct tun_msg_ctl *ctl = m->msg_control;
2429         struct xdp_buff *xdp;
2430
2431         if (!tun)
2432                 return -EBADFD;
2433
2434         if (ctl && (ctl->type == TUN_MSG_PTR)) {
2435                 struct tun_page tpage;
2436                 int n = ctl->num;
2437                 int flush = 0;
2438
2439                 memset(&tpage, 0, sizeof(tpage));
2440
2441                 local_bh_disable();
2442                 rcu_read_lock();
2443
2444                 for (i = 0; i < n; i++) {
2445                         xdp = &((struct xdp_buff *)ctl->ptr)[i];
2446                         tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2447                 }
2448
2449                 if (flush)
2450                         xdp_do_flush();
2451
2452                 rcu_read_unlock();
2453                 local_bh_enable();
2454
2455                 tun_put_page(&tpage);
2456
2457                 ret = total_len;
2458                 goto out;
2459         }
2460
2461         ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2462                            m->msg_flags & MSG_DONTWAIT,
2463                            m->msg_flags & MSG_MORE);
2464 out:
2465         tun_put(tun);
2466         return ret;
2467 }
2468
2469 static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2470                        int flags)
2471 {
2472         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2473         struct tun_struct *tun = tun_get(tfile);
2474         void *ptr = m->msg_control;
2475         int ret;
2476
2477         if (!tun) {
2478                 ret = -EBADFD;
2479                 goto out_free;
2480         }
2481
2482         if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2483                 ret = -EINVAL;
2484                 goto out_put_tun;
2485         }
2486         if (flags & MSG_ERRQUEUE) {
2487                 ret = sock_recv_errqueue(sock->sk, m, total_len,
2488                                          SOL_PACKET, TUN_TX_TIMESTAMP);
2489                 goto out;
2490         }
2491         ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2492         if (ret > (ssize_t)total_len) {
2493                 m->msg_flags |= MSG_TRUNC;
2494                 ret = flags & MSG_TRUNC ? ret : total_len;
2495         }
2496 out:
2497         tun_put(tun);
2498         return ret;
2499
2500 out_put_tun:
2501         tun_put(tun);
2502 out_free:
2503         tun_ptr_free(ptr);
2504         return ret;
2505 }
2506
2507 static int tun_ptr_peek_len(void *ptr)
2508 {
2509         if (likely(ptr)) {
2510                 if (tun_is_xdp_frame(ptr)) {
2511                         struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2512
2513                         return xdpf->len;
2514                 }
2515                 return __skb_array_len_with_tag(ptr);
2516         } else {
2517                 return 0;
2518         }
2519 }
2520
2521 static int tun_peek_len(struct socket *sock)
2522 {
2523         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2524         struct tun_struct *tun;
2525         int ret = 0;
2526
2527         tun = tun_get(tfile);
2528         if (!tun)
2529                 return 0;
2530
2531         ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
2532         tun_put(tun);
2533
2534         return ret;
2535 }
2536
2537 /* Ops structure to mimic raw sockets with tun */
2538 static const struct proto_ops tun_socket_ops = {
2539         .peek_len = tun_peek_len,
2540         .sendmsg = tun_sendmsg,
2541         .recvmsg = tun_recvmsg,
2542 };
2543
2544 static struct proto tun_proto = {
2545         .name           = "tun",
2546         .owner          = THIS_MODULE,
2547         .obj_size       = sizeof(struct tun_file),
2548 };
2549
2550 static int tun_flags(struct tun_struct *tun)
2551 {
2552         return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2553 }
2554
2555 static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2556                               char *buf)
2557 {
2558         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2559         return sprintf(buf, "0x%x\n", tun_flags(tun));
2560 }
2561
2562 static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2563                               char *buf)
2564 {
2565         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2566         return uid_valid(tun->owner)?
2567                 sprintf(buf, "%u\n",
2568                         from_kuid_munged(current_user_ns(), tun->owner)):
2569                 sprintf(buf, "-1\n");
2570 }
2571
2572 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2573                               char *buf)
2574 {
2575         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2576         return gid_valid(tun->group) ?
2577                 sprintf(buf, "%u\n",
2578                         from_kgid_munged(current_user_ns(), tun->group)):
2579                 sprintf(buf, "-1\n");
2580 }
2581
2582 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2583 static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2584 static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2585
2586 static struct attribute *tun_dev_attrs[] = {
2587         &dev_attr_tun_flags.attr,
2588         &dev_attr_owner.attr,
2589         &dev_attr_group.attr,
2590         NULL
2591 };
2592
2593 static const struct attribute_group tun_attr_group = {
2594         .attrs = tun_dev_attrs
2595 };
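
/* Editorial note: with dev->sysfs_groups[0] pointing at tun_attr_group (see
 * tun_set_iff() below), these attributes appear per device, e.g. reading
 * /sys/class/net/tun0/tun_flags might yield "0x1001" for a plain
 * IFF_TUN | IFF_NO_PI device (value illustrative).
 */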
2596
2597 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2598 {
2599         struct tun_struct *tun;
2600         struct tun_file *tfile = file->private_data;
2601         struct net_device *dev;
2602         int err;
2603
2604         if (tfile->detached)
2605                 return -EINVAL;
2606
2607         if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
2608                 if (!capable(CAP_NET_ADMIN))
2609                         return -EPERM;
2610
2611                 if (!(ifr->ifr_flags & IFF_NAPI) ||
2612                     (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2613                         return -EINVAL;
2614         }
2615
2616         dev = __dev_get_by_name(net, ifr->ifr_name);
2617         if (dev) {
2618                 if (ifr->ifr_flags & IFF_TUN_EXCL)
2619                         return -EBUSY;
2620                 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2621                         tun = netdev_priv(dev);
2622                 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2623                         tun = netdev_priv(dev);
2624                 else
2625                         return -EINVAL;
2626
2627                 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
2628                     !!(tun->flags & IFF_MULTI_QUEUE))
2629                         return -EINVAL;
2630
2631                 if (tun_not_capable(tun))
2632                         return -EPERM;
2633                 err = security_tun_dev_open(tun->security);
2634                 if (err < 0)
2635                         return err;
2636
2637                 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2638                                  ifr->ifr_flags & IFF_NAPI,
2639                                  ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2640                 if (err < 0)
2641                         return err;
2642
2643                 if (tun->flags & IFF_MULTI_QUEUE &&
2644                     (tun->numqueues + tun->numdisabled > 1)) {
2645                         /* One or more queues have already been attached;
2646                          * no need to initialize the device again.
2647                          */
2648                         netdev_state_change(dev);
2649                         return 0;
2650                 }
2651
2652                 tun->flags = (tun->flags & ~TUN_FEATURES) |
2653                               (ifr->ifr_flags & TUN_FEATURES);
2654
2655                 netdev_state_change(dev);
2656         } else {
2657                 char *name;
2658                 unsigned long flags = 0;
2659                 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2660                              MAX_TAP_QUEUES : 1;
2661
2662                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2663                         return -EPERM;
2664                 err = security_tun_dev_create();
2665                 if (err < 0)
2666                         return err;
2667
2668                 /* Set dev type */
2669                 if (ifr->ifr_flags & IFF_TUN) {
2670                         /* TUN device */
2671                         flags |= IFF_TUN;
2672                         name = "tun%d";
2673                 } else if (ifr->ifr_flags & IFF_TAP) {
2674                         /* TAP device */
2675                         flags |= IFF_TAP;
2676                         name = "tap%d";
2677                 } else
2678                         return -EINVAL;
2679
2680                 if (*ifr->ifr_name)
2681                         name = ifr->ifr_name;
2682
2683                 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2684                                        NET_NAME_UNKNOWN, tun_setup, queues,
2685                                        queues);
2686
2687                 if (!dev)
2688                         return -ENOMEM;
2689
2690                 dev_net_set(dev, net);
2691                 dev->rtnl_link_ops = &tun_link_ops;
2692                 dev->ifindex = tfile->ifindex;
2693                 dev->sysfs_groups[0] = &tun_attr_group;
2694
2695                 tun = netdev_priv(dev);
2696                 tun->dev = dev;
2697                 tun->flags = flags;
2698                 tun->txflt.count = 0;
2699                 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2700
2701                 tun->align = NET_SKB_PAD;
2702                 tun->filter_attached = false;
2703                 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2704                 tun->rx_batched = 0;
2705                 RCU_INIT_POINTER(tun->steering_prog, NULL);
2706
2707                 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2708                 if (!dev->tstats) {
2709                         err = -ENOMEM;
2710                         goto err_free_dev;
2711                 }
2712
2713                 spin_lock_init(&tun->lock);
2714
2715                 err = security_tun_dev_alloc_security(&tun->security);
2716                 if (err < 0)
2717                         goto err_free_stat;
2718
2719                 tun_net_init(dev);
2720                 tun_flow_init(tun);
2721
2722                 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
2723                                    TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
2724                                    NETIF_F_HW_VLAN_STAG_TX;
2725                 dev->features = dev->hw_features | NETIF_F_LLTX;
2726                 dev->vlan_features = dev->features &
2727                                      ~(NETIF_F_HW_VLAN_CTAG_TX |
2728                                        NETIF_F_HW_VLAN_STAG_TX);
2729
2730                 tun->flags = (tun->flags & ~TUN_FEATURES) |
2731                               (ifr->ifr_flags & TUN_FEATURES);
2732
2733                 INIT_LIST_HEAD(&tun->disabled);
2734                 err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
2735                                  ifr->ifr_flags & IFF_NAPI_FRAGS, false);
2736                 if (err < 0)
2737                         goto err_free_flow;
2738
2739                 err = register_netdevice(tun->dev);
2740                 if (err < 0)
2741                         goto err_detach;
2742                 /* free_netdev() won't check the refcount; to avoid a race
2743                  * with dev_put() we must publish tun only after registration.
2744                  */
2745                 rcu_assign_pointer(tfile->tun, tun);
2746         }
2747
2748         netif_carrier_on(tun->dev);
2749
2750         /* Make sure persistent devices do not get stuck in
2751          * xoff state.
2752          */
2753         if (netif_running(tun->dev))
2754                 netif_tx_wake_all_queues(tun->dev);
2755
2756         strcpy(ifr->ifr_name, tun->dev->name);
2757         return 0;
2758
2759 err_detach:
2760         tun_detach_all(dev);
2761         /* We are here because register_netdevice() has failed.
2762          * If register_netdevice() already called tun_free_netdev()
2763          * while dealing with the error, dev->tstats has been cleared.
2764          */
2765         if (!dev->tstats)
2766                 goto err_free_dev;
2767
2768 err_free_flow:
2769         tun_flow_uninit(tun);
2770         security_tun_dev_free_security(tun->security);
2771 err_free_stat:
2772         free_percpu(dev->tstats);
2773 err_free_dev:
2774         free_netdev(dev);
2775         return err;
2776 }
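
/* Editorial sketch (not driver code): the userspace sequence that lands in
 * tun_set_iff() above. The name is copied back, so "tap%d" requests pattern
 * allocation; flags beyond IFF_TUN/IFF_TAP must be within TUN_FEATURES.
 * Illustrative only, error handling is minimal.
 */
#if 0   /* illustrative userspace example, never built with the driver */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>

static int tap_create(const char *name)
{
        struct ifreq ifr;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0)
                return -1;
        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
        strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
        if (ioctl(fd, TUNSETIFF, &ifr) < 0)
                return -1;
        /* ifr.ifr_name now holds the name actually allocated. */
        return fd;
}
#endif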
2777
2778 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2779 {
2780         strcpy(ifr->ifr_name, tun->dev->name);
2781
2782         ifr->ifr_flags = tun_flags(tun);
2783
2784 }
2785
2786 /* This is like a cut-down ethtool ops, except done via tun fd so no
2787  * privs required. */
2788 static int set_offload(struct tun_struct *tun, unsigned long arg)
2789 {
2790         netdev_features_t features = 0;
2791
2792         if (arg & TUN_F_CSUM) {
2793                 features |= NETIF_F_HW_CSUM;
2794                 arg &= ~TUN_F_CSUM;
2795
2796                 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2797                         if (arg & TUN_F_TSO_ECN) {
2798                                 features |= NETIF_F_TSO_ECN;
2799                                 arg &= ~TUN_F_TSO_ECN;
2800                         }
2801                         if (arg & TUN_F_TSO4)
2802                                 features |= NETIF_F_TSO;
2803                         if (arg & TUN_F_TSO6)
2804                                 features |= NETIF_F_TSO6;
2805                         arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2806                 }
2807
2808                 arg &= ~TUN_F_UFO;
2809         }
2810
2811         /* This gives the user a way to probe for new features in the
2812          * future by trying to set them. */
2813         if (arg)
2814                 return -EINVAL;
2815
2816         tun->set_features = features;
2817         tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2818         tun->dev->wanted_features |= features;
2819         netdev_update_features(tun->dev);
2820
2821         return 0;
2822 }
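
/* Editorial sketch (not driver code): set_offload() is driven by the
 * TUNSETOFFLOAD ioctl; the argument is a flag word passed by value, not a
 * pointer. Unknown flags fail with -EINVAL, which userspace can use to probe
 * for features, as the comment above notes.
 */
#if 0   /* illustrative userspace example, never built with the driver */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tap_enable_offloads(int tap_fd)
{
        /* TUN_F_CSUM is a prerequisite for the TSO flags. */
        unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

        return ioctl(tap_fd, TUNSETOFFLOAD, offloads);
}
#endif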
2823
2824 static void tun_detach_filter(struct tun_struct *tun, int n)
2825 {
2826         int i;
2827         struct tun_file *tfile;
2828
2829         for (i = 0; i < n; i++) {
2830                 tfile = rtnl_dereference(tun->tfiles[i]);
2831                 lock_sock(tfile->socket.sk);
2832                 sk_detach_filter(tfile->socket.sk);
2833                 release_sock(tfile->socket.sk);
2834         }
2835
2836         tun->filter_attached = false;
2837 }
2838
2839 static int tun_attach_filter(struct tun_struct *tun)
2840 {
2841         int i, ret = 0;
2842         struct tun_file *tfile;
2843
2844         for (i = 0; i < tun->numqueues; i++) {
2845                 tfile = rtnl_dereference(tun->tfiles[i]);
2846                 lock_sock(tfile->socket.sk);
2847                 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2848                 release_sock(tfile->socket.sk);
2849                 if (ret) {
2850                         tun_detach_filter(tun, i);
2851                         return ret;
2852                 }
2853         }
2854
2855         tun->filter_attached = true;
2856         return ret;
2857 }
2858
2859 static void tun_set_sndbuf(struct tun_struct *tun)
2860 {
2861         struct tun_file *tfile;
2862         int i;
2863
2864         for (i = 0; i < tun->numqueues; i++) {
2865                 tfile = rtnl_dereference(tun->tfiles[i]);
2866                 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2867         }
2868 }
2869
2870 static int tun_set_queue(struct file *file, struct ifreq *ifr)
2871 {
2872         struct tun_file *tfile = file->private_data;
2873         struct tun_struct *tun;
2874         int ret = 0;
2875
2876         rtnl_lock();
2877
2878         if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
2879                 tun = tfile->detached;
2880                 if (!tun) {
2881                         ret = -EINVAL;
2882                         goto unlock;
2883                 }
2884                 ret = security_tun_dev_attach_queue(tun->security);
2885                 if (ret < 0)
2886                         goto unlock;
2887                 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2888                                  tun->flags & IFF_NAPI_FRAGS, true);
2889         } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2890                 tun = rtnl_dereference(tfile->tun);
2891                 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
2892                         ret = -EINVAL;
2893                 else
2894                         __tun_detach(tfile, false);
2895         } else
2896                 ret = -EINVAL;
2897
2898         if (ret >= 0)
2899                 netdev_state_change(tun->dev);
2900
2901 unlock:
2902         rtnl_unlock();
2903         return ret;
2904 }
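
/* Editorial sketch (not driver code): tun_set_queue() backs TUNSETQUEUE. On
 * a device created with IFF_MULTI_QUEUE, a queue's fd can be taken out of
 * and returned to service without being closed:
 */
#if 0   /* illustrative userspace example, never built with the driver */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>

static int tun_queue_enable(int queue_fd, int enable)
{
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = enable ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
        return ioctl(queue_fd, TUNSETQUEUE, &ifr);
}
#endif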
2905
2906 static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
2907                         void __user *data)
2908 {
2909         struct bpf_prog *prog;
2910         int fd;
2911
2912         if (copy_from_user(&fd, data, sizeof(fd)))
2913                 return -EFAULT;
2914
2915         if (fd == -1) {
2916                 prog = NULL;
2917         } else {
2918                 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
2919                 if (IS_ERR(prog))
2920                         return PTR_ERR(prog);
2921         }
2922
2923         return __tun_set_ebpf(tun, prog_p, prog);
2924 }
2925
2926 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
2927                             unsigned long arg, int ifreq_len)
2928 {
2929         struct tun_file *tfile = file->private_data;
2930         struct net *net = sock_net(&tfile->sk);
2931         struct tun_struct *tun;
2932         void __user* argp = (void __user*)arg;
2933         unsigned int ifindex, carrier;
2934         struct ifreq ifr;
2935         kuid_t owner;
2936         kgid_t group;
2937         int sndbuf;
2938         int vnet_hdr_sz;
2939         int le;
2940         int ret;
2941         bool do_notify = false;
2942
2943         if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
2944             (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
2945                 if (copy_from_user(&ifr, argp, ifreq_len))
2946                         return -EFAULT;
2947         } else {
2948                 memset(&ifr, 0, sizeof(ifr));
2949         }
2950         if (cmd == TUNGETFEATURES) {
2951                 /* Currently this just means: "what IFF flags are valid?".
2952                  * This is needed because we never checked for invalid flags on
2953                  * TUNSETIFF.
2954                  */
2955                 return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
2956                                 (unsigned int __user*)argp);
2957         } else if (cmd == TUNSETQUEUE) {
2958                 return tun_set_queue(file, &ifr);
2959         } else if (cmd == SIOCGSKNS) {
2960                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2961                         return -EPERM;
2962                 return open_related_ns(&net->ns, get_net_ns);
2963         }
2964
2965         ret = 0;
2966         rtnl_lock();
2967
2968         tun = tun_get(tfile);
2969         if (cmd == TUNSETIFF) {
2970                 ret = -EEXIST;
2971                 if (tun)
2972                         goto unlock;
2973
2974                 ifr.ifr_name[IFNAMSIZ-1] = '\0';
2975
2976                 ret = tun_set_iff(net, file, &ifr);
2977
2978                 if (ret)
2979                         goto unlock;
2980
2981                 if (copy_to_user(argp, &ifr, ifreq_len))
2982                         ret = -EFAULT;
2983                 goto unlock;
2984         }
2985         if (cmd == TUNSETIFINDEX) {
2986                 ret = -EPERM;
2987                 if (tun)
2988                         goto unlock;
2989
2990                 ret = -EFAULT;
2991                 if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
2992                         goto unlock;
2993
2994                 ret = 0;
2995                 tfile->ifindex = ifindex;
2996                 goto unlock;
2997         }
2998
2999         ret = -EBADFD;
3000         if (!tun)
3001                 goto unlock;
3002
3003         netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
3004
3005         net = dev_net(tun->dev);
3006         ret = 0;
3007         switch (cmd) {
3008         case TUNGETIFF:
3009                 tun_get_iff(tun, &ifr);
3010
3011                 if (tfile->detached)
3012                         ifr.ifr_flags |= IFF_DETACH_QUEUE;
3013                 if (!tfile->socket.sk->sk_filter)
3014                         ifr.ifr_flags |= IFF_NOFILTER;
3015
3016                 if (copy_to_user(argp, &ifr, ifreq_len))
3017                         ret = -EFAULT;
3018                 break;
3019
3020         case TUNSETNOCSUM:
3021                 /* Disable/Enable checksum */
3022
3023                 /* [unimplemented] */
3024                 netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
3025                            arg ? "disabled" : "enabled");
3026                 break;
3027
3028         case TUNSETPERSIST:
3029                 /* Disable/Enable persist mode. Keep an extra reference to the
3030                  * module to prevent it from being unloaded while in use.
3031                  */
3032                 if (arg && !(tun->flags & IFF_PERSIST)) {
3033                         tun->flags |= IFF_PERSIST;
3034                         __module_get(THIS_MODULE);
3035                         do_notify = true;
3036                 }
3037                 if (!arg && (tun->flags & IFF_PERSIST)) {
3038                         tun->flags &= ~IFF_PERSIST;
3039                         module_put(THIS_MODULE);
3040                         do_notify = true;
3041                 }
3042
3043                 netif_info(tun, drv, tun->dev, "persist %s\n",
3044                            arg ? "enabled" : "disabled");
3045                 break;
3046
3047         case TUNSETOWNER:
3048                 /* Set owner of the device */
3049                 owner = make_kuid(current_user_ns(), arg);
3050                 if (!uid_valid(owner)) {
3051                         ret = -EINVAL;
3052                         break;
3053                 }
3054                 tun->owner = owner;
3055                 do_notify = true;
3056                 netif_info(tun, drv, tun->dev, "owner set to %u\n",
3057                            from_kuid(&init_user_ns, tun->owner));
3058                 break;
3059
3060         case TUNSETGROUP:
3061                 /* Set group of the device */
3062                 group = make_kgid(current_user_ns(), arg);
3063                 if (!gid_valid(group)) {
3064                         ret = -EINVAL;
3065                         break;
3066                 }
3067                 tun->group = group;
3068                 do_notify = true;
3069                 netif_info(tun, drv, tun->dev, "group set to %u\n",
3070                            from_kgid(&init_user_ns, tun->group));
3071                 break;
3072
3073         case TUNSETLINK:
3074                 /* Only allow setting the type when the interface is down */
3075                 if (tun->dev->flags & IFF_UP) {
3076                         netif_info(tun, drv, tun->dev,
3077                                    "Linktype set failed because interface is up\n");
3078                         ret = -EBUSY;
3079                 } else {
3080                         ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
3081                                                        tun->dev);
3082                         ret = notifier_to_errno(ret);
3083                         if (ret) {
3084                                 netif_info(tun, drv, tun->dev,
3085                                            "Refused to change device type\n");
3086                                 break;
3087                         }
3088                         tun->dev->type = (int) arg;
3089                         netif_info(tun, drv, tun->dev, "linktype set to %d\n",
3090                                    tun->dev->type);
3091                         call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
3092                                                  tun->dev);
3093                 }
3094                 break;
3095
3096         case TUNSETDEBUG:
3097                 tun->msg_enable = (u32)arg;
3098                 break;
3099
3100         case TUNSETOFFLOAD:
3101                 ret = set_offload(tun, arg);
3102                 break;
3103
3104         case TUNSETTXFILTER:
3105                 /* Can be set only for TAPs */
3106                 ret = -EINVAL;
3107                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3108                         break;
3109                 ret = update_filter(&tun->txflt, (void __user *)arg);
3110                 break;
3111
3112         case SIOCGIFHWADDR:
3113                 /* Get hw address */
3114                 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
3115                 ifr.ifr_hwaddr.sa_family = tun->dev->type;
3116                 if (copy_to_user(argp, &ifr, ifreq_len))
3117                         ret = -EFAULT;
3118                 break;
3119
3120         case SIOCSIFHWADDR:
3121                 /* Set hw address */
3122                 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
3123                 break;
3124
3125         case TUNGETSNDBUF:
3126                 sndbuf = tfile->socket.sk->sk_sndbuf;
3127                 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
3128                         ret = -EFAULT;
3129                 break;
3130
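        /* A new sndbuf must be positive; tun_set_sndbuf() then propagates
         * the limit to the socket of every attached queue.
         */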
3131         case TUNSETSNDBUF:
3132                 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
3133                         ret = -EFAULT;
3134                         break;
3135                 }
3136                 if (sndbuf <= 0) {
3137                         ret = -EINVAL;
3138                         break;
3139                 }
3140
3141                 tun->sndbuf = sndbuf;
3142                 tun_set_sndbuf(tun);
3143                 break;
3144
3145         case TUNGETVNETHDRSZ:
3146                 vnet_hdr_sz = tun->vnet_hdr_sz;
3147                 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3148                         ret = -EFAULT;
3149                 break;
3150
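        /* The vnet header size may only grow beyond the base
         * struct virtio_net_hdr; anything smaller could not carry the
         * fields the driver parses on every packet.
         */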
3151         case TUNSETVNETHDRSZ:
3152                 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3153                         ret = -EFAULT;
3154                         break;
3155                 }
3156                 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3157                         ret = -EINVAL;
3158                         break;
3159                 }
3160
3161                 tun->vnet_hdr_sz = vnet_hdr_sz;
3162                 break;
3163
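        /* TUN_VNET_LE forces little-endian vnet headers as required by
         * virtio 1.0; the *VNETBE ioctls exist for legacy cross-endian
         * guests and are only functional with CONFIG_TUN_VNET_CROSS_LE.
         */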
3164         case TUNGETVNETLE:
3165                 le = !!(tun->flags & TUN_VNET_LE);
3166                 if (put_user(le, (int __user *)argp))
3167                         ret = -EFAULT;
3168                 break;
3169
3170         case TUNSETVNETLE:
3171                 if (get_user(le, (int __user *)argp)) {
3172                         ret = -EFAULT;
3173                         break;
3174                 }
3175                 if (le)
3176                         tun->flags |= TUN_VNET_LE;
3177                 else
3178                         tun->flags &= ~TUN_VNET_LE;
3179                 break;
3180
3181         case TUNGETVNETBE:
3182                 ret = tun_get_vnet_be(tun, argp);
3183                 break;
3184
3185         case TUNSETVNETBE:
3186                 ret = tun_set_vnet_be(tun, argp);
3187                 break;
3188
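        /* Classic BPF socket filters. The program is kept in tun->fprog
         * so it can be re-attached when a disabled queue rejoins the
         * device.
         */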
3189         case TUNATTACHFILTER:
3190                 /* Can be set only for TAPs */
3191                 ret = -EINVAL;
3192                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3193                         break;
3194                 ret = -EFAULT;
3195                 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3196                         break;
3197
3198                 ret = tun_attach_filter(tun);
3199                 break;
3200
3201         case TUNDETACHFILTER:
3202                 /* Can be set only for TAPs */
3203                 ret = -EINVAL;
3204                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3205                         break;
3206                 ret = 0;
3207                 tun_detach_filter(tun, tun->numqueues);
3208                 break;
3209
3210         case TUNGETFILTER:
3211                 ret = -EINVAL;
3212                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3213                         break;
3214                 ret = -EFAULT;
3215                 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3216                         break;
3217                 ret = 0;
3218                 break;
3219
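        /* eBPF hooks: the steering program selects the tx queue for each
         * packet, the filter program may drop packets before queueing.
         */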
3220         case TUNSETSTEERINGEBPF:
3221                 ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3222                 break;
3223
3224         case TUNSETFILTEREBPF:
3225                 ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3226                 break;
3227
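        /* Toggle the reported carrier without otherwise touching the
         * interface, e.g. (illustrative user-space call):
         *
         *	int carrier = 0;
         *	ioctl(tun_fd, TUNSETCARRIER, &carrier);
         */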
3228         case TUNSETCARRIER:
3229                 ret = -EFAULT;
3230                 if (copy_from_user(&carrier, argp, sizeof(carrier)))
3231                         goto unlock;
3232
3233                 ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3234                 break;
3235
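        /* Hand out an fd referring to the net namespace the device lives
         * in; requires CAP_NET_ADMIN over that namespace's user ns.
         */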
3236         case TUNGETDEVNETNS:
3237                 ret = -EPERM;
3238                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3239                         goto unlock;
3240                 ret = open_related_ns(&net->ns, get_net_ns);
3241                 break;
3242
3243         default:
3244                 ret = -EINVAL;
3245                 break;
3246         }
3247
3248         if (do_notify)
3249                 netdev_state_change(tun->dev);
3250
3251 unlock:
3252         rtnl_unlock();
3253         if (tun)
3254                 tun_put(tun);
3255         return ret;
3256 }
3257
3258 static long tun_chr_ioctl(struct file *file,
3259                           unsigned int cmd, unsigned long arg)
3260 {
3261         return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
3262 }
3263
3264 #ifdef CONFIG_COMPAT
3265 static long tun_chr_compat_ioctl(struct file *file,
3266                          unsigned int cmd, unsigned long arg)
3267 {
3268         switch (cmd) {
3269         case TUNSETIFF:
3270         case TUNGETIFF:
3271         case TUNSETTXFILTER:
3272         case TUNGETSNDBUF:
3273         case TUNSETSNDBUF:
3274         case SIOCGIFHWADDR:
3275         case SIOCSIFHWADDR:
3276                 arg = (unsigned long)compat_ptr(arg);
3277                 break;
3278         default:
3279                 arg = (compat_ulong_t)arg;
3280                 break;
3281         }
3282
3283         /*
3284          * compat_ifreq is shorter than ifreq, so we must not access beyond
3285          * the end of that structure. All fields that are used in this
3286          * driver are compatible, though, so we don't need to convert
3287          * the contents.
3288          */
3289         return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
3290 }
3291 #endif /* CONFIG_COMPAT */
3292
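/* Hook the fd into the fasync machinery so that queue wakeups can raise
 * SIGIO for owners that enabled O_ASYNC on the tun fd.
 */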
3293 static int tun_chr_fasync(int fd, struct file *file, int on)
3294 {
3295         struct tun_file *tfile = file->private_data;
3296         int ret;
3297
3298         ret = fasync_helper(fd, file, on, &tfile->fasync);
3299         if (ret < 0)
3300                 goto out;
3301         if (on) {
3302                 __f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
3303                 tfile->flags |= TUN_FASYNC;
3304         } else
3305                 tfile->flags &= ~TUN_FASYNC;
3306         ret = 0;
3307 out:
3308         return ret;
3309 }
3310
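/* Every open() of /dev/net/tun allocates a fresh tun_file with its own
 * socket and tx ptr_ring. It only becomes a queue of a tun/tap device
 * once attached via TUNSETIFF (or re-enabled through TUNSETQUEUE).
 */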
3311 static int tun_chr_open(struct inode *inode, struct file *file)
3312 {
3313         struct net *net = current->nsproxy->net_ns;
3314         struct tun_file *tfile;
3315
3316         tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
3317                                             &tun_proto, 0);
3318         if (!tfile)
3319                 return -ENOMEM;
3320         if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3321                 sk_free(&tfile->sk);
3322                 return -ENOMEM;
3323         }
3324
3325         mutex_init(&tfile->napi_mutex);
3326         RCU_INIT_POINTER(tfile->tun, NULL);
3327         tfile->flags = 0;
3328         tfile->ifindex = 0;
3329
3330         init_waitqueue_head(&tfile->socket.wq.wait);
3331
3332         tfile->socket.file = file;
3333         tfile->socket.ops = &tun_socket_ops;
3334
3335         sock_init_data(&tfile->socket, &tfile->sk);
3336
3337         tfile->sk.sk_write_space = tun_sock_write_space;
3338         tfile->sk.sk_sndbuf = INT_MAX;
3339
3340         file->private_data = tfile;
3341         INIT_LIST_HEAD(&tfile->next);
3342
3343         sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3344
3345         return 0;
3346 }
3347
3348 static int tun_chr_close(struct inode *inode, struct file *file)
3349 {
3350         struct tun_file *tfile = file->private_data;
3351
3352         tun_detach(tfile, true);
3353
3354         return 0;
3355 }
3356
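/* Expose the attached interface name through /proc/<pid>/fdinfo so that
 * tools can map an open tun fd back to its netdevice.
 */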
3357 #ifdef CONFIG_PROC_FS
3358 static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
3359 {
3360         struct tun_file *tfile = file->private_data;
3361         struct tun_struct *tun;
3362         struct ifreq ifr;
3363
3364         memset(&ifr, 0, sizeof(ifr));
3365
3366         rtnl_lock();
3367         tun = tun_get(tfile);
3368         if (tun)
3369                 tun_get_iff(tun, &ifr);
3370         rtnl_unlock();
3371
3372         if (tun)
3373                 tun_put(tun);
3374
3375         seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
3376 }
3377 #endif
3378
3379 static const struct file_operations tun_fops = {
3380         .owner  = THIS_MODULE,
3381         .llseek = no_llseek,
3382         .read_iter  = tun_chr_read_iter,
3383         .write_iter = tun_chr_write_iter,
3384         .poll   = tun_chr_poll,
3385         .unlocked_ioctl = tun_chr_ioctl,
3386 #ifdef CONFIG_COMPAT
3387         .compat_ioctl = tun_chr_compat_ioctl,
3388 #endif
3389         .open   = tun_chr_open,
3390         .release = tun_chr_close,
3391         .fasync = tun_chr_fasync,
3392 #ifdef CONFIG_PROC_FS
3393         .show_fdinfo = tun_chr_show_fdinfo,
3394 #endif
3395 };
3396
3397 static struct miscdevice tun_miscdev = {
3398         .minor = TUN_MINOR,
3399         .name = "tun",
3400         .nodename = "net/tun",
3401         .fops = &tun_fops,
3402 };
3403
3404 /* ethtool interface */
3405
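/* A tun device has no PHY, so report fixed placeholder link settings
 * (10Mb/s, twisted pair, autoneg off). User space may overwrite them
 * through ETHTOOL_SLINKSETTINGS; see tun_set_link_ksettings() below.
 */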
3406 static void tun_default_link_ksettings(struct net_device *dev,
3407                                        struct ethtool_link_ksettings *cmd)
3408 {
3409         ethtool_link_ksettings_zero_link_mode(cmd, supported);
3410         ethtool_link_ksettings_zero_link_mode(cmd, advertising);
3411         cmd->base.speed         = SPEED_10;
3412         cmd->base.duplex        = DUPLEX_FULL;
3413         cmd->base.port          = PORT_TP;
3414         cmd->base.phy_address   = 0;
3415         cmd->base.autoneg       = AUTONEG_DISABLE;
3416 }
3417
3418 static int tun_get_link_ksettings(struct net_device *dev,
3419                                   struct ethtool_link_ksettings *cmd)
3420 {
3421         struct tun_struct *tun = netdev_priv(dev);
3422
3423         memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3424         return 0;
3425 }
3426
3427 static int tun_set_link_ksettings(struct net_device *dev,
3428                                   const struct ethtool_link_ksettings *cmd)
3429 {
3430         struct tun_struct *tun = netdev_priv(dev);
3431
3432         memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3433         return 0;
3434 }
3435
3436 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3437 {
3438         struct tun_struct *tun = netdev_priv(dev);
3439
3440         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
3441         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
3442
3443         switch (tun->flags & TUN_TYPE_MASK) {
3444         case IFF_TUN:
3445                 strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
3446                 break;
3447         case IFF_TAP:
3448                 strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
3449                 break;
3450         }
3451 }
3452
3453 static u32 tun_get_msglevel(struct net_device *dev)
3454 {
3455         struct tun_struct *tun = netdev_priv(dev);
3456
3457         return tun->msg_enable;
3458 }
3459
3460 static void tun_set_msglevel(struct net_device *dev, u32 value)
3461 {
3462         struct tun_struct *tun = netdev_priv(dev);
3463
3464         tun->msg_enable = value;
3465 }
3466
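/* rx_max_coalesced_frames is mapped onto tun->rx_batched: the number of
 * packets that may be accumulated before being flushed up the stack,
 * capped at NAPI_POLL_WEIGHT.
 */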
3467 static int tun_get_coalesce(struct net_device *dev,
3468                             struct ethtool_coalesce *ec)
3469 {
3470         struct tun_struct *tun = netdev_priv(dev);
3471
3472         ec->rx_max_coalesced_frames = tun->rx_batched;
3473
3474         return 0;
3475 }
3476
3477 static int tun_set_coalesce(struct net_device *dev,
3478                             struct ethtool_coalesce *ec)
3479 {
3480         struct tun_struct *tun = netdev_priv(dev);
3481
3482         if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
3483                 tun->rx_batched = NAPI_POLL_WEIGHT;
3484         else
3485                 tun->rx_batched = ec->rx_max_coalesced_frames;
3486
3487         return 0;
3488 }
3489
3490 static const struct ethtool_ops tun_ethtool_ops = {
3491         .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
3492         .get_drvinfo    = tun_get_drvinfo,
3493         .get_msglevel   = tun_get_msglevel,
3494         .set_msglevel   = tun_set_msglevel,
3495         .get_link       = ethtool_op_get_link,
3496         .get_ts_info    = ethtool_op_get_ts_info,
3497         .get_coalesce   = tun_get_coalesce,
3498         .set_coalesce   = tun_set_coalesce,
3499         .get_link_ksettings = tun_get_link_ksettings,
3500         .set_link_ksettings = tun_set_link_ksettings,
3501 };
3502
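/* The netdev's tx_queue_len doubles as the size of each queue's
 * ptr_ring. On a length change, resize the rings of all queues, active
 * and disabled, in one call; entries that no longer fit are released
 * through tun_ptr_free().
 */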
3503 static int tun_queue_resize(struct tun_struct *tun)
3504 {
3505         struct net_device *dev = tun->dev;
3506         struct tun_file *tfile;
3507         struct ptr_ring **rings;
3508         int n = tun->numqueues + tun->numdisabled;
3509         int ret, i;
3510
3511         rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
3512         if (!rings)
3513                 return -ENOMEM;
3514
3515         for (i = 0; i < tun->numqueues; i++) {
3516                 tfile = rtnl_dereference(tun->tfiles[i]);
3517                 rings[i] = &tfile->tx_ring;
3518         }
3519         list_for_each_entry(tfile, &tun->disabled, next)
3520                 rings[i++] = &tfile->tx_ring;
3521
3522         ret = ptr_ring_resize_multiple(rings, n,
3523                                        dev->tx_queue_len, GFP_KERNEL,
3524                                        tun_ptr_free);
3525
3526         kfree(rings);
3527         return ret;
3528 }
3529
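/* Netdevice notifier: acts only on tun/tap devices (identified by their
 * rtnl_link_ops), resizing rings on queue-length changes and waking up
 * blocked writers when the device comes up.
 */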
3530 static int tun_device_event(struct notifier_block *unused,
3531                             unsigned long event, void *ptr)
3532 {
3533         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3534         struct tun_struct *tun = netdev_priv(dev);
3535         int i;
3536
3537         if (dev->rtnl_link_ops != &tun_link_ops)
3538                 return NOTIFY_DONE;
3539
3540         switch (event) {
3541         case NETDEV_CHANGE_TX_QUEUE_LEN:
3542                 if (tun_queue_resize(tun))
3543                         return NOTIFY_BAD;
3544                 break;
3545         case NETDEV_UP:
3546                 for (i = 0; i < tun->numqueues; i++) {
3547                         struct tun_file *tfile;
3548
3549                         tfile = rtnl_dereference(tun->tfiles[i]);
3550                         tfile->socket.sk->sk_write_space(tfile->socket.sk);
3551                 }
3552                 break;
3553         default:
3554                 break;
3555         }
3556
3557         return NOTIFY_DONE;
3558 }
3559
3560 static struct notifier_block tun_notifier_block __read_mostly = {
3561         .notifier_call  = tun_device_event,
3562 };
3563
3564 static int __init tun_init(void)
3565 {
3566         int ret = 0;
3567
3568         pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3569
3570         ret = rtnl_link_register(&tun_link_ops);
3571         if (ret) {
3572                 pr_err("Can't register link_ops\n");
3573                 goto err_linkops;
3574         }
3575
3576         ret = misc_register(&tun_miscdev);
3577         if (ret) {
3578                 pr_err("Can't register misc device %d\n", TUN_MINOR);
3579                 goto err_misc;
3580         }
3581
3582         ret = register_netdevice_notifier(&tun_notifier_block);
3583         if (ret) {
3584                 pr_err("Can't register netdevice notifier\n");
3585                 goto err_notifier;
3586         }
3587
3588         return 0;
3589
3590 err_notifier:
3591         misc_deregister(&tun_miscdev);
3592 err_misc:
3593         rtnl_link_unregister(&tun_link_ops);
3594 err_linkops:
3595         return ret;
3596 }
3597
3598 static void tun_cleanup(void)
3599 {
3600         misc_deregister(&tun_miscdev);
3601         rtnl_link_unregister(&tun_link_ops);
3602         unregister_netdevice_notifier(&tun_notifier_block);
3603 }
3604
3605 /* Get an underlying socket object from tun file.  Returns error unless file is
3606  * attached to a device.  The returned object works like a packet socket, it
3607  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
3608  * holding a reference to the file for as long as the socket is in use. */
3609 struct socket *tun_get_socket(struct file *file)
3610 {
3611         struct tun_file *tfile;
3612         if (file->f_op != &tun_fops)
3613                 return ERR_PTR(-EINVAL);
3614         tfile = file->private_data;
3615         if (!tfile)
3616                 return ERR_PTR(-EBADFD);
3617         return &tfile->socket;
3618 }
3619 EXPORT_SYMBOL_GPL(tun_get_socket);
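/* Illustrative in-kernel use (not part of this file): vhost-net resolves
 * the socket behind a tun fd roughly as follows, dropping the file
 * reference again when the fd is not a tun:
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock;
 *
 *	if (!file)
 *		return ERR_PTR(-EBADF);
 *	sock = tun_get_socket(file);
 *	if (IS_ERR(sock))
 *		fput(file);
 */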
3620
3621 struct ptr_ring *tun_get_tx_ring(struct file *file)
3622 {
3623         struct tun_file *tfile;
3624
3625         if (file->f_op != &tun_fops)
3626                 return ERR_PTR(-EINVAL);
3627         tfile = file->private_data;
3628         if (!tfile)
3629                 return ERR_PTR(-EBADFD);
3630         return &tfile->tx_ring;
3631 }
3632 EXPORT_SYMBOL_GPL(tun_get_tx_ring);
3633
3634 module_init(tun_init);
3635 module_exit(tun_cleanup);
3636 MODULE_DESCRIPTION(DRV_DESCRIPTION);
3637 MODULE_AUTHOR(DRV_COPYRIGHT);
3638 MODULE_LICENSE("GPL");
3639 MODULE_ALIAS_MISCDEV(TUN_MINOR);
3640 MODULE_ALIAS("devname:net/tun");