// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      NET3    Protocol independent device support routines.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <[email protected]>
 *                              Mark Evans, <[email protected]>
 *
 *      Additional Authors:
 *              Florian la Roche <[email protected]>
 *              Alan Cox <[email protected]>
 *              David Hinds <[email protected]>
 *              Alexey Kuznetsov <[email protected]>
 *              Adam Sulmicki <[email protected]>
 *              Pekka Riikonen <[email protected]>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *      Paul Rusty Russell      :       SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>

#include "dev.h"
#include "net-sysfs.h"


static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;       /* Taps */

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
                                         struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
                                           struct net_device *dev,
                                           struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See register_netdevice() and unregister_netdevice() for example
 * usages; both must be called with the rtnl semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
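
/*
 * Illustrative sketch (not part of the original file): a pure reader
 * walking the device list under RCU, as the locking rules above allow.
 * The example_log_netdevs() name and the pr_info() output are
 * assumptions for this example only.
 */
static void __maybe_unused example_log_netdevs(struct net *net)
{
        struct net_device *dev;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                pr_info("saw %s (ifindex %d)\n", dev->name, dev->ifindex);
        rcu_read_unlock();
}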

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
        while (++net->dev_base_seq == 0)
                ;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock_irqsave(struct softnet_data *sd,
                                    unsigned long *flags)
{
        if (IS_ENABLED(CONFIG_RPS))
                spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
        else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                local_irq_save(*flags);
}

static inline void rps_lock_irq_disable(struct softnet_data *sd)
{
        if (IS_ENABLED(CONFIG_RPS))
                spin_lock_irq(&sd->input_pkt_queue.lock);
        else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                local_irq_disable();
}

static inline void rps_unlock_irq_restore(struct softnet_data *sd,
                                          unsigned long *flags)
{
        if (IS_ENABLED(CONFIG_RPS))
                spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
        else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                local_irq_restore(*flags);
}

static inline void rps_unlock_irq_enable(struct softnet_data *sd)
{
        if (IS_ENABLED(CONFIG_RPS))
                spin_unlock_irq(&sd->input_pkt_queue.lock);
        else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                local_irq_enable();
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
                                                       const char *name)
{
        struct netdev_name_node *name_node;

        name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
        if (!name_node)
                return NULL;
        INIT_HLIST_NODE(&name_node->hlist);
        name_node->dev = dev;
        name_node->name = name;
        return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
        struct netdev_name_node *name_node;

        name_node = netdev_name_node_alloc(dev, dev->name);
        if (!name_node)
                return NULL;
        INIT_LIST_HEAD(&name_node->list);
        return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
        kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
                                 struct netdev_name_node *name_node)
{
        hlist_add_head_rcu(&name_node->hlist,
                           dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
        hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
                                                        const char *name)
{
        struct hlist_head *head = dev_name_hash(net, name);
        struct netdev_name_node *name_node;

        hlist_for_each_entry(name_node, head, hlist)
                if (!strcmp(name_node->name, name))
                        return name_node;
        return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
                                                            const char *name)
{
        struct hlist_head *head = dev_name_hash(net, name);
        struct netdev_name_node *name_node;

        hlist_for_each_entry_rcu(name_node, head, hlist)
                if (!strcmp(name_node->name, name))
                        return name_node;
        return NULL;
}

bool netdev_name_in_use(struct net *net, const char *name)
{
        return netdev_name_node_lookup(net, name);
}
EXPORT_SYMBOL(netdev_name_in_use);

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
        struct netdev_name_node *name_node;
        struct net *net = dev_net(dev);

        name_node = netdev_name_node_lookup(net, name);
        if (name_node)
                return -EEXIST;
        name_node = netdev_name_node_alloc(dev, name);
        if (!name_node)
                return -ENOMEM;
        netdev_name_node_add(net, name_node);
        /* The node that holds dev->name acts as a head of per-device list. */
        list_add_tail(&name_node->list, &dev->name_node->list);

        return 0;
}

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
        list_del(&name_node->list);
        netdev_name_node_del(name_node);
        kfree(name_node->name);
        netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
        struct netdev_name_node *name_node;
        struct net *net = dev_net(dev);

        name_node = netdev_name_node_lookup(net, name);
        if (!name_node)
                return -ENOENT;
        /* lookup might have found our primary name or a name belonging
         * to another device.
         */
        if (name_node == dev->name_node || name_node->dev != dev)
                return -EINVAL;

        __netdev_name_node_alt_destroy(name_node);

        return 0;
}

static void netdev_name_node_alt_flush(struct net_device *dev)
{
        struct netdev_name_node *name_node, *tmp;

        list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
                __netdev_name_node_alt_destroy(name_node);
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        netdev_name_node_add(net, dev->name_node);
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock(&dev_base_lock);

        dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev, bool lock)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        if (lock)
                write_lock(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        netdev_name_node_del(dev->name_node);
        hlist_del_rcu(&dev->index_hlist);
        if (lock)
                write_unlock(&dev_base_lock);

        dev_base_seq_inc(dev_net(dev));
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
        ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
        ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
        ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
        ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
        ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
        ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
        ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
        ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
        ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
        ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
        ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
        ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
        ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
        ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
        ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
        "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
        "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
        "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
        "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
        "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
        "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
        "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
        "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
        "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
        "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
        "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
        "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
        "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
        "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
        "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *              Protocol management and registration routines
 *
 *******************************************************************************/


/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers that mangle input packets
 *      MUST BE last in the hash buckets, and checking protocol handlers
 *      MUST start from the promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if a protocol handler that mangles packets
 *      were first on the list, it could not sense that the packet
 *      is cloned and should be copied-on-write; it would
 *      change it and subsequent readers would get a broken packet.
 *                                                      --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return pt->dev ? &pt->dev->ptype_all : &ptype_all;
        else
                return pt->dev ? &pt->dev->ptype_specific :
                                 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep and therefore cannot guarantee that
 *      all CPUs that are in the middle of receiving packets will see
 *      the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *      __dev_remove_pack        - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all CPUs have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_pack: %p not found\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *      dev_remove_pack  - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
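
/*
 * Illustrative usage sketch (not part of the original file): a module
 * registering a handler for one EtherType with dev_add_pack() and
 * unregistering it with dev_remove_pack() on exit. The 0x88b5 value
 * (IEEE 802 local experimental EtherType) and the my_example_* names
 * are assumptions for this example only.
 */
static int __maybe_unused my_example_rcv(struct sk_buff *skb,
                                         struct net_device *dev,
                                         struct packet_type *pt,
                                         struct net_device *orig_dev)
{
        /* Each registered handler gets its own reference to the skb. */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type my_example_ptype __maybe_unused = {
        .type = cpu_to_be16(0x88b5),
        .func = my_example_rcv,
        /* .dev == NULL: receive from any device */
};

/* Pair dev_add_pack(&my_example_ptype) at init with dev_remove_pack() on exit. */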


/*******************************************************************************
 *
 *                          Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *      dev_get_iflink  - get 'iflink' value of an interface
 *      @dev: targeted interface
 *
 *      Indicates the ifindex the interface is linked to.
 *      Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
        if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
                return dev->netdev_ops->ndo_get_iflink(dev);

        return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *      dev_fill_metadata_dst - Retrieve tunnel egress information.
 *      @dev: targeted interface
 *      @skb: The packet.
 *
 *      For better visibility of tunnel traffic, OVS needs to retrieve
 *      egress tunnel information for a packet. The following API allows
 *      a user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
        struct ip_tunnel_info *info;

        if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
                return -EINVAL;

        info = skb_tunnel_info_unclone(skb);
        if (!info)
                return -ENOMEM;
        if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
                return -EINVAL;

        return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
        int k = stack->num_paths++;

        if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
                return NULL;

        return &stack->path[k];
}

int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
                          struct net_device_path_stack *stack)
{
        const struct net_device *last_dev;
        struct net_device_path_ctx ctx = {
                .dev    = dev,
        };
        struct net_device_path *path;
        int ret = 0;

        memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
        stack->num_paths = 0;
        while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
                last_dev = ctx.dev;
                path = dev_fwd_path(stack);
                if (!path)
                        return -1;

                memset(path, 0, sizeof(struct net_device_path));
                ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
                if (ret < 0)
                        return -1;

                if (WARN_ON_ONCE(last_dev == ctx.dev))
                        return -1;
        }

        if (!ctx.dev)
                return ret;

        path = dev_fwd_path(stack);
        if (!path)
                return -1;
        path->type = DEV_PATH_ETHERNET;
        path->dev = ctx.dev;

        return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);

/**
 *      __dev_get_by_name       - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct netdev_name_node *node_name;

        node_name = netdev_name_node_lookup(net, name);
        return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu  - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct netdev_name_node *node_name;

        node_name = netdev_name_node_lookup_rcu(net, name);
        return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *      dev_get_by_name         - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
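
/*
 * Illustrative sketch (not part of the original file): a refcounted
 * lookup from process context, releasing the reference when done. The
 * example_report_ifindex() name and the "eth0" device name are
 * assumptions for this example only.
 */
static int __maybe_unused example_report_ifindex(struct net *net)
{
        struct net_device *dev = dev_get_by_name(net, "eth0");

        if (!dev)
                return -ENODEV;
        pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
        dev_put(dev);   /* drop the reference taken by dev_get_by_name() */
        return 0;
}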

/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *      dev_get_by_index_rcu - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns NULL if the device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
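
/*
 * Illustrative sketch (not part of the original file): reading a field
 * through the RCU variant, which takes no reference and is therefore
 * only valid inside the read-side critical section. The
 * example_mtu_by_index() name is an assumption for this example only.
 */
static int __maybe_unused example_mtu_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;
        int mtu = -ENODEV;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                mtu = dev->mtu;
        rcu_read_unlock();

        return mtu;
}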

/**
 *      dev_get_by_napi_id - find a device by napi_id
 *      @napi_id: ID of the NAPI struct
 *
 *      Search for an interface by NAPI ID. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not had
 *      its reference counter increased so the caller must be careful
 *      about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
        struct napi_struct *napi;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (napi_id < MIN_NAPI_ID)
                return NULL;

        napi = napi_by_id(napi_id);

        return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *      netdev_get_name - get a netdevice name, knowing its ifindex.
 *      @net: network namespace
 *      @name: a pointer to the buffer where the name will be stored.
 *      @ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
        struct net_device *dev;
        int ret;

        down_read(&devnet_rename_sem);
        rcu_read_lock();

        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                ret = -ENODEV;
                goto out;
        }

        strcpy(name, dev->name);

        ret = 0;
out:
        rcu_read_unlock();
        up_read(&devnet_rename_sem);
        return ret;
}

/**
 *      dev_getbyhwaddr_rcu - find a device by its hardware address
 *      @net: the applicable net namespace
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns NULL if the device
 *      is not found or a pointer to the device.
 *      The caller must hold RCU or RTNL.
 *      The returned device has not had its ref count increased
 *      and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
                                       const char *ha)
{
        struct net_device *dev;

        for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
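
/*
 * Illustrative sketch (not part of the original file): checking whether
 * a MAC address is already in use by an Ethernet device. The
 * example_mac_in_use() name and the hard-coded address are assumptions
 * for this example only.
 */
static bool __maybe_unused example_mac_in_use(struct net *net)
{
        static const char mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        bool found;

        rcu_read_lock();
        found = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac) != NULL;
        rcu_read_unlock();

        return found;
}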

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev, *ret = NULL;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                if (dev->type == type) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *      __dev_get_by_flags - find any device with given flags
 *      @net: the applicable net namespace
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns NULL if a device
 *      is not found or a pointer to the device. Must be called inside
 *      rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
                                      unsigned short mask)
{
        struct net_device *dev, *ret;

        ASSERT_RTNL();

        ret = NULL;
        for_each_netdev(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        ret = dev;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names to
 *      allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
bool dev_valid_name(const char *name)
{
        if (*name == '\0')
                return false;
        if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
                return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return false;

        while (*name) {
                if (*name == '/' || *name == ':' || isspace(*name))
                        return false;
                name++;
        }
        return true;
}
EXPORT_SYMBOL(dev_valid_name);
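
/*
 * Illustrative sketch (not part of the original file): rejecting a
 * user-supplied name before using it, as the rename and registration
 * paths below do. The example_check_name() name is an assumption for
 * this example only.
 */
static int __maybe_unused example_check_name(const char *name)
{
        /* Rejects "", names >= IFNAMSIZ, ".", "..", '/', ':' and whitespace. */
        if (!dev_valid_name(name))
                return -EINVAL;

        return 0;
}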

/**
 *      __dev_alloc_name - allocate a name for a device
 *      @net: network namespace to allocate the device name in
 *      @name: name format string
 *      @buf:  scratch buffer and result name string
 *
 *      Passed a format string - e.g. "lt%d" - it will try to find a suitable
 *      id. It scans the list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (i.e. 32K on most
 *      platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        if (!dev_valid_name(name))
                return -EINVAL;

        p = strchr(name, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        struct netdev_name_node *name_node;
                        list_for_each_entry(name_node, &d->name_node->list, list) {
                                if (!sscanf(name_node->name, name, &i))
                                        continue;
                                if (i < 0 || i >= max_netdevices)
                                        continue;

                                /*  avoid cases where sscanf is not exact inverse of printf */
                                snprintf(buf, IFNAMSIZ, name, i);
                                if (!strncmp(buf, name_node->name, IFNAMSIZ))
                                        __set_bit(i, inuse);
                        }
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /*  avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                __set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        snprintf(buf, IFNAMSIZ, name, i);
        if (!netdev_name_in_use(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
                             struct net_device *dev,
                             const char *name)
{
        char buf[IFNAMSIZ];
        int ret;

        BUG_ON(!net);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}

/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string - e.g. "lt%d" - it will try to find a suitable
 *      id. It scans the list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (i.e. 32K on most
 *      platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
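
/*
 * Illustrative sketch (not part of the original file): letting the core
 * pick the next free unit for a driver whose devices are named "foo%d".
 * The example_pick_name() name and the "foo%d" template are assumptions
 * for this example only.
 */
static int __maybe_unused example_pick_name(struct net_device *dev)
{
        int unit = dev_alloc_name(dev, "foo%d");        /* e.g. "foo0", "foo1", ... */

        if (unit < 0)
                return unit;    /* -EINVAL or -ENFILE */

        /* dev->name now holds the chosen name. */
        return 0;
}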

static int dev_get_valid_name(struct net *net, struct net_device *dev,
                              const char *name)
{
        BUG_ON(!net);

        if (!dev_valid_name(name))
                return -EINVAL;

        if (strchr(name, '%'))
                return dev_alloc_name_ns(net, dev, name);
        else if (netdev_name_in_use(net, name))
                return -EEXIST;
        else if (dev->name != name)
                strlcpy(dev->name, name, IFNAMSIZ);

        return 0;
}

/**
 *      dev_change_name - change name of a device
 *      @dev: device
 *      @newname: name (or format string) must be at least IFNAMSIZ
 *
 *      Change the name of a device; format strings such as "eth%d"
 *      can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        unsigned char old_assign_type;
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);

        /* Some auto-enslaved devices e.g. failover slaves are
         * special, as userspace might rename the device after
         * the interface had been brought up and running since
         * the point kernel initiated auto-enslavement. Allow
         * live name change even when these slave devices are
         * up and running.
         *
         * Typically, users of these auto-enslaving devices
         * don't actually care about slave name change, as
         * they are supposed to operate on master interface
         * directly.
         */
        if (dev->flags & IFF_UP &&
            likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
                return -EBUSY;

        down_write(&devnet_rename_sem);

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
                up_write(&devnet_rename_sem);
                return 0;
        }

        memcpy(oldname, dev->name, IFNAMSIZ);

        err = dev_get_valid_name(net, dev, newname);
        if (err < 0) {
                up_write(&devnet_rename_sem);
                return err;
        }

        if (oldname[0] && !strchr(oldname, '%'))
                netdev_info(dev, "renamed from %s\n", oldname);

        old_assign_type = dev->name_assign_type;
        dev->name_assign_type = NET_NAME_RENAMED;

rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
                dev->name_assign_type = old_assign_type;
                up_write(&devnet_rename_sem);
                return ret;
        }

        up_write(&devnet_rename_sem);

        netdev_adjacent_rename_links(dev, oldname);

        write_lock(&dev_base_lock);
        netdev_name_node_del(dev->name_node);
        write_unlock(&dev_base_lock);

        synchronize_rcu();

        write_lock(&dev_base_lock);
        netdev_name_node_add(net, dev->name_node);
        write_unlock(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        down_write(&devnet_rename_sem);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        memcpy(oldname, newname, IFNAMSIZ);
                        dev->name_assign_type = old_assign_type;
                        old_assign_type = NET_NAME_RENAMED;
                        goto rollback;
                } else {
                        netdev_err(dev, "name change rollback failed: %d\n",
                                   ret);
                }
        }

        return err;
}

/**
 *      dev_set_alias - change ifalias of a device
 *      @dev: device
 *      @alias: name up to IFALIASZ
 *      @len: limit of bytes to copy from info
 *
 *      Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        struct dev_ifalias *new_alias = NULL;

        if (len >= IFALIASZ)
                return -EINVAL;

        if (len) {
                new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
                if (!new_alias)
                        return -ENOMEM;

                memcpy(new_alias->ifalias, alias, len);
                new_alias->ifalias[len] = 0;
        }

        mutex_lock(&ifalias_mutex);
        new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
                                        mutex_is_locked(&ifalias_mutex));
        mutex_unlock(&ifalias_mutex);

        if (new_alias)
                kfree_rcu(new_alias, rcuhead);

        return len;
}
EXPORT_SYMBOL(dev_set_alias);
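
/*
 * Illustrative sketch (not part of the original file): setting an alias
 * and reading it back with dev_get_alias() below. The
 * example_set_alias() name and the "uplink" string are assumptions for
 * this example only.
 */
static void __maybe_unused example_set_alias(struct net_device *dev)
{
        char buf[IFALIASZ];

        if (dev_set_alias(dev, "uplink", strlen("uplink")) < 0)
                return;

        if (dev_get_alias(dev, buf, sizeof(buf)) > 0)
                pr_info("%s alias is now %s\n", dev->name, buf);
}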

/**
 *      dev_get_alias - get ifalias of a device
 *      @dev: device
 *      @name: buffer to store name of ifalias
 *      @len: size of buffer
 *
 *      Get the ifalias for a device. The caller must make sure dev cannot go
 *      away, e.g. by holding the RCU read lock or owning a reference count
 *      on the device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
        const struct dev_ifalias *alias;
        int ret = 0;

        rcu_read_lock();
        alias = rcu_dereference(dev->ifalias);
        if (alias)
                ret = snprintf(name, len, "%s", alias->ifalias);
        rcu_read_unlock();

        return ret;
}

/**
 *      netdev_features_change - device changes features
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *      netdev_state_change - device changes state
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed state. This function calls
 *      the notifier chains for netdev_chain and sends a NEWLINK message
 *      to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                struct netdev_notifier_change_info change_info = {
                        .info.dev = dev,
                };

                call_netdevice_notifiers_info(NETDEV_CHANGE,
                                              &change_info.info);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
        }
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
        ASSERT_RTNL();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
        rtnl_lock();
        __netdev_notify_peers(dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
        int err = 0;

        /* Create and wake up the kthread once to put it in
         * TASK_INTERRUPTIBLE mode to avoid the blocked task
         * warning and work with loadavg.
         */
        n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
                                n->dev->name, n->napi_id);
        if (IS_ERR(n->thread)) {
                err = PTR_ERR(n->thread);
                pr_err("kthread_run failed with err %d\n", err);
                n->thread = NULL;
        }

        return err;
}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int ret;

        ASSERT_RTNL();
        dev_addr_check(dev);

        if (!netif_device_present(dev)) {
                /* may be detached because parent is runtime-suspended */
                if (dev->dev.parent)
                        pm_runtime_resume(dev->dev.parent);
                if (!netif_device_present(dev))
                        return -ENODEV;
        }

        /* Block netpoll from trying to do any rx path servicing.
         * If we don't do this there is a chance ndo_poll_controller
         * or ndo_poll may be running while we open the device
         */
        netpoll_poll_disable(dev);

        ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
        ret = notifier_to_errno(ret);
        if (ret)
                return ret;

        set_bit(__LINK_STATE_START, &dev->state);

        if (ops->ndo_validate_addr)
                ret = ops->ndo_validate_addr(dev);

        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);

        netpoll_poll_enable(dev);

        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
                dev->flags |= IFF_UP;
                dev_set_rx_mode(dev);
                dev_activate(dev);
                add_device_randomness(dev->dev_addr, dev->addr_len);
        }

        return ret;
}

/**
 *      dev_open        - prepare an interface for use.
 *      @dev: device to open
 *      @extack: netlink extended ack
 *
 *      Takes a device from down to up state. The device's private open
 *      function is invoked and then the multicast lists are loaded. Finally
 *      the device is moved into the up state and a %NETDEV_UP message is
 *      sent to the netdev notifier chain.
 *
 *      Calling this function on an active interface is a nop. On a failure
 *      a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
        int ret;

        if (dev->flags & IFF_UP)
                return 0;

        ret = __dev_open(dev, extack);
        if (ret < 0)
                return ret;

        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
        call_netdevice_notifiers(NETDEV_UP, dev);

        return ret;
}
EXPORT_SYMBOL(dev_open);
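
/*
 * Illustrative sketch (not part of the original file): bringing an
 * interface up from kernel code. dev_open() must run under RTNL, like
 * the other entry points in this file that assert it. The
 * example_bring_up() name is an assumption for this example only.
 */
static int __maybe_unused example_bring_up(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_open(dev, NULL);      /* NULL: no extack for error reporting */
        rtnl_unlock();

        return err;
}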

static void __dev_close_many(struct list_head *head)
{
        struct net_device *dev;

        ASSERT_RTNL();
        might_sleep();

        list_for_each_entry(dev, head, close_list) {
                /* Temporarily disable netpoll until the interface is down */
                netpoll_poll_disable(dev);

                call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

                clear_bit(__LINK_STATE_START, &dev->state);

                /* Synchronize to scheduled poll. We cannot touch poll list, it
                 * can be even on different cpu. So just clear netif_running().
                 *
                 * dev->stop() will invoke napi_disable() on all of its
                 * napi_struct instances on this device.
                 */
                smp_mb__after_atomic(); /* Commit netif_running(). */
        }

        dev_deactivate_many(head);

        list_for_each_entry(dev, head, close_list) {
                const struct net_device_ops *ops = dev->netdev_ops;

                /*
                 *      Call the device specific close. This cannot fail.
                 *      Only if device is UP
                 *
                 *      We allow it to be called even after a DETACH hot-plug
                 *      event.
                 */
                if (ops->ndo_stop)
                        ops->ndo_stop(dev);

                dev->flags &= ~IFF_UP;
                netpoll_poll_enable(dev);
        }
}
1522
1523 static void __dev_close(struct net_device *dev)
1524 {
1525         LIST_HEAD(single);
1526
1527         list_add(&dev->close_list, &single);
1528         __dev_close_many(&single);
1529         list_del(&single);
1530 }
1531
1532 void dev_close_many(struct list_head *head, bool unlink)
1533 {
1534         struct net_device *dev, *tmp;
1535
1536         /* Remove the devices that don't need to be closed */
1537         list_for_each_entry_safe(dev, tmp, head, close_list)
1538                 if (!(dev->flags & IFF_UP))
1539                         list_del_init(&dev->close_list);
1540
1541         __dev_close_many(head);
1542
1543         list_for_each_entry_safe(dev, tmp, head, close_list) {
1544                 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1545                 call_netdevice_notifiers(NETDEV_DOWN, dev);
1546                 if (unlink)
1547                         list_del_init(&dev->close_list);
1548         }
1549 }
1550 EXPORT_SYMBOL(dev_close_many);
1551
1552 /**
1553  *      dev_close - shut down an interface.
1554  *      @dev: device to shutdown
1555  *
1556  *      This function moves an active device into down state. A
1557  *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1558  *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1559  *      chain.
1560  */
1561 void dev_close(struct net_device *dev)
1562 {
1563         if (dev->flags & IFF_UP) {
1564                 LIST_HEAD(single);
1565
1566                 list_add(&dev->close_list, &single);
1567                 dev_close_many(&single, true);
1568                 list_del(&single);
1569         }
1570 }
1571 EXPORT_SYMBOL(dev_close);
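
/*
 * Illustrative sketch only: shutting a device down from a hypothetical
 * management path. Like dev_open(), dev_close() requires RTNL; calling
 * it on an already-down device is a harmless no-op.
 */
static void example_dev_shut_down(struct net_device *dev)
{
        rtnl_lock();
        dev_close(dev);
        rtnl_unlock();
}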
1572
1573
1574 /**
1575  *      dev_disable_lro - disable Large Receive Offload on a device
1576  *      @dev: device
1577  *
1578  *      Disable Large Receive Offload (LRO) on a net device.  Must be
1579  *      called under RTNL.  This is needed if received packets may be
1580  *      forwarded to another interface.
1581  */
1582 void dev_disable_lro(struct net_device *dev)
1583 {
1584         struct net_device *lower_dev;
1585         struct list_head *iter;
1586
1587         dev->wanted_features &= ~NETIF_F_LRO;
1588         netdev_update_features(dev);
1589
1590         if (unlikely(dev->features & NETIF_F_LRO))
1591                 netdev_WARN(dev, "failed to disable LRO!\n");
1592
1593         netdev_for_each_lower_dev(dev, lower_dev, iter)
1594                 dev_disable_lro(lower_dev);
1595 }
1596 EXPORT_SYMBOL(dev_disable_lro);
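
/*
 * Illustrative sketch only: a hypothetical forwarding setup turning
 * LRO off before packets from this device may be forwarded, as the
 * comment above requires. Assumes the caller already holds RTNL.
 */
static void example_prepare_for_forwarding(struct net_device *dev)
{
        ASSERT_RTNL();
        dev_disable_lro(dev);   /* also recurses into lower devices */
}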
1597
1598 /**
1599  *      dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1600  *      @dev: device
1601  *
1602  *      Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
1603  *      called under RTNL.  This is needed if Generic XDP is installed on
1604  *      the device.
1605  */
1606 static void dev_disable_gro_hw(struct net_device *dev)
1607 {
1608         dev->wanted_features &= ~NETIF_F_GRO_HW;
1609         netdev_update_features(dev);
1610
1611         if (unlikely(dev->features & NETIF_F_GRO_HW))
1612                 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1613 }
1614
1615 const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1616 {
1617 #define N(val)                                          \
1618         case NETDEV_##val:                              \
1619                 return "NETDEV_" __stringify(val);
1620         switch (cmd) {
1621         N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1622         N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1623         N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1624         N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1625         N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1626         N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1627         N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1628         N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1629         N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1630         N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
1631         N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
1632         }
1633 #undef N
1634         return "UNKNOWN_NETDEV_EVENT";
1635 }
1636 EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1637
1638 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1639                                    struct net_device *dev)
1640 {
1641         struct netdev_notifier_info info = {
1642                 .dev = dev,
1643         };
1644
1645         return nb->notifier_call(nb, val, &info);
1646 }
1647
1648 static int call_netdevice_register_notifiers(struct notifier_block *nb,
1649                                              struct net_device *dev)
1650 {
1651         int err;
1652
1653         err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1654         err = notifier_to_errno(err);
1655         if (err)
1656                 return err;
1657
1658         if (!(dev->flags & IFF_UP))
1659                 return 0;
1660
1661         call_netdevice_notifier(nb, NETDEV_UP, dev);
1662         return 0;
1663 }
1664
1665 static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1666                                                 struct net_device *dev)
1667 {
1668         if (dev->flags & IFF_UP) {
1669                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1670                                         dev);
1671                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1672         }
1673         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1674 }
1675
1676 static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1677                                                  struct net *net)
1678 {
1679         struct net_device *dev;
1680         int err;
1681
1682         for_each_netdev(net, dev) {
1683                 err = call_netdevice_register_notifiers(nb, dev);
1684                 if (err)
1685                         goto rollback;
1686         }
1687         return 0;
1688
1689 rollback:
1690         for_each_netdev_continue_reverse(net, dev)
1691                 call_netdevice_unregister_notifiers(nb, dev);
1692         return err;
1693 }
1694
1695 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1696                                                     struct net *net)
1697 {
1698         struct net_device *dev;
1699
1700         for_each_netdev(net, dev)
1701                 call_netdevice_unregister_notifiers(nb, dev);
1702 }
1703
1704 static int dev_boot_phase = 1;
1705
1706 /**
1707  * register_netdevice_notifier - register a network notifier block
1708  * @nb: notifier
1709  *
1710  * Register a notifier to be called when network device events occur.
1711  * The notifier passed is linked into the kernel structures and must
1712  * not be reused until it has been unregistered. A negative errno code
1713  * is returned on a failure.
1714  *
1715  * When registered, all registration and up events are replayed
1716  * to the new notifier to allow the device to have a race-free
1717  * view of the network device list.
1718  */
1719
1720 int register_netdevice_notifier(struct notifier_block *nb)
1721 {
1722         struct net *net;
1723         int err;
1724
1725         /* Close race with setup_net() and cleanup_net() */
1726         down_write(&pernet_ops_rwsem);
1727         rtnl_lock();
1728         err = raw_notifier_chain_register(&netdev_chain, nb);
1729         if (err)
1730                 goto unlock;
1731         if (dev_boot_phase)
1732                 goto unlock;
1733         for_each_net(net) {
1734                 err = call_netdevice_register_net_notifiers(nb, net);
1735                 if (err)
1736                         goto rollback;
1737         }
1738
1739 unlock:
1740         rtnl_unlock();
1741         up_write(&pernet_ops_rwsem);
1742         return err;
1743
1744 rollback:
1745         for_each_net_continue_reverse(net)
1746                 call_netdevice_unregister_net_notifiers(nb, net);
1747
1748         raw_notifier_chain_unregister(&netdev_chain, nb);
1749         goto unlock;
1750 }
1751 EXPORT_SYMBOL(register_netdevice_notifier);
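
/*
 * Illustrative sketch only: a minimal consumer of the notifier chain.
 * The callback signature and netdev_notifier_info_to_dev() come from
 * <linux/netdevice.h>; all "example_" names are hypothetical.
 */
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_UP:
                netdev_info(dev, "interface is up\n");
                break;
        case NETDEV_GOING_DOWN:
                netdev_info(dev, "interface is about to go down\n");
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
        .notifier_call = example_netdev_event,
};

/* At module init: register_netdevice_notifier(&example_netdev_nb);
 * existing devices are then replayed as REGISTER/UP events as
 * described above.
 */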
1752
1753 /**
1754  * unregister_netdevice_notifier - unregister a network notifier block
1755  * @nb: notifier
1756  *
1757  * Unregister a notifier previously registered by
1758  * register_netdevice_notifier(). The notifier is unlinked from the
1759  * kernel structures and may then be reused. A negative errno code
1760  * is returned on a failure.
1761  *
1762  * After unregistering, unregister and down device events are synthesized
1763  * for all devices on the device list to the removed notifier to remove
1764  * the need for special case cleanup code.
1765  */
1766
1767 int unregister_netdevice_notifier(struct notifier_block *nb)
1768 {
1769         struct net *net;
1770         int err;
1771
1772         /* Close race with setup_net() and cleanup_net() */
1773         down_write(&pernet_ops_rwsem);
1774         rtnl_lock();
1775         err = raw_notifier_chain_unregister(&netdev_chain, nb);
1776         if (err)
1777                 goto unlock;
1778
1779         for_each_net(net)
1780                 call_netdevice_unregister_net_notifiers(nb, net);
1781
1782 unlock:
1783         rtnl_unlock();
1784         up_write(&pernet_ops_rwsem);
1785         return err;
1786 }
1787 EXPORT_SYMBOL(unregister_netdevice_notifier);
1788
1789 static int __register_netdevice_notifier_net(struct net *net,
1790                                              struct notifier_block *nb,
1791                                              bool ignore_call_fail)
1792 {
1793         int err;
1794
1795         err = raw_notifier_chain_register(&net->netdev_chain, nb);
1796         if (err)
1797                 return err;
1798         if (dev_boot_phase)
1799                 return 0;
1800
1801         err = call_netdevice_register_net_notifiers(nb, net);
1802         if (err && !ignore_call_fail)
1803                 goto chain_unregister;
1804
1805         return 0;
1806
1807 chain_unregister:
1808         raw_notifier_chain_unregister(&net->netdev_chain, nb);
1809         return err;
1810 }
1811
1812 static int __unregister_netdevice_notifier_net(struct net *net,
1813                                                struct notifier_block *nb)
1814 {
1815         int err;
1816
1817         err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1818         if (err)
1819                 return err;
1820
1821         call_netdevice_unregister_net_notifiers(nb, net);
1822         return 0;
1823 }
1824
1825 /**
1826  * register_netdevice_notifier_net - register a per-netns network notifier block
1827  * @net: network namespace
1828  * @nb: notifier
1829  *
1830  * Register a notifier to be called when network device events occur.
1831  * The notifier passed is linked into the kernel structures and must
1832  * not be reused until it has been unregistered. A negative errno code
1833  * is returned on a failure.
1834  *
1835  * When registered, all registration and up events are replayed
1836  * to the new notifier to allow the device to have a race-free
1837  * view of the network device list.
1838  */
1839
1840 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1841 {
1842         int err;
1843
1844         rtnl_lock();
1845         err = __register_netdevice_notifier_net(net, nb, false);
1846         rtnl_unlock();
1847         return err;
1848 }
1849 EXPORT_SYMBOL(register_netdevice_notifier_net);
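
/*
 * Illustrative sketch only: registering the same kind of notifier for
 * a single namespace (here init_net), so events from other namespaces
 * are never delivered and need no filtering in the callback.
 */
static int example_watch_init_net(struct notifier_block *nb)
{
        return register_netdevice_notifier_net(&init_net, nb);
}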
1850
1851 /**
1852  * unregister_netdevice_notifier_net - unregister a per-netns
1853  *                                     network notifier block
1854  * @net: network namespace
1855  * @nb: notifier
1856  *
1857  * Unregister a notifier previously registered by
1858  * register_netdevice_notifier_net(). The notifier is unlinked from the
1859  * kernel structures and may then be reused. A negative errno code
1860  * is returned on a failure.
1861  *
1862  * After unregistering, unregister and down device events are synthesized
1863  * for all devices on the device list to the removed notifier to remove
1864  * the need for special case cleanup code.
1865  */
1866
1867 int unregister_netdevice_notifier_net(struct net *net,
1868                                       struct notifier_block *nb)
1869 {
1870         int err;
1871
1872         rtnl_lock();
1873         err = __unregister_netdevice_notifier_net(net, nb);
1874         rtnl_unlock();
1875         return err;
1876 }
1877 EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1878
1879 int register_netdevice_notifier_dev_net(struct net_device *dev,
1880                                         struct notifier_block *nb,
1881                                         struct netdev_net_notifier *nn)
1882 {
1883         int err;
1884
1885         rtnl_lock();
1886         err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1887         if (!err) {
1888                 nn->nb = nb;
1889                 list_add(&nn->list, &dev->net_notifier_list);
1890         }
1891         rtnl_unlock();
1892         return err;
1893 }
1894 EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1895
1896 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1897                                           struct notifier_block *nb,
1898                                           struct netdev_net_notifier *nn)
1899 {
1900         int err;
1901
1902         rtnl_lock();
1903         list_del(&nn->list);
1904         err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1905         rtnl_unlock();
1906         return err;
1907 }
1908 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
1909
1910 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1911                                              struct net *net)
1912 {
1913         struct netdev_net_notifier *nn;
1914
1915         list_for_each_entry(nn, &dev->net_notifier_list, list) {
1916                 __unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
1917                 __register_netdevice_notifier_net(net, nn->nb, true);
1918         }
1919 }
1920
1921 /**
1922  *      call_netdevice_notifiers_info - call all network notifier blocks
1923  *      @val: value passed unmodified to notifier function
1924  *      @info: notifier information data
1925  *
1926  *      Call all network notifier blocks.  Parameters and return value
1927  *      are as for raw_notifier_call_chain().
1928  */
1929
1930 static int call_netdevice_notifiers_info(unsigned long val,
1931                                          struct netdev_notifier_info *info)
1932 {
1933         struct net *net = dev_net(info->dev);
1934         int ret;
1935
1936         ASSERT_RTNL();
1937
1938         /* Run per-netns notifier block chain first, then run the global one.
1939          * Hopefully, one day, the global one is going to be removed after
1940          * all notifier block registrants are converted to be per-netns.
1941          */
1942         ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
1943         if (ret & NOTIFY_STOP_MASK)
1944                 return ret;
1945         return raw_notifier_call_chain(&netdev_chain, val, info);
1946 }
1947
1948 /**
1949  *      call_netdevice_notifiers_info_robust - call per-netns notifier blocks
1950  *                                             and roll back on error
1951  *      @val_up: value passed unmodified to notifier function
1952  *      @val_down: value passed unmodified to the notifier function when
1953  *                 recovering from an error on @val_up
1954  *      @info: notifier information data
1955  *
1956  *      Call all per-netns network notifier blocks, but not notifier blocks on
1957  *      the global notifier chain. Parameters and return value are as for
1958  *      raw_notifier_call_chain_robust().
1959  */
1960
1961 static int
1962 call_netdevice_notifiers_info_robust(unsigned long val_up,
1963                                      unsigned long val_down,
1964                                      struct netdev_notifier_info *info)
1965 {
1966         struct net *net = dev_net(info->dev);
1967
1968         ASSERT_RTNL();
1969
1970         return raw_notifier_call_chain_robust(&net->netdev_chain,
1971                                               val_up, val_down, info);
1972 }
1973
1974 static int call_netdevice_notifiers_extack(unsigned long val,
1975                                            struct net_device *dev,
1976                                            struct netlink_ext_ack *extack)
1977 {
1978         struct netdev_notifier_info info = {
1979                 .dev = dev,
1980                 .extack = extack,
1981         };
1982
1983         return call_netdevice_notifiers_info(val, &info);
1984 }
1985
1986 /**
1987  *      call_netdevice_notifiers - call all network notifier blocks
1988  *      @val: value passed unmodified to notifier function
1989  *      @dev: net_device pointer passed unmodified to notifier function
1990  *
1991  *      Call all network notifier blocks.  Parameters and return value
1992  *      are as for raw_notifier_call_chain().
1993  */
1994
1995 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1996 {
1997         return call_netdevice_notifiers_extack(val, dev, NULL);
1998 }
1999 EXPORT_SYMBOL(call_netdevice_notifiers);
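
/*
 * Illustrative sketch only: how stack code announces a device state
 * change. Must run under RTNL; a notifier may veto, which
 * notifier_to_errno() turns back into a negative errno.
 */
static int example_announce_change(struct net_device *dev)
{
        int ret;

        ASSERT_RTNL();
        ret = call_netdevice_notifiers(NETDEV_CHANGE, dev);
        return notifier_to_errno(ret);
}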
2000
2001 /**
2002  *      call_netdevice_notifiers_mtu - call all network notifier blocks
2003  *      @val: value passed unmodified to notifier function
2004  *      @dev: net_device pointer passed unmodified to notifier function
2005  *      @arg: additional u32 argument passed to the notifier function
2006  *
2007  *      Call all network notifier blocks.  Parameters and return value
2008  *      are as for raw_notifier_call_chain().
2009  */
2010 static int call_netdevice_notifiers_mtu(unsigned long val,
2011                                         struct net_device *dev, u32 arg)
2012 {
2013         struct netdev_notifier_info_ext info = {
2014                 .info.dev = dev,
2015                 .ext.mtu = arg,
2016         };
2017
2018         BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2019
2020         return call_netdevice_notifiers_info(val, &info.info);
2021 }
2022
2023 #ifdef CONFIG_NET_INGRESS
2024 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2025
2026 void net_inc_ingress_queue(void)
2027 {
2028         static_branch_inc(&ingress_needed_key);
2029 }
2030 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2031
2032 void net_dec_ingress_queue(void)
2033 {
2034         static_branch_dec(&ingress_needed_key);
2035 }
2036 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2037 #endif
2038
2039 #ifdef CONFIG_NET_EGRESS
2040 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2041
2042 void net_inc_egress_queue(void)
2043 {
2044         static_branch_inc(&egress_needed_key);
2045 }
2046 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2047
2048 void net_dec_egress_queue(void)
2049 {
2050         static_branch_dec(&egress_needed_key);
2051 }
2052 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2053 #endif
2054
2055 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2056 EXPORT_SYMBOL(netstamp_needed_key);
2057 #ifdef CONFIG_JUMP_LABEL
2058 static atomic_t netstamp_needed_deferred;
2059 static atomic_t netstamp_wanted;
2060 static void netstamp_clear(struct work_struct *work)
2061 {
2062         int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2063         int wanted;
2064
2065         wanted = atomic_add_return(deferred, &netstamp_wanted);
2066         if (wanted > 0)
2067                 static_branch_enable(&netstamp_needed_key);
2068         else
2069                 static_branch_disable(&netstamp_needed_key);
2070 }
2071 static DECLARE_WORK(netstamp_work, netstamp_clear);
2072 #endif
2073
2074 void net_enable_timestamp(void)
2075 {
2076 #ifdef CONFIG_JUMP_LABEL
2077         int wanted;
2078
2079         while (1) {
2080                 wanted = atomic_read(&netstamp_wanted);
2081                 if (wanted <= 0)
2082                         break;
2083                 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
2084                         return;
2085         }
2086         atomic_inc(&netstamp_needed_deferred);
2087         schedule_work(&netstamp_work);
2088 #else
2089         static_branch_inc(&netstamp_needed_key);
2090 #endif
2091 }
2092 EXPORT_SYMBOL(net_enable_timestamp);
2093
2094 void net_disable_timestamp(void)
2095 {
2096 #ifdef CONFIG_JUMP_LABEL
2097         int wanted;
2098
2099         while (1) {
2100                 wanted = atomic_read(&netstamp_wanted);
2101                 if (wanted <= 1)
2102                         break;
2103                 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
2104                         return;
2105         }
2106         atomic_dec(&netstamp_needed_deferred);
2107         schedule_work(&netstamp_work);
2108 #else
2109         static_branch_dec(&netstamp_needed_key);
2110 #endif
2111 }
2112 EXPORT_SYMBOL(net_disable_timestamp);
2113
2114 static inline void net_timestamp_set(struct sk_buff *skb)
2115 {
2116         skb->tstamp = 0;
2117         skb->mono_delivery_time = 0;
2118         if (static_branch_unlikely(&netstamp_needed_key))
2119                 skb->tstamp = ktime_get_real();
2120 }
2121
2122 #define net_timestamp_check(COND, SKB)                          \
2123         if (static_branch_unlikely(&netstamp_needed_key)) {     \
2124                 if ((COND) && !(SKB)->tstamp)                   \
2125                         (SKB)->tstamp = ktime_get_real();       \
2126         }                                                       \
2127
2128 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2129 {
2130         return __is_skb_forwardable(dev, skb, true);
2131 }
2132 EXPORT_SYMBOL_GPL(is_skb_forwardable);
2133
2134 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2135                               bool check_mtu)
2136 {
2137         int ret = ____dev_forward_skb(dev, skb, check_mtu);
2138
2139         if (likely(!ret)) {
2140                 skb->protocol = eth_type_trans(skb, dev);
2141                 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2142         }
2143
2144         return ret;
2145 }
2146
2147 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2148 {
2149         return __dev_forward_skb2(dev, skb, true);
2150 }
2151 EXPORT_SYMBOL_GPL(__dev_forward_skb);
2152
2153 /**
2154  * dev_forward_skb - loopback an skb to another netif
2155  * dev_forward_skb - loop back an skb to another netif
2156  * @dev: destination network device
2157  * @skb: buffer to forward
2158  *
2159  * return values:
2160  *      NET_RX_SUCCESS  (no congestion)
2161  *      NET_RX_DROP     (packet was dropped, but freed)
2162  *
2163  * dev_forward_skb can be used for injecting an skb from the
2164  * start_xmit function of one device into the receive queue
2165  * of another device.
2166  *
2167  * The receiving device may be in another namespace, so
2168  * we have to clear all information in the skb that could
2169  * impact namespace isolation.
2170  */
2171 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2172 {
2173         return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2174 }
2175 EXPORT_SYMBOL_GPL(dev_forward_skb);
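
/*
 * Illustrative sketch only: a veth-style pair device handing frames it
 * "transmits" to its peer's receive path. The priv layout and all
 * "example_" names are hypothetical; real drivers also track stats.
 */
struct example_pair_priv {
        struct net_device __rcu *peer;
};

static netdev_tx_t example_pair_xmit(struct sk_buff *skb,
                                     struct net_device *dev)
{
        struct example_pair_priv *priv = netdev_priv(dev);
        struct net_device *peer;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (likely(peer))
                dev_forward_skb(peer, skb);     /* frees skb on drop */
        else
                kfree_skb(skb);
        rcu_read_unlock();

        return NETDEV_TX_OK;
}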
2176
2177 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2178 {
2179         return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2180 }
2181
2182 static inline int deliver_skb(struct sk_buff *skb,
2183                               struct packet_type *pt_prev,
2184                               struct net_device *orig_dev)
2185 {
2186         if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2187                 return -ENOMEM;
2188         refcount_inc(&skb->users);
2189         return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2190 }
2191
2192 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2193                                           struct packet_type **pt,
2194                                           struct net_device *orig_dev,
2195                                           __be16 type,
2196                                           struct list_head *ptype_list)
2197 {
2198         struct packet_type *ptype, *pt_prev = *pt;
2199
2200         list_for_each_entry_rcu(ptype, ptype_list, list) {
2201                 if (ptype->type != type)
2202                         continue;
2203                 if (pt_prev)
2204                         deliver_skb(skb, pt_prev, orig_dev);
2205                 pt_prev = ptype;
2206         }
2207         *pt = pt_prev;
2208 }
2209
2210 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2211 {
2212         if (!ptype->af_packet_priv || !skb->sk)
2213                 return false;
2214
2215         if (ptype->id_match)
2216                 return ptype->id_match(ptype, skb->sk);
2217         else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2218                 return true;
2219
2220         return false;
2221 }
2222
2223 /**
2224  * dev_nit_active - return true if any network interface taps are in use
2225  *
2226  * @dev: network device to check for the presence of taps
2227  */
2228 bool dev_nit_active(struct net_device *dev)
2229 {
2230         return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2231 }
2232 EXPORT_SYMBOL_GPL(dev_nit_active);
2233
2234 /*
2235  *      Support routine. Sends outgoing frames to any network
2236  *      taps currently in use.
2237  */
2238
2239 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2240 {
2241         struct packet_type *ptype;
2242         struct sk_buff *skb2 = NULL;
2243         struct packet_type *pt_prev = NULL;
2244         struct list_head *ptype_list = &ptype_all;
2245
2246         rcu_read_lock();
2247 again:
2248         list_for_each_entry_rcu(ptype, ptype_list, list) {
2249                 if (ptype->ignore_outgoing)
2250                         continue;
2251
2252                 /* Never send packets back to the socket
2253                  * they originated from - MvS ([email protected])
2254                  */
2255                 if (skb_loop_sk(ptype, skb))
2256                         continue;
2257
2258                 if (pt_prev) {
2259                         deliver_skb(skb2, pt_prev, skb->dev);
2260                         pt_prev = ptype;
2261                         continue;
2262                 }
2263
2264                 /* need to clone skb, done only once */
2265                 skb2 = skb_clone(skb, GFP_ATOMIC);
2266                 if (!skb2)
2267                         goto out_unlock;
2268
2269                 net_timestamp_set(skb2);
2270
2271                 /* skb->network_header should be correctly
2272                  * set by the sender, so the check below is
2273                  * just protection against buggy protocols.
2274                  */
2275                 skb_reset_mac_header(skb2);
2276
2277                 if (skb_network_header(skb2) < skb2->data ||
2278                     skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2279                         net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2280                                              ntohs(skb2->protocol),
2281                                              dev->name);
2282                         skb_reset_network_header(skb2);
2283                 }
2284
2285                 skb2->transport_header = skb2->network_header;
2286                 skb2->pkt_type = PACKET_OUTGOING;
2287                 pt_prev = ptype;
2288         }
2289
2290         if (ptype_list == &ptype_all) {
2291                 ptype_list = &dev->ptype_all;
2292                 goto again;
2293         }
2294 out_unlock:
2295         if (pt_prev) {
2296                 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2297                         pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2298                 else
2299                         kfree_skb(skb2);
2300         }
2301         rcu_read_unlock();
2302 }
2303 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2304
2305 /**
2306  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2307  * @dev: Network device
2308  * @txq: number of queues available
2309  *
2310  * If real_num_tx_queues is changed the tc mappings may no longer be
2311  * valid. To resolve this, verify the tc mapping remains valid, and if
2312  * not, NULL the mapping. With no priorities mapping to this
2313  * offset/count pair it will no longer be used. In the worst case, if
2314  * TC0 is invalid, nothing can be done, so priority mappings are
2315  * disabled. It is expected that drivers will fix this mapping if they
2316  * can before calling netif_set_real_num_tx_queues.
2317  */
2318 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2319 {
2320         int i;
2321         struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2322
2323         /* If TC0 is invalidated disable TC mapping */
2324         if (tc->offset + tc->count > txq) {
2325                 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2326                 dev->num_tc = 0;
2327                 return;
2328         }
2329
2330         /* Invalidated prio to tc mappings set to TC0 */
2331         for (i = 1; i < TC_BITMASK + 1; i++) {
2332                 int q = netdev_get_prio_tc_map(dev, i);
2333
2334                 tc = &dev->tc_to_txq[q];
2335                 if (tc->offset + tc->count > txq) {
2336                         netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2337                                     i, q);
2338                         netdev_set_prio_tc_map(dev, i, 0);
2339                 }
2340         }
2341 }
2342
2343 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2344 {
2345         if (dev->num_tc) {
2346                 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2347                 int i;
2348
2349                 /* walk through the TCs and see if it falls into any of them */
2350                 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2351                         if ((txq - tc->offset) < tc->count)
2352                                 return i;
2353                 }
2354
2355                 /* didn't find it, just return -1 to indicate no match */
2356                 return -1;
2357         }
2358
2359         return 0;
2360 }
2361 EXPORT_SYMBOL(netdev_txq_to_tc);
2362
2363 #ifdef CONFIG_XPS
2364 static struct static_key xps_needed __read_mostly;
2365 static struct static_key xps_rxqs_needed __read_mostly;
2366 static DEFINE_MUTEX(xps_map_mutex);
2367 #define xmap_dereference(P)             \
2368         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2369
2370 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2371                              struct xps_dev_maps *old_maps, int tci, u16 index)
2372 {
2373         struct xps_map *map = NULL;
2374         int pos;
2375
2376         if (dev_maps)
2377                 map = xmap_dereference(dev_maps->attr_map[tci]);
2378         if (!map)
2379                 return false;
2380
2381         for (pos = map->len; pos--;) {
2382                 if (map->queues[pos] != index)
2383                         continue;
2384
2385                 if (map->len > 1) {
2386                         map->queues[pos] = map->queues[--map->len];
2387                         break;
2388                 }
2389
2390                 if (old_maps)
2391                         RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2392                 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2393                 kfree_rcu(map, rcu);
2394                 return false;
2395         }
2396
2397         return true;
2398 }
2399
2400 static bool remove_xps_queue_cpu(struct net_device *dev,
2401                                  struct xps_dev_maps *dev_maps,
2402                                  int cpu, u16 offset, u16 count)
2403 {
2404         int num_tc = dev_maps->num_tc;
2405         bool active = false;
2406         int tci;
2407
2408         for (tci = cpu * num_tc; num_tc--; tci++) {
2409                 int i, j;
2410
2411                 for (i = count, j = offset; i--; j++) {
2412                         if (!remove_xps_queue(dev_maps, NULL, tci, j))
2413                                 break;
2414                 }
2415
2416                 active |= i < 0;
2417         }
2418
2419         return active;
2420 }
2421
2422 static void reset_xps_maps(struct net_device *dev,
2423                            struct xps_dev_maps *dev_maps,
2424                            enum xps_map_type type)
2425 {
2426         static_key_slow_dec_cpuslocked(&xps_needed);
2427         if (type == XPS_RXQS)
2428                 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2429
2430         RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2431
2432         kfree_rcu(dev_maps, rcu);
2433 }
2434
2435 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2436                            u16 offset, u16 count)
2437 {
2438         struct xps_dev_maps *dev_maps;
2439         bool active = false;
2440         int i, j;
2441
2442         dev_maps = xmap_dereference(dev->xps_maps[type]);
2443         if (!dev_maps)
2444                 return;
2445
2446         for (j = 0; j < dev_maps->nr_ids; j++)
2447                 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2448         if (!active)
2449                 reset_xps_maps(dev, dev_maps, type);
2450
2451         if (type == XPS_CPUS) {
2452                 for (i = offset + (count - 1); count--; i--)
2453                         netdev_queue_numa_node_write(
2454                                 netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2455         }
2456 }
2457
2458 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2459                                    u16 count)
2460 {
2461         if (!static_key_false(&xps_needed))
2462                 return;
2463
2464         cpus_read_lock();
2465         mutex_lock(&xps_map_mutex);
2466
2467         if (static_key_false(&xps_rxqs_needed))
2468                 clean_xps_maps(dev, XPS_RXQS, offset, count);
2469
2470         clean_xps_maps(dev, XPS_CPUS, offset, count);
2471
2472         mutex_unlock(&xps_map_mutex);
2473         cpus_read_unlock();
2474 }
2475
2476 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2477 {
2478         netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2479 }
2480
2481 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2482                                       u16 index, bool is_rxqs_map)
2483 {
2484         struct xps_map *new_map;
2485         int alloc_len = XPS_MIN_MAP_ALLOC;
2486         int i, pos;
2487
2488         for (pos = 0; map && pos < map->len; pos++) {
2489                 if (map->queues[pos] != index)
2490                         continue;
2491                 return map;
2492         }
2493
2494         /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2495         if (map) {
2496                 if (pos < map->alloc_len)
2497                         return map;
2498
2499                 alloc_len = map->alloc_len * 2;
2500         }
2501
2502         /* Need to allocate a new map to store the tx-queue on this
2503          * CPU's/rx-queue's map.
2504          */
2505         if (is_rxqs_map)
2506                 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2507         else
2508                 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2509                                        cpu_to_node(attr_index));
2510         if (!new_map)
2511                 return NULL;
2512
2513         for (i = 0; i < pos; i++)
2514                 new_map->queues[i] = map->queues[i];
2515         new_map->alloc_len = alloc_len;
2516         new_map->len = pos;
2517
2518         return new_map;
2519 }
2520
2521 /* Copy xps maps at a given index */
2522 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2523                               struct xps_dev_maps *new_dev_maps, int index,
2524                               int tc, bool skip_tc)
2525 {
2526         int i, tci = index * dev_maps->num_tc;
2527         struct xps_map *map;
2528
2529         /* copy maps belonging to foreign traffic classes */
2530         for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2531                 if (i == tc && skip_tc)
2532                         continue;
2533
2534                 /* fill in the new device map from the old device map */
2535                 map = xmap_dereference(dev_maps->attr_map[tci]);
2536                 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2537         }
2538 }
2539
2540 /* Must be called under cpus_read_lock */
2541 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2542                           u16 index, enum xps_map_type type)
2543 {
2544         struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2545         const unsigned long *online_mask = NULL;
2546         bool active = false, copy = false;
2547         int i, j, tci, numa_node_id = -2;
2548         int maps_sz, num_tc = 1, tc = 0;
2549         struct xps_map *map, *new_map;
2550         unsigned int nr_ids;
2551
2552         if (dev->num_tc) {
2553                 /* Do not allow XPS on subordinate device directly */
2554                 num_tc = dev->num_tc;
2555                 if (num_tc < 0)
2556                         return -EINVAL;
2557
2558                 /* If queue belongs to subordinate dev use its map */
2559                 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2560
2561                 tc = netdev_txq_to_tc(dev, index);
2562                 if (tc < 0)
2563                         return -EINVAL;
2564         }
2565
2566         mutex_lock(&xps_map_mutex);
2567
2568         dev_maps = xmap_dereference(dev->xps_maps[type]);
2569         if (type == XPS_RXQS) {
2570                 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2571                 nr_ids = dev->num_rx_queues;
2572         } else {
2573                 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2574                 if (num_possible_cpus() > 1)
2575                         online_mask = cpumask_bits(cpu_online_mask);
2576                 nr_ids = nr_cpu_ids;
2577         }
2578
2579         if (maps_sz < L1_CACHE_BYTES)
2580                 maps_sz = L1_CACHE_BYTES;
2581
2582         /* The old dev_maps could be larger or smaller than the one we're
2583          * setting up now, as dev->num_tc or nr_ids could have been updated in
2584          * between. We could try to be smart, but let's be safe instead and only
2585          * copy foreign traffic classes if the two map sizes match.
2586          */
2587         if (dev_maps &&
2588             dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2589                 copy = true;
2590
2591         /* allocate memory for queue storage */
2592         for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2593              j < nr_ids;) {
2594                 if (!new_dev_maps) {
2595                         new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2596                         if (!new_dev_maps) {
2597                                 mutex_unlock(&xps_map_mutex);
2598                                 return -ENOMEM;
2599                         }
2600
2601                         new_dev_maps->nr_ids = nr_ids;
2602                         new_dev_maps->num_tc = num_tc;
2603                 }
2604
2605                 tci = j * num_tc + tc;
2606                 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2607
2608                 map = expand_xps_map(map, j, index, type == XPS_RXQS);
2609                 if (!map)
2610                         goto error;
2611
2612                 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2613         }
2614
2615         if (!new_dev_maps)
2616                 goto out_no_new_maps;
2617
2618         if (!dev_maps) {
2619                 /* Increment static keys at most once per type */
2620                 static_key_slow_inc_cpuslocked(&xps_needed);
2621                 if (type == XPS_RXQS)
2622                         static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2623         }
2624
2625         for (j = 0; j < nr_ids; j++) {
2626                 bool skip_tc = false;
2627
2628                 tci = j * num_tc + tc;
2629                 if (netif_attr_test_mask(j, mask, nr_ids) &&
2630                     netif_attr_test_online(j, online_mask, nr_ids)) {
2631                         /* add tx-queue to CPU/rx-queue maps */
2632                         int pos = 0;
2633
2634                         skip_tc = true;
2635
2636                         map = xmap_dereference(new_dev_maps->attr_map[tci]);
2637                         while ((pos < map->len) && (map->queues[pos] != index))
2638                                 pos++;
2639
2640                         if (pos == map->len)
2641                                 map->queues[map->len++] = index;
2642 #ifdef CONFIG_NUMA
2643                         if (type == XPS_CPUS) {
2644                                 if (numa_node_id == -2)
2645                                         numa_node_id = cpu_to_node(j);
2646                                 else if (numa_node_id != cpu_to_node(j))
2647                                         numa_node_id = -1;
2648                         }
2649 #endif
2650                 }
2651
2652                 if (copy)
2653                         xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2654                                           skip_tc);
2655         }
2656
2657         rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2658
2659         /* Cleanup old maps */
2660         if (!dev_maps)
2661                 goto out_no_old_maps;
2662
2663         for (j = 0; j < dev_maps->nr_ids; j++) {
2664                 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2665                         map = xmap_dereference(dev_maps->attr_map[tci]);
2666                         if (!map)
2667                                 continue;
2668
2669                         if (copy) {
2670                                 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2671                                 if (map == new_map)
2672                                         continue;
2673                         }
2674
2675                         RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2676                         kfree_rcu(map, rcu);
2677                 }
2678         }
2679
2680         old_dev_maps = dev_maps;
2681
2682 out_no_old_maps:
2683         dev_maps = new_dev_maps;
2684         active = true;
2685
2686 out_no_new_maps:
2687         if (type == XPS_CPUS)
2688                 /* update Tx queue numa node */
2689                 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2690                                              (numa_node_id >= 0) ?
2691                                              numa_node_id : NUMA_NO_NODE);
2692
2693         if (!dev_maps)
2694                 goto out_no_maps;
2695
2696         /* removes tx-queue from unused CPUs/rx-queues */
2697         for (j = 0; j < dev_maps->nr_ids; j++) {
2698                 tci = j * dev_maps->num_tc;
2699
2700                 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2701                         if (i == tc &&
2702                             netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2703                             netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2704                                 continue;
2705
2706                         active |= remove_xps_queue(dev_maps,
2707                                                    copy ? old_dev_maps : NULL,
2708                                                    tci, index);
2709                 }
2710         }
2711
2712         if (old_dev_maps)
2713                 kfree_rcu(old_dev_maps, rcu);
2714
2715         /* free map if not active */
2716         if (!active)
2717                 reset_xps_maps(dev, dev_maps, type);
2718
2719 out_no_maps:
2720         mutex_unlock(&xps_map_mutex);
2721
2722         return 0;
2723 error:
2724         /* remove any maps that we added */
2725         for (j = 0; j < nr_ids; j++) {
2726                 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2727                         new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2728                         map = copy ?
2729                               xmap_dereference(dev_maps->attr_map[tci]) :
2730                               NULL;
2731                         if (new_map && new_map != map)
2732                                 kfree(new_map);
2733                 }
2734         }
2735
2736         mutex_unlock(&xps_map_mutex);
2737
2738         kfree(new_dev_maps);
2739         return -ENOMEM;
2740 }
2741 EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2742
2743 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2744                         u16 index)
2745 {
2746         int ret;
2747
2748         cpus_read_lock();
2749         ret =  __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2750         cpus_read_unlock();
2751
2752         return ret;
2753 }
2754 EXPORT_SYMBOL(netif_set_xps_queue);
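
/*
 * Illustrative sketch only: a hypothetical driver pinning tx queue N
 * to CPU N, a common default XPS policy when queues and CPUs pair up
 * one to one. Real drivers often derive this from IRQ affinity instead.
 */
static void example_default_xps(struct net_device *dev)
{
        u16 q;

        for (q = 0; q < dev->real_num_tx_queues && q < nr_cpu_ids; q++)
                netif_set_xps_queue(dev, cpumask_of(q), q);
}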
2755
2756 #endif
2757 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2758 {
2759         struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2760
2761         /* Unbind any subordinate channels */
2762         while (txq-- != &dev->_tx[0]) {
2763                 if (txq->sb_dev)
2764                         netdev_unbind_sb_channel(dev, txq->sb_dev);
2765         }
2766 }
2767
2768 void netdev_reset_tc(struct net_device *dev)
2769 {
2770 #ifdef CONFIG_XPS
2771         netif_reset_xps_queues_gt(dev, 0);
2772 #endif
2773         netdev_unbind_all_sb_channels(dev);
2774
2775         /* Reset TC configuration of device */
2776         dev->num_tc = 0;
2777         memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2778         memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2779 }
2780 EXPORT_SYMBOL(netdev_reset_tc);
2781
2782 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2783 {
2784         if (tc >= dev->num_tc)
2785                 return -EINVAL;
2786
2787 #ifdef CONFIG_XPS
2788         netif_reset_xps_queues(dev, offset, count);
2789 #endif
2790         dev->tc_to_txq[tc].count = count;
2791         dev->tc_to_txq[tc].offset = offset;
2792         return 0;
2793 }
2794 EXPORT_SYMBOL(netdev_set_tc_queue);
2795
2796 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2797 {
2798         if (num_tc > TC_MAX_QUEUE)
2799                 return -EINVAL;
2800
2801 #ifdef CONFIG_XPS
2802         netif_reset_xps_queues_gt(dev, 0);
2803 #endif
2804         netdev_unbind_all_sb_channels(dev);
2805
2806         dev->num_tc = num_tc;
2807         return 0;
2808 }
2809 EXPORT_SYMBOL(netdev_set_num_tc);
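
/*
 * Illustrative sketch only: carving eight tx queues into two traffic
 * classes (queues 0-3 -> TC0, queues 4-7 -> TC1) from a hypothetical
 * driver setup path. The queue counts are examples, not requirements.
 */
static int example_setup_two_tcs(struct net_device *dev)
{
        int err;

        err = netdev_set_num_tc(dev, 2);
        if (err)
                return err;

        netdev_set_tc_queue(dev, 0, 4, 0);      /* tc 0: count 4, offset 0 */
        netdev_set_tc_queue(dev, 1, 4, 4);      /* tc 1: count 4, offset 4 */
        return 0;
}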
2810
2811 void netdev_unbind_sb_channel(struct net_device *dev,
2812                               struct net_device *sb_dev)
2813 {
2814         struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2815
2816 #ifdef CONFIG_XPS
2817         netif_reset_xps_queues_gt(sb_dev, 0);
2818 #endif
2819         memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2820         memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2821
2822         while (txq-- != &dev->_tx[0]) {
2823                 if (txq->sb_dev == sb_dev)
2824                         txq->sb_dev = NULL;
2825         }
2826 }
2827 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2828
2829 int netdev_bind_sb_channel_queue(struct net_device *dev,
2830                                  struct net_device *sb_dev,
2831                                  u8 tc, u16 count, u16 offset)
2832 {
2833         /* Make certain the sb_dev and dev are already configured */
2834         if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2835                 return -EINVAL;
2836
2837         /* We cannot hand out queues we don't have */
2838         if ((offset + count) > dev->real_num_tx_queues)
2839                 return -EINVAL;
2840
2841         /* Record the mapping */
2842         sb_dev->tc_to_txq[tc].count = count;
2843         sb_dev->tc_to_txq[tc].offset = offset;
2844
2845         /* Provide a way for Tx queue to find the tc_to_txq map or
2846          * XPS map for itself.
2847          */
2848         while (count--)
2849                 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2850
2851         return 0;
2852 }
2853 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2854
2855 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2856 {
2857         /* Do not use a multiqueue device to represent a subordinate channel */
2858         if (netif_is_multiqueue(dev))
2859                 return -ENODEV;
2860
2861         /* We allow channels 1 - 32767 to be used for subordinate channels.
2862          * Channel 0 is meant to be "native" mode and used only to represent
2863          * the main root device. We allow writing 0 to reset the device back
2864          * to normal mode after being used as a subordinate channel.
2865          */
2866         if (channel > S16_MAX)
2867                 return -EINVAL;
2868
2869         dev->num_tc = -channel;
2870
2871         return 0;
2872 }
2873 EXPORT_SYMBOL(netdev_set_sb_channel);
2874
2875 /*
2876  * Routine to help set real_num_tx_queues. To avoid skbs being mapped to
2877  * queues greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2878  */
2879 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2880 {
2881         bool disabling;
2882         int rc;
2883
2884         disabling = txq < dev->real_num_tx_queues;
2885
2886         if (txq < 1 || txq > dev->num_tx_queues)
2887                 return -EINVAL;
2888
2889         if (dev->reg_state == NETREG_REGISTERED ||
2890             dev->reg_state == NETREG_UNREGISTERING) {
2891                 ASSERT_RTNL();
2892
2893                 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2894                                                   txq);
2895                 if (rc)
2896                         return rc;
2897
2898                 if (dev->num_tc)
2899                         netif_setup_tc(dev, txq);
2900
2901                 dev_qdisc_change_real_num_tx(dev, txq);
2902
2903                 dev->real_num_tx_queues = txq;
2904
2905                 if (disabling) {
2906                         synchronize_net();
2907                         qdisc_reset_all_tx_gt(dev, txq);
2908 #ifdef CONFIG_XPS
2909                         netif_reset_xps_queues_gt(dev, txq);
2910 #endif
2911                 }
2912         } else {
2913                 dev->real_num_tx_queues = txq;
2914         }
2915
2916         return 0;
2917 }
2918 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2919
2920 #ifdef CONFIG_SYSFS
2921 /**
2922  *      netif_set_real_num_rx_queues - set actual number of RX queues used
2923  *      @dev: Network device
2924  *      @rxq: Actual number of RX queues
2925  *
2926  *      This must be called either with the rtnl_lock held or before
2927  *      registration of the net device.  Returns 0 on success, or a
2928  *      negative error code.  If called before registration, it always
2929  *      succeeds.
2930  */
2931 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2932 {
2933         int rc;
2934
2935         if (rxq < 1 || rxq > dev->num_rx_queues)
2936                 return -EINVAL;
2937
2938         if (dev->reg_state == NETREG_REGISTERED) {
2939                 ASSERT_RTNL();
2940
2941                 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2942                                                   rxq);
2943                 if (rc)
2944                         return rc;
2945         }
2946
2947         dev->real_num_rx_queues = rxq;
2948         return 0;
2949 }
2950 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2951 #endif
2952
2953 /**
2954  *      netif_set_real_num_queues - set actual number of RX and TX queues used
2955  *      @dev: Network device
2956  *      @txq: Actual number of TX queues
2957  *      @rxq: Actual number of RX queues
2958  *
2959  *      Set the real number of both TX and RX queues.
2960  *      Does nothing if the number of queues is already correct.
2961  */
2962 int netif_set_real_num_queues(struct net_device *dev,
2963                               unsigned int txq, unsigned int rxq)
2964 {
2965         unsigned int old_rxq = dev->real_num_rx_queues;
2966         int err;
2967
2968         if (txq < 1 || txq > dev->num_tx_queues ||
2969             rxq < 1 || rxq > dev->num_rx_queues)
2970                 return -EINVAL;
2971
2972         /* Start from increases, so the error path only does decreases -
2973          * decreases can't fail.
2974          */
2975         if (rxq > dev->real_num_rx_queues) {
2976                 err = netif_set_real_num_rx_queues(dev, rxq);
2977                 if (err)
2978                         return err;
2979         }
2980         if (txq > dev->real_num_tx_queues) {
2981                 err = netif_set_real_num_tx_queues(dev, txq);
2982                 if (err)
2983                         goto undo_rx;
2984         }
2985         if (rxq < dev->real_num_rx_queues)
2986                 WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
2987         if (txq < dev->real_num_tx_queues)
2988                 WARN_ON(netif_set_real_num_tx_queues(dev, txq));
2989
2990         return 0;
2991 undo_rx:
2992         WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
2993         return err;
2994 }
2995 EXPORT_SYMBOL(netif_set_real_num_queues);
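
/*
 * Illustrative sketch only: resizing a hypothetical device to a
 * user-requested symmetric channel count, relying on the
 * increase-first/rollback ordering implemented above.
 */
static int example_set_channels(struct net_device *dev, unsigned int n)
{
        ASSERT_RTNL();
        return netif_set_real_num_queues(dev, n, n);
}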
2996
2997 /**
2998  * netif_set_tso_max_size() - set the max size of TSO frames supported
2999  * @dev:        netdev to update
3000  * @size:       max skb->len of a TSO frame
3001  *
3002  * Set the limit on the size of TSO super-frames the device can handle.
3003  * Unless explicitly set, the stack will assume the value of
3004  * %GSO_LEGACY_MAX_SIZE.
3005  */
3006 void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
3007 {
3008         dev->tso_max_size = min(GSO_MAX_SIZE, size);
3009         if (size < READ_ONCE(dev->gso_max_size))
3010                 netif_set_gso_max_size(dev, size);
3011 }
3012 EXPORT_SYMBOL(netif_set_tso_max_size);
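
/*
 * Illustrative sketch only: a hypothetical driver capping TSO at what
 * its DMA engine can segment; the 16 KB / 32-segment figures are made
 * up. Typically called from probe, before register_netdev().
 */
static void example_apply_tso_limits(struct net_device *dev)
{
        netif_set_tso_max_size(dev, 16 * 1024);
        netif_set_tso_max_segs(dev, 32);
}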
3013
3014 /**
3015  * netif_set_tso_max_segs() - set the max number of segs supported for TSO
3016  * @dev:        netdev to update
3017  * @segs:       max number of TCP segments
3018  *
3019  * Set the limit on the number of TCP segments the device can generate from
3020  * a single TSO super-frame.
3021  * Unless explicitly set, the stack will assume the value of %GSO_MAX_SEGS.
3022  */
3023 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
3024 {
3025         dev->tso_max_segs = segs;
3026         if (segs < READ_ONCE(dev->gso_max_segs))
3027                 netif_set_gso_max_segs(dev, segs);
3028 }
3029 EXPORT_SYMBOL(netif_set_tso_max_segs);
3030
3031 /**
3032  * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
3033  * @to:         netdev to update
3034  * @from:       netdev from which to copy the limits
3035  */
3036 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
3037 {
3038         netif_set_tso_max_size(to, from->tso_max_size);
3039         netif_set_tso_max_segs(to, from->tso_max_segs);
3040 }
3041 EXPORT_SYMBOL(netif_inherit_tso_max);
3042
3043 /**
3044  * netif_get_num_default_rss_queues - default number of RSS queues
3045  *
3046  * The default is the number of physical cores if there are only 1 or 2,
3047  * or half the number of cores if there are more.
3048  */
3049 int netif_get_num_default_rss_queues(void)
3050 {
3051         cpumask_var_t cpus;
3052         int cpu, count = 0;
3053
3054         if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
3055                 return 1;
3056
3057         cpumask_copy(cpus, cpu_online_mask);
3058         for_each_cpu(cpu, cpus) {
3059                 ++count;
3060                 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
3061         }
3062         free_cpumask_var(cpus);
3063
3064         return count > 2 ? DIV_ROUND_UP(count, 2) : count;
3065 }
3066 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
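
/*
 * Illustrative sketch (not part of dev.c): drivers commonly use this
 * helper at probe time to size their queue count, clamped by what the
 * hardware supports. FOO_MAX_QUEUES is a hypothetical hardware limit:
 *
 *	static unsigned int foo_pick_num_rxqs(void)
 *	{
 *		return min_t(unsigned int, FOO_MAX_QUEUES,
 *			     netif_get_num_default_rss_queues());
 *	}
 *
 * E.g. on a 16-thread machine with SMT (8 physical cores), the loop
 * above counts 8 cores and returns DIV_ROUND_UP(8, 2) = 4 RSS queues.
 */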
3067
3068 static void __netif_reschedule(struct Qdisc *q)
3069 {
3070         struct softnet_data *sd;
3071         unsigned long flags;
3072
3073         local_irq_save(flags);
3074         sd = this_cpu_ptr(&softnet_data);
3075         q->next_sched = NULL;
3076         *sd->output_queue_tailp = q;
3077         sd->output_queue_tailp = &q->next_sched;
3078         raise_softirq_irqoff(NET_TX_SOFTIRQ);
3079         local_irq_restore(flags);
3080 }
3081
3082 void __netif_schedule(struct Qdisc *q)
3083 {
3084         if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3085                 __netif_reschedule(q);
3086 }
3087 EXPORT_SYMBOL(__netif_schedule);
3088
3089 struct dev_kfree_skb_cb {
3090         enum skb_free_reason reason;
3091 };
3092
3093 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3094 {
3095         return (struct dev_kfree_skb_cb *)skb->cb;
3096 }
3097
3098 void netif_schedule_queue(struct netdev_queue *txq)
3099 {
3100         rcu_read_lock();
3101         if (!netif_xmit_stopped(txq)) {
3102                 struct Qdisc *q = rcu_dereference(txq->qdisc);
3103
3104                 __netif_schedule(q);
3105         }
3106         rcu_read_unlock();
3107 }
3108 EXPORT_SYMBOL(netif_schedule_queue);
3109
3110 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3111 {
3112         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3113                 struct Qdisc *q;
3114
3115                 rcu_read_lock();
3116                 q = rcu_dereference(dev_queue->qdisc);
3117                 __netif_schedule(q);
3118                 rcu_read_unlock();
3119         }
3120 }
3121 EXPORT_SYMBOL(netif_tx_wake_queue);
3122
3123 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
3124 {
3125         unsigned long flags;
3126
3127         if (unlikely(!skb))
3128                 return;
3129
3130         if (likely(refcount_read(&skb->users) == 1)) {
3131                 smp_rmb();
3132                 refcount_set(&skb->users, 0);
3133         } else if (likely(!refcount_dec_and_test(&skb->users))) {
3134                 return;
3135         }
3136         get_kfree_skb_cb(skb)->reason = reason;
3137         local_irq_save(flags);
3138         skb->next = __this_cpu_read(softnet_data.completion_queue);
3139         __this_cpu_write(softnet_data.completion_queue, skb);
3140         raise_softirq_irqoff(NET_TX_SOFTIRQ);
3141         local_irq_restore(flags);
3142 }
3143 EXPORT_SYMBOL(__dev_kfree_skb_irq);
3144
3145 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
3146 {
3147         if (in_hardirq() || irqs_disabled())
3148                 __dev_kfree_skb_irq(skb, reason);
3149         else
3150                 dev_kfree_skb(skb);
3151 }
3152 EXPORT_SYMBOL(__dev_kfree_skb_any);
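
/*
 * Illustrative sketch (not part of dev.c): a TX completion handler that
 * may run in hardirq context should use the _irq/_any variants rather
 * than plain dev_kfree_skb(), which must not be called from hardirq
 * context. foo_reap_completed_tx() is a hypothetical helper:
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct sk_buff *skb = foo_reap_completed_tx(data);
 *
 *		// Safe in any context; from hardirq it queues the skb
 *		// on the completion queue for NET_TX_SOFTIRQ to free.
 *		dev_kfree_skb_any(skb);
 *		return IRQ_HANDLED;
 *	}
 */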
3153
3154
3155 /**
3156  * netif_device_detach - mark device as removed
3157  * @dev: network device
3158  *
3159  * Mark device as removed from the system and therefore no longer available.
3160  */
3161 void netif_device_detach(struct net_device *dev)
3162 {
3163         if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3164             netif_running(dev)) {
3165                 netif_tx_stop_all_queues(dev);
3166         }
3167 }
3168 EXPORT_SYMBOL(netif_device_detach);
3169
3170 /**
3171  * netif_device_attach - mark device as attached
3172  * @dev: network device
3173  *
3174  * Mark device as attached to the system and restart it if needed.
3175  */
3176 void netif_device_attach(struct net_device *dev)
3177 {
3178         if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3179             netif_running(dev)) {
3180                 netif_tx_wake_all_queues(dev);
3181                 __netdev_watchdog_up(dev);
3182         }
3183 }
3184 EXPORT_SYMBOL(netif_device_attach);
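
/*
 * Illustrative sketch (not part of dev.c): the detach/attach pair is the
 * usual bracket around driver suspend/resume, stopping the TX queues
 * while the hardware is unreachable. foo_hw_power_down()/_up() are
 * hypothetical helpers:
 *
 *	static int foo_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);	// stop all TX queues
 *		foo_hw_power_down(dev);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		foo_hw_power_up(dev);
 *		netif_device_attach(dev);	// wake queues, watchdog
 *		return 0;
 *	}
 */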
3185
3186 /*
3187  * Returns a Tx hash based on the given packet descriptor and the number of
3188  * Tx queues to be used as a distribution range.
3189  */
3190 static u16 skb_tx_hash(const struct net_device *dev,
3191                        const struct net_device *sb_dev,
3192                        struct sk_buff *skb)
3193 {
3194         u32 hash;
3195         u16 qoffset = 0;
3196         u16 qcount = dev->real_num_tx_queues;
3197
3198         if (dev->num_tc) {
3199                 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3200
3201                 qoffset = sb_dev->tc_to_txq[tc].offset;
3202                 qcount = sb_dev->tc_to_txq[tc].count;
3203                 if (unlikely(!qcount)) {
3204                         net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3205                                              sb_dev->name, qoffset, tc);
3206                         qoffset = 0;
3207                         qcount = dev->real_num_tx_queues;
3208                 }
3209         }
3210
3211         if (skb_rx_queue_recorded(skb)) {
3212                 hash = skb_get_rx_queue(skb);
3213                 if (hash >= qoffset)
3214                         hash -= qoffset;
3215                 while (unlikely(hash >= qcount))
3216                         hash -= qcount;
3217                 return hash + qoffset;
3218         }
3219
3220         return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3221 }
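
/*
 * Worked example for the reciprocal_scale() fallback above:
 * reciprocal_scale(hash, qcount) computes ((u64)hash * qcount) >> 32,
 * mapping a 32-bit hash uniformly onto [0, qcount) without a division.
 * E.g. hash = 0xC0000000 (3/4 of the 32-bit range) with qcount = 8 and
 * qoffset = 16 gives (0xC0000000ULL * 8) >> 32 = 6, i.e. queue 22.
 */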
3222
3223 static void skb_warn_bad_offload(const struct sk_buff *skb)
3224 {
3225         static const netdev_features_t null_features;
3226         struct net_device *dev = skb->dev;
3227         const char *name = "";
3228
3229         if (!net_ratelimit())
3230                 return;
3231
3232         if (dev) {
3233                 if (dev->dev.parent)
3234                         name = dev_driver_string(dev->dev.parent);
3235                 else
3236                         name = netdev_name(dev);
3237         }
3238         skb_dump(KERN_WARNING, skb, false);
3239         WARN(1, "%s: caps=(%pNF, %pNF)\n",
3240              name, dev ? &dev->features : &null_features,
3241              skb->sk ? &skb->sk->sk_route_caps : &null_features);
3242 }
3243
3244 /*
3245  * Invalidate hardware checksum when packet is to be mangled, and
3246  * complete checksum manually on outgoing path.
3247  */
3248 int skb_checksum_help(struct sk_buff *skb)
3249 {
3250         __wsum csum;
3251         int ret = 0, offset;
3252
3253         if (skb->ip_summed == CHECKSUM_COMPLETE)
3254                 goto out_set_summed;
3255
3256         if (unlikely(skb_is_gso(skb))) {
3257                 skb_warn_bad_offload(skb);
3258                 return -EINVAL;
3259         }
3260
3261         /* Before computing a checksum, we should make sure no frag could
3262          * be modified by an external entity: the checksum could be wrong.
3263          */
3264         if (skb_has_shared_frag(skb)) {
3265                 ret = __skb_linearize(skb);
3266                 if (ret)
3267                         goto out;
3268         }
3269
3270         offset = skb_checksum_start_offset(skb);
3271         ret = -EINVAL;
3272         if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3273                 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3274                 goto out;
3275         }
3276         csum = skb_checksum(skb, offset, skb->len - offset, 0);
3277
3278         offset += skb->csum_offset;
3279         if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) {
3280                 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3281                 goto out;
3282         }
3283         ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3284         if (ret)
3285                 goto out;
3286
3287         *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3288 out_set_summed:
3289         skb->ip_summed = CHECKSUM_NONE;
3290 out:
3291         return ret;
3292 }
3293 EXPORT_SYMBOL(skb_checksum_help);
3294
3295 int skb_crc32c_csum_help(struct sk_buff *skb)
3296 {
3297         __le32 crc32c_csum;
3298         int ret = 0, offset, start;
3299
3300         if (skb->ip_summed != CHECKSUM_PARTIAL)
3301                 goto out;
3302
3303         if (unlikely(skb_is_gso(skb)))
3304                 goto out;
3305
3306         /* Before computing a checksum, we should make sure no frag could
3307          * be modified by an external entity: the checksum could be wrong.
3308          */
3309         if (unlikely(skb_has_shared_frag(skb))) {
3310                 ret = __skb_linearize(skb);
3311                 if (ret)
3312                         goto out;
3313         }
3314         start = skb_checksum_start_offset(skb);
3315         offset = start + offsetof(struct sctphdr, checksum);
3316         if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3317                 ret = -EINVAL;
3318                 goto out;
3319         }
3320
3321         ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3322         if (ret)
3323                 goto out;
3324
3325         crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3326                                                   skb->len - start, ~(__u32)0,
3327                                                   crc32c_csum_stub));
3328         *(__le32 *)(skb->data + offset) = crc32c_csum;
3329         skb->ip_summed = CHECKSUM_NONE;
3330         skb->csum_not_inet = 0;
3331 out:
3332         return ret;
3333 }
3334
3335 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3336 {
3337         __be16 type = skb->protocol;
3338
3339         /* Tunnel GSO handlers can set the protocol to Ethernet. */
3340         if (type == htons(ETH_P_TEB)) {
3341                 struct ethhdr *eth;
3342
3343                 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3344                         return 0;
3345
3346                 eth = (struct ethhdr *)skb->data;
3347                 type = eth->h_proto;
3348         }
3349
3350         return __vlan_get_protocol(skb, type, depth);
3351 }
3352
3353 /* openvswitch calls this on the RX path, so we need a different check. */
3355 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3356 {
3357         if (tx_path)
3358                 return skb->ip_summed != CHECKSUM_PARTIAL &&
3359                        skb->ip_summed != CHECKSUM_UNNECESSARY;
3360
3361         return skb->ip_summed == CHECKSUM_NONE;
3362 }
3363
3364 /**
3365  *      __skb_gso_segment - Perform segmentation on skb.
3366  *      @skb: buffer to segment
3367  *      @features: features for the output path (see dev->features)
3368  *      @tx_path: whether it is called in TX path
3369  *
3370  *      This function segments the given skb and returns a list of segments.
3371  *
3372  *      It may return NULL if the skb requires no segmentation.  This is
3373  *      only possible when GSO is used for verifying header integrity.
3374  *
3375  *      Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
3376  */
3377 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3378                                   netdev_features_t features, bool tx_path)
3379 {
3380         struct sk_buff *segs;
3381
3382         if (unlikely(skb_needs_check(skb, tx_path))) {
3383                 int err;
3384
3385                 /* We're going to init ->check field in TCP or UDP header */
3386                 err = skb_cow_head(skb, 0);
3387                 if (err < 0)
3388                         return ERR_PTR(err);
3389         }
3390
3391         /* Only report GSO partial support if it will enable us to
3392          * support segmentation on this frame without needing additional
3393          * work.
3394          */
3395         if (features & NETIF_F_GSO_PARTIAL) {
3396                 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3397                 struct net_device *dev = skb->dev;
3398
3399                 partial_features |= dev->features & dev->gso_partial_features;
3400                 if (!skb_gso_ok(skb, features | partial_features))
3401                         features &= ~NETIF_F_GSO_PARTIAL;
3402         }
3403
3404         BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
3405                      sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3406
3407         SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3408         SKB_GSO_CB(skb)->encap_level = 0;
3409
3410         skb_reset_mac_header(skb);
3411         skb_reset_mac_len(skb);
3412
3413         segs = skb_mac_gso_segment(skb, features);
3414
3415         if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3416                 skb_warn_bad_offload(skb);
3417
3418         return segs;
3419 }
3420 EXPORT_SYMBOL(__skb_gso_segment);
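
/*
 * Illustrative sketch (not part of dev.c): TX-path callers use the
 * skb_gso_segment() wrapper (tx_path = true) and then walk the returned
 * singly linked list, much as validate_xmit_skb() does further below.
 * foo_xmit_one() is a hypothetical transmit helper:
 *
 *	static int foo_segment_and_send(struct sk_buff *skb,
 *					netdev_features_t features)
 *	{
 *		struct sk_buff *segs, *next;
 *
 *		segs = skb_gso_segment(skb, features);
 *		if (IS_ERR(segs))
 *			return PTR_ERR(segs);
 *		if (!segs)
 *			return foo_xmit_one(skb);  // no segmentation needed
 *
 *		consume_skb(skb);	// drop the original super-frame
 *		for (; segs; segs = next) {
 *			next = segs->next;
 *			skb_mark_not_on_list(segs);
 *			foo_xmit_one(segs);
 *		}
 *		return 0;
 *	}
 */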
3421
3422 /* Take action when hardware reception checksum errors are detected. */
3423 #ifdef CONFIG_BUG
3424 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3425 {
3426         netdev_err(dev, "hw csum failure\n");
3427         skb_dump(KERN_ERR, skb, true);
3428         dump_stack();
3429 }
3430
3431 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3432 {
3433         DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3434 }
3435 EXPORT_SYMBOL(netdev_rx_csum_fault);
3436 #endif
3437
3438 /* XXX: check that highmem exists at all on the given machine. */
3439 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3440 {
3441 #ifdef CONFIG_HIGHMEM
3442         int i;
3443
3444         if (!(dev->features & NETIF_F_HIGHDMA)) {
3445                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3446                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3447
3448                         if (PageHighMem(skb_frag_page(frag)))
3449                                 return 1;
3450                 }
3451         }
3452 #endif
3453         return 0;
3454 }
3455
3456 /* If this is an MPLS offload request, verify that we are testing hardware
3457  * MPLS features instead of the standard features for the netdev.
3458  */
3459 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3460 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3461                                            netdev_features_t features,
3462                                            __be16 type)
3463 {
3464         if (eth_p_mpls(type))
3465                 features &= skb->dev->mpls_features;
3466
3467         return features;
3468 }
3469 #else
3470 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3471                                            netdev_features_t features,
3472                                            __be16 type)
3473 {
3474         return features;
3475 }
3476 #endif
3477
3478 static netdev_features_t harmonize_features(struct sk_buff *skb,
3479         netdev_features_t features)
3480 {
3481         __be16 type;
3482
3483         type = skb_network_protocol(skb, NULL);
3484         features = net_mpls_features(skb, features, type);
3485
3486         if (skb->ip_summed != CHECKSUM_NONE &&
3487             !can_checksum_protocol(features, type)) {
3488                 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3489         }
3490         if (illegal_highdma(skb->dev, skb))
3491                 features &= ~NETIF_F_SG;
3492
3493         return features;
3494 }
3495
3496 netdev_features_t passthru_features_check(struct sk_buff *skb,
3497                                           struct net_device *dev,
3498                                           netdev_features_t features)
3499 {
3500         return features;
3501 }
3502 EXPORT_SYMBOL(passthru_features_check);
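
/*
 * Illustrative sketch (not part of dev.c): devices that impose no
 * per-skb feature restrictions can plug this helper straight into their
 * ops instead of writing their own ndo_features_check.
 * foo_start_xmit is a hypothetical xmit routine:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_features_check	= passthru_features_check,
 *	};
 */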
3503
3504 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3505                                              struct net_device *dev,
3506                                              netdev_features_t features)
3507 {
3508         return vlan_features_check(skb, features);
3509 }
3510
3511 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3512                                             struct net_device *dev,
3513                                             netdev_features_t features)
3514 {
3515         u16 gso_segs = skb_shinfo(skb)->gso_segs;
3516
3517         if (gso_segs > READ_ONCE(dev->gso_max_segs))
3518                 return features & ~NETIF_F_GSO_MASK;
3519
3520         if (!skb_shinfo(skb)->gso_type) {
3521                 skb_warn_bad_offload(skb);
3522                 return features & ~NETIF_F_GSO_MASK;
3523         }
3524
3525         /* Support for GSO partial features requires software
3526          * intervention before we can actually process the packets,
3527          * so strip support for any partial features now; they can be
3528          * pulled back in after the frame has been partially
3529          * segmented.
3530          */
3531         if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3532                 features &= ~dev->gso_partial_features;
3533
3534         /* Make sure to clear the IPv4 ID mangling feature if the
3535          * IPv4 header has the potential to be fragmented.
3536          */
3537         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3538                 struct iphdr *iph = skb->encapsulation ?
3539                                     inner_ip_hdr(skb) : ip_hdr(skb);
3540
3541                 if (!(iph->frag_off & htons(IP_DF)))
3542                         features &= ~NETIF_F_TSO_MANGLEID;
3543         }
3544
3545         return features;
3546 }
3547
3548 netdev_features_t netif_skb_features(struct sk_buff *skb)
3549 {
3550         struct net_device *dev = skb->dev;
3551         netdev_features_t features = dev->features;
3552
3553         if (skb_is_gso(skb))
3554                 features = gso_features_check(skb, dev, features);
3555
3556         /* If this is an encapsulation offload request, verify that we
3557          * are testing hardware encapsulation features instead of the
3558          * standard features for the netdev.
3559          */
3560         if (skb->encapsulation)
3561                 features &= dev->hw_enc_features;
3562
3563         if (skb_vlan_tagged(skb))
3564                 features = netdev_intersect_features(features,
3565                                                      dev->vlan_features |
3566                                                      NETIF_F_HW_VLAN_CTAG_TX |
3567                                                      NETIF_F_HW_VLAN_STAG_TX);
3568
3569         if (dev->netdev_ops->ndo_features_check)
3570                 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3571                                                                 features);
3572         else
3573                 features &= dflt_features_check(skb, dev, features);
3574
3575         return harmonize_features(skb, features);
3576 }
3577 EXPORT_SYMBOL(netif_skb_features);
3578
3579 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3580                     struct netdev_queue *txq, bool more)
3581 {
3582         unsigned int len;
3583         int rc;
3584
3585         if (dev_nit_active(dev))
3586                 dev_queue_xmit_nit(skb, dev);
3587
3588         len = skb->len;
3589         trace_net_dev_start_xmit(skb, dev);
3590         rc = netdev_start_xmit(skb, dev, txq, more);
3591         trace_net_dev_xmit(skb, rc, dev, len);
3592
3593         return rc;
3594 }
3595
3596 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3597                                     struct netdev_queue *txq, int *ret)
3598 {
3599         struct sk_buff *skb = first;
3600         int rc = NETDEV_TX_OK;
3601
3602         while (skb) {
3603                 struct sk_buff *next = skb->next;
3604
3605                 skb_mark_not_on_list(skb);
3606                 rc = xmit_one(skb, dev, txq, next != NULL);
3607                 if (unlikely(!dev_xmit_complete(rc))) {
3608                         skb->next = next;
3609                         goto out;
3610                 }
3611
3612                 skb = next;
3613                 if (netif_tx_queue_stopped(txq) && skb) {
3614                         rc = NETDEV_TX_BUSY;
3615                         break;
3616                 }
3617         }
3618
3619 out:
3620         *ret = rc;
3621         return skb;
3622 }
3623
3624 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3625                                           netdev_features_t features)
3626 {
3627         if (skb_vlan_tag_present(skb) &&
3628             !vlan_hw_offload_capable(features, skb->vlan_proto))
3629                 skb = __vlan_hwaccel_push_inside(skb);
3630         return skb;
3631 }
3632
3633 int skb_csum_hwoffload_help(struct sk_buff *skb,
3634                             const netdev_features_t features)
3635 {
3636         if (unlikely(skb_csum_is_sctp(skb)))
3637                 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3638                         skb_crc32c_csum_help(skb);
3639
3640         if (features & NETIF_F_HW_CSUM)
3641                 return 0;
3642
3643         if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3644                 switch (skb->csum_offset) {
3645                 case offsetof(struct tcphdr, check):
3646                 case offsetof(struct udphdr, check):
3647                         return 0;
3648                 }
3649         }
3650
3651         return skb_checksum_help(skb);
3652 }
3653 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3654
3655 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3656 {
3657         netdev_features_t features;
3658
3659         features = netif_skb_features(skb);
3660         skb = validate_xmit_vlan(skb, features);
3661         if (unlikely(!skb))
3662                 goto out_null;
3663
3664         skb = sk_validate_xmit_skb(skb, dev);
3665         if (unlikely(!skb))
3666                 goto out_null;
3667
3668         if (netif_needs_gso(skb, features)) {
3669                 struct sk_buff *segs;
3670
3671                 segs = skb_gso_segment(skb, features);
3672                 if (IS_ERR(segs)) {
3673                         goto out_kfree_skb;
3674                 } else if (segs) {
3675                         consume_skb(skb);
3676                         skb = segs;
3677                 }
3678         } else {
3679                 if (skb_needs_linearize(skb, features) &&
3680                     __skb_linearize(skb))
3681                         goto out_kfree_skb;
3682
3683                 /* If packet is not checksummed and device does not
3684                  * support checksumming for this protocol, complete
3685                  * checksumming here.
3686                  */
3687                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3688                         if (skb->encapsulation)
3689                                 skb_set_inner_transport_header(skb,
3690                                                                skb_checksum_start_offset(skb));
3691                         else
3692                                 skb_set_transport_header(skb,
3693                                                          skb_checksum_start_offset(skb));
3694                         if (skb_csum_hwoffload_help(skb, features))
3695                                 goto out_kfree_skb;
3696                 }
3697         }
3698
3699         skb = validate_xmit_xfrm(skb, features, again);
3700
3701         return skb;
3702
3703 out_kfree_skb:
3704         kfree_skb(skb);
3705 out_null:
3706         dev_core_stats_tx_dropped_inc(dev);
3707         return NULL;
3708 }
3709
3710 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3711 {
3712         struct sk_buff *next, *head = NULL, *tail;
3713
3714         for (; skb != NULL; skb = next) {
3715                 next = skb->next;
3716                 skb_mark_not_on_list(skb);
3717
3718                 /* in case the skb won't be segmented, point it to itself */
3719                 skb->prev = skb;
3720
3721                 skb = validate_xmit_skb(skb, dev, again);
3722                 if (!skb)
3723                         continue;
3724
3725                 if (!head)
3726                         head = skb;
3727                 else
3728                         tail->next = skb;
3729                 /* If skb was segmented, skb->prev points to
3730                  * the last segment. If not, it still contains skb.
3731                  */
3732                 tail = skb->prev;
3733         }
3734         return head;
3735 }
3736 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3737
3738 static void qdisc_pkt_len_init(struct sk_buff *skb)
3739 {
3740         const struct skb_shared_info *shinfo = skb_shinfo(skb);
3741
3742         qdisc_skb_cb(skb)->pkt_len = skb->len;
3743
3744         /* To get a more precise estimate of the bytes sent on the wire,
3745          * add the header size of every segment to pkt_len.
3746          */
3747         if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3748                 unsigned int hdr_len;
3749                 u16 gso_segs = shinfo->gso_segs;
3750
3751                 /* mac layer + network layer */
3752                 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3753
3754                 /* + transport layer */
3755                 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3756                         const struct tcphdr *th;
3757                         struct tcphdr _tcphdr;
3758
3759                         th = skb_header_pointer(skb, skb_transport_offset(skb),
3760                                                 sizeof(_tcphdr), &_tcphdr);
3761                         if (likely(th))
3762                                 hdr_len += __tcp_hdrlen(th);
3763                 } else {
3764                         struct udphdr _udphdr;
3765
3766                         if (skb_header_pointer(skb, skb_transport_offset(skb),
3767                                                sizeof(_udphdr), &_udphdr))
3768                                 hdr_len += sizeof(struct udphdr);
3769                 }
3770
3771                 if (shinfo->gso_type & SKB_GSO_DODGY)
3772                         gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3773                                                 shinfo->gso_size);
3774
3775                 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3776         }
3777 }
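
/*
 * Worked example for qdisc_pkt_len_init(): a TSO skb with skb->len =
 * 7306, gso_size = 1448 and hdr_len = 66 (14 Ethernet + 20 IPv4 + 32 TCP
 * with timestamps) carries gso_segs = 5, since 7306 - 66 = 5 * 1448.
 * The qdisc therefore accounts pkt_len = 7306 + (5 - 1) * 66 = 7570
 * bytes, matching the five packets that will eventually hit the wire.
 */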
3778
3779 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3780                              struct sk_buff **to_free,
3781                              struct netdev_queue *txq)
3782 {
3783         int rc;
3784
3785         rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3786         if (rc == NET_XMIT_SUCCESS)
3787                 trace_qdisc_enqueue(q, txq, skb);
3788         return rc;
3789 }
3790
3791 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3792                                  struct net_device *dev,
3793                                  struct netdev_queue *txq)
3794 {
3795         spinlock_t *root_lock = qdisc_lock(q);
3796         struct sk_buff *to_free = NULL;
3797         bool contended;
3798         int rc;
3799
3800         qdisc_calculate_pkt_len(skb, q);
3801
3802         if (q->flags & TCQ_F_NOLOCK) {
3803                 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3804                     qdisc_run_begin(q)) {
3805                         /* Retest nolock_qdisc_is_empty() within the protection
3806                          * of q->seqlock to protect from racing with requeuing.
3807                          */
3808                         if (unlikely(!nolock_qdisc_is_empty(q))) {
3809                                 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3810                                 __qdisc_run(q);
3811                                 qdisc_run_end(q);
3812
3813                                 goto no_lock_out;
3814                         }
3815
3816                         qdisc_bstats_cpu_update(q, skb);
3817                         if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3818                             !nolock_qdisc_is_empty(q))
3819                                 __qdisc_run(q);
3820
3821                         qdisc_run_end(q);
3822                         return NET_XMIT_SUCCESS;
3823                 }
3824
3825                 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3826                 qdisc_run(q);
3827
3828 no_lock_out:
3829                 if (unlikely(to_free))
3830                         kfree_skb_list_reason(to_free,
3831                                               SKB_DROP_REASON_QDISC_DROP);
3832                 return rc;
3833         }
3834
3835         /*
3836          * Heuristic to force contended enqueues to serialize on a
3837          * separate lock before trying to get the qdisc main lock.
3838          * This permits the qdisc->running owner to get the lock more
3839          * often and dequeue packets faster.
3840          * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
3841          * and then other tasks will only enqueue packets. The packets will be
3842          * sent after the qdisc owner is scheduled again. To prevent this
3843          * scenario the task always serializes on the lock.
3844          */
3845         contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
3846         if (unlikely(contended))
3847                 spin_lock(&q->busylock);
3848
3849         spin_lock(root_lock);
3850         if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3851                 __qdisc_drop(skb, &to_free);
3852                 rc = NET_XMIT_DROP;
3853         } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3854                    qdisc_run_begin(q)) {
3855                 /*
3856                  * This is a work-conserving queue; there are no old skbs
3857                  * waiting to be sent out; and the qdisc is not running -
3858                  * xmit the skb directly.
3859                  */
3860
3861                 qdisc_bstats_update(q, skb);
3862
3863                 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3864                         if (unlikely(contended)) {
3865                                 spin_unlock(&q->busylock);
3866                                 contended = false;
3867                         }
3868                         __qdisc_run(q);
3869                 }
3870
3871                 qdisc_run_end(q);
3872                 rc = NET_XMIT_SUCCESS;
3873         } else {
3874                 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3875                 if (qdisc_run_begin(q)) {
3876                         if (unlikely(contended)) {
3877                                 spin_unlock(&q->busylock);
3878                                 contended = false;
3879                         }
3880                         __qdisc_run(q);
3881                         qdisc_run_end(q);
3882                 }
3883         }
3884         spin_unlock(root_lock);
3885         if (unlikely(to_free))
3886                 kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP);
3887         if (unlikely(contended))
3888                 spin_unlock(&q->busylock);
3889         return rc;
3890 }
3891
3892 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3893 static void skb_update_prio(struct sk_buff *skb)
3894 {
3895         const struct netprio_map *map;
3896         const struct sock *sk;
3897         unsigned int prioidx;
3898
3899         if (skb->priority)
3900                 return;
3901         map = rcu_dereference_bh(skb->dev->priomap);
3902         if (!map)
3903                 return;
3904         sk = skb_to_full_sk(skb);
3905         if (!sk)
3906                 return;
3907
3908         prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3909
3910         if (prioidx < map->priomap_len)
3911                 skb->priority = map->priomap[prioidx];
3912 }
3913 #else
3914 #define skb_update_prio(skb)
3915 #endif
3916
3917 /**
3918  *      dev_loopback_xmit - loop back @skb
3919  *      @net: network namespace this loopback is happening in
3920  *      @sk:  sk needed to be a netfilter okfn
3921  *      @skb: buffer to transmit
3922  */
3923 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3924 {
3925         skb_reset_mac_header(skb);
3926         __skb_pull(skb, skb_network_offset(skb));
3927         skb->pkt_type = PACKET_LOOPBACK;
3928         if (skb->ip_summed == CHECKSUM_NONE)
3929                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3930         DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
3931         skb_dst_force(skb);
3932         netif_rx(skb);
3933         return 0;
3934 }
3935 EXPORT_SYMBOL(dev_loopback_xmit);
3936
3937 #ifdef CONFIG_NET_EGRESS
3938 static struct sk_buff *
3939 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3940 {
3941 #ifdef CONFIG_NET_CLS_ACT
3942         struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3943         struct tcf_result cl_res;
3944
3945         if (!miniq)
3946                 return skb;
3947
3948         /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3949         tc_skb_cb(skb)->mru = 0;
3950         tc_skb_cb(skb)->post_ct = false;
3951         mini_qdisc_bstats_cpu_update(miniq, skb);
3952
3953         switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
3954         case TC_ACT_OK:
3955         case TC_ACT_RECLASSIFY:
3956                 skb->tc_index = TC_H_MIN(cl_res.classid);
3957                 break;
3958         case TC_ACT_SHOT:
3959                 mini_qdisc_qstats_cpu_drop(miniq);
3960                 *ret = NET_XMIT_DROP;
3961                 kfree_skb_reason(skb, SKB_DROP_REASON_TC_EGRESS);
3962                 return NULL;
3963         case TC_ACT_STOLEN:
3964         case TC_ACT_QUEUED:
3965         case TC_ACT_TRAP:
3966                 *ret = NET_XMIT_SUCCESS;
3967                 consume_skb(skb);
3968                 return NULL;
3969         case TC_ACT_REDIRECT:
3970                 /* No need to push/pop skb's mac_header here on egress! */
3971                 skb_do_redirect(skb);
3972                 *ret = NET_XMIT_SUCCESS;
3973                 return NULL;
3974         default:
3975                 break;
3976         }
3977 #endif /* CONFIG_NET_CLS_ACT */
3978
3979         return skb;
3980 }
3981
3982 static struct netdev_queue *
3983 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
3984 {
3985         int qm = skb_get_queue_mapping(skb);
3986
3987         return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
3988 }
3989
3990 static bool netdev_xmit_txqueue_skipped(void)
3991 {
3992         return __this_cpu_read(softnet_data.xmit.skip_txqueue);
3993 }
3994
3995 void netdev_xmit_skip_txqueue(bool skip)
3996 {
3997         __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
3998 }
3999 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
4000 #endif /* CONFIG_NET_EGRESS */
4001
4002 #ifdef CONFIG_XPS
4003 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
4004                                struct xps_dev_maps *dev_maps, unsigned int tci)
4005 {
4006         int tc = netdev_get_prio_tc_map(dev, skb->priority);
4007         struct xps_map *map;
4008         int queue_index = -1;
4009
4010         if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
4011                 return queue_index;
4012
4013         tci *= dev_maps->num_tc;
4014         tci += tc;
4015
4016         map = rcu_dereference(dev_maps->attr_map[tci]);
4017         if (map) {
4018                 if (map->len == 1)
4019                         queue_index = map->queues[0];
4020                 else
4021                         queue_index = map->queues[reciprocal_scale(
4022                                                 skb_get_hash(skb), map->len)];
4023                 if (unlikely(queue_index >= dev->real_num_tx_queues))
4024                         queue_index = -1;
4025         }
4026         return queue_index;
4027 }
4028 #endif
4029
4030 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4031                          struct sk_buff *skb)
4032 {
4033 #ifdef CONFIG_XPS
4034         struct xps_dev_maps *dev_maps;
4035         struct sock *sk = skb->sk;
4036         int queue_index = -1;
4037
4038         if (!static_key_false(&xps_needed))
4039                 return -1;
4040
4041         rcu_read_lock();
4042         if (!static_key_false(&xps_rxqs_needed))
4043                 goto get_cpus_map;
4044
4045         dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4046         if (dev_maps) {
4047                 int tci = sk_rx_queue_get(sk);
4048
4049                 if (tci >= 0)
4050                         queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4051                                                           tci);
4052         }
4053
4054 get_cpus_map:
4055         if (queue_index < 0) {
4056                 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4057                 if (dev_maps) {
4058                         unsigned int tci = skb->sender_cpu - 1;
4059
4060                         queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4061                                                           tci);
4062                 }
4063         }
4064         rcu_read_unlock();
4065
4066         return queue_index;
4067 #else
4068         return -1;
4069 #endif
4070 }
4071
4072 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4073                      struct net_device *sb_dev)
4074 {
4075         return 0;
4076 }
4077 EXPORT_SYMBOL(dev_pick_tx_zero);
4078
4079 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4080                        struct net_device *sb_dev)
4081 {
4082         return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4083 }
4084 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
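
/*
 * Illustrative sketch (not part of dev.c): a driver that wants strictly
 * CPU-affine TX queue selection can wire this helper into its own
 * ndo_select_queue:
 *
 *	static u16 foo_select_queue(struct net_device *dev,
 *				    struct sk_buff *skb,
 *				    struct net_device *sb_dev)
 *	{
 *		return dev_pick_tx_cpu_id(dev, skb, sb_dev);
 *	}
 */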
4085
4086 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4087                      struct net_device *sb_dev)
4088 {
4089         struct sock *sk = skb->sk;
4090         int queue_index = sk_tx_queue_get(sk);
4091
4092         sb_dev = sb_dev ? : dev;
4093
4094         if (queue_index < 0 || skb->ooo_okay ||
4095             queue_index >= dev->real_num_tx_queues) {
4096                 int new_index = get_xps_queue(dev, sb_dev, skb);
4097
4098                 if (new_index < 0)
4099                         new_index = skb_tx_hash(dev, sb_dev, skb);
4100
4101                 if (queue_index != new_index && sk &&
4102                     sk_fullsock(sk) &&
4103                     rcu_access_pointer(sk->sk_dst_cache))
4104                         sk_tx_queue_set(sk, new_index);
4105
4106                 queue_index = new_index;
4107         }
4108
4109         return queue_index;
4110 }
4111 EXPORT_SYMBOL(netdev_pick_tx);
4112
4113 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4114                                          struct sk_buff *skb,
4115                                          struct net_device *sb_dev)
4116 {
4117         int queue_index = 0;
4118
4119 #ifdef CONFIG_XPS
4120         u32 sender_cpu = skb->sender_cpu - 1;
4121
4122         if (sender_cpu >= (u32)NR_CPUS)
4123                 skb->sender_cpu = raw_smp_processor_id() + 1;
4124 #endif
4125
4126         if (dev->real_num_tx_queues != 1) {
4127                 const struct net_device_ops *ops = dev->netdev_ops;
4128
4129                 if (ops->ndo_select_queue)
4130                         queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4131                 else
4132                         queue_index = netdev_pick_tx(dev, skb, sb_dev);
4133
4134                 queue_index = netdev_cap_txqueue(dev, queue_index);
4135         }
4136
4137         skb_set_queue_mapping(skb, queue_index);
4138         return netdev_get_tx_queue(dev, queue_index);
4139 }
4140
4141 /**
4142  * __dev_queue_xmit() - transmit a buffer
4143  * @skb:        buffer to transmit
4144  * @sb_dev:     subordinate device used for L2 forwarding offload
4145  *
4146  * Queue a buffer for transmission to a network device. The caller must
4147  * have set the device and priority and built the buffer before calling
4148  * this function. The function can be called from an interrupt.
4149  *
4150  * When calling this method, interrupts MUST be enabled. This is because
4151  * the BH enable code must have IRQs enabled so that it will not deadlock.
4152  *
4153  * Regardless of the return value, the skb is consumed, so it is currently
4154  * difficult to retry a send via this function. (You can bump the ref count
4155  * before sending to hold a reference for retry if you are careful.)
4156  *
4157  * Return:
4158  * * 0                          - buffer successfully transmitted
4159  * * positive qdisc return code - NET_XMIT_DROP etc.
4160  * * negative errno             - other errors
4161  */
4162 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4163 {
4164         struct net_device *dev = skb->dev;
4165         struct netdev_queue *txq = NULL;
4166         struct Qdisc *q;
4167         int rc = -ENOMEM;
4168         bool again = false;
4169
4170         skb_reset_mac_header(skb);
4171
4172         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4173                 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4174
4175         /* Disable soft irqs for various locks below. Also
4176          * stops preemption for RCU.
4177          */
4178         rcu_read_lock_bh();
4179
4180         skb_update_prio(skb);
4181
4182         qdisc_pkt_len_init(skb);
4183 #ifdef CONFIG_NET_CLS_ACT
4184         skb->tc_at_ingress = 0;
4185 #endif
4186 #ifdef CONFIG_NET_EGRESS
4187         if (static_branch_unlikely(&egress_needed_key)) {
4188                 if (nf_hook_egress_active()) {
4189                         skb = nf_hook_egress(skb, &rc, dev);
4190                         if (!skb)
4191                                 goto out;
4192                 }
4193
4194                 netdev_xmit_skip_txqueue(false);
4195
4196                 nf_skip_egress(skb, true);
4197                 skb = sch_handle_egress(skb, &rc, dev);
4198                 if (!skb)
4199                         goto out;
4200                 nf_skip_egress(skb, false);
4201
4202                 if (netdev_xmit_txqueue_skipped())
4203                         txq = netdev_tx_queue_mapping(dev, skb);
4204         }
4205 #endif
4206         /* If device/qdisc don't need skb->dst, release it right now while
4207          * it's hot in this CPU's cache.
4208          */
4209         if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4210                 skb_dst_drop(skb);
4211         else
4212                 skb_dst_force(skb);
4213
4214         if (!txq)
4215                 txq = netdev_core_pick_tx(dev, skb, sb_dev);
4216
4217         q = rcu_dereference_bh(txq->qdisc);
4218
4219         trace_net_dev_queue(skb);
4220         if (q->enqueue) {
4221                 rc = __dev_xmit_skb(skb, q, dev, txq);
4222                 goto out;
4223         }
4224
4225         /* The device has no queue. Common case for software devices:
4226          * loopback, all sorts of tunnels...
4227          *
4228          * Really, it is unlikely that netif_tx_lock protection is necessary
4229          * here. (f.e. loopback and IP tunnels are clean, ignoring statistics
4230          * counters.)
4231          * However, it is possible that they rely on the protection
4232          * we provide here.
4233          *
4234          * Check this and take the lock; it is not prone to deadlocks.
4235          * Or shoot the noqueue qdisc instead - it is even simpler 8)
4236          */
4237         if (dev->flags & IFF_UP) {
4238                 int cpu = smp_processor_id(); /* ok because BHs are off */
4239
4240                 /* Other CPUs might concurrently change txq->xmit_lock_owner
4241                  * to -1 or to their own CPU id, but not to ours.
4242                  */
4243                 if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4244                         if (dev_xmit_recursion())
4245                                 goto recursion_alert;
4246
4247                         skb = validate_xmit_skb(skb, dev, &again);
4248                         if (!skb)
4249                                 goto out;
4250
4251                         HARD_TX_LOCK(dev, txq, cpu);
4252
4253                         if (!netif_xmit_stopped(txq)) {
4254                                 dev_xmit_recursion_inc();
4255                                 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4256                                 dev_xmit_recursion_dec();
4257                                 if (dev_xmit_complete(rc)) {
4258                                         HARD_TX_UNLOCK(dev, txq);
4259                                         goto out;
4260                                 }
4261                         }
4262                         HARD_TX_UNLOCK(dev, txq);
4263                         net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4264                                              dev->name);
4265                 } else {
4266                         /* Recursion is detected! It is possible,
4267                          * unfortunately
4268                          */
4269 recursion_alert:
4270                         net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4271                                              dev->name);
4272                 }
4273         }
4274
4275         rc = -ENETDOWN;
4276         rcu_read_unlock_bh();
4277
4278         dev_core_stats_tx_dropped_inc(dev);
4279         kfree_skb_list(skb);
4280         return rc;
4281 out:
4282         rcu_read_unlock_bh();
4283         return rc;
4284 }
4285 EXPORT_SYMBOL(__dev_queue_xmit);
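
/*
 * Illustrative sketch (not part of dev.c): in-kernel users normally go
 * through the dev_queue_xmit() wrapper (sb_dev == NULL). A minimal
 * sender, assuming the caller has already built the link-layer header;
 * ETH_P_FOO is a hypothetical protocol number:
 *
 *	static int foo_send(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		skb->dev = dev;
 *		skb->protocol = htons(ETH_P_FOO);
 *
 *		// Consumes the skb whatever happens; returns 0, a
 *		// positive qdisc code or a negative errno.
 *		return dev_queue_xmit(skb);
 *	}
 */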
4286
4287 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4288 {
4289         struct net_device *dev = skb->dev;
4290         struct sk_buff *orig_skb = skb;
4291         struct netdev_queue *txq;
4292         int ret = NETDEV_TX_BUSY;
4293         bool again = false;
4294
4295         if (unlikely(!netif_running(dev) ||
4296                      !netif_carrier_ok(dev)))
4297                 goto drop;
4298
4299         skb = validate_xmit_skb_list(skb, dev, &again);
4300         if (skb != orig_skb)
4301                 goto drop;
4302
4303         skb_set_queue_mapping(skb, queue_id);
4304         txq = skb_get_tx_queue(dev, skb);
4305
4306         local_bh_disable();
4307
4308         dev_xmit_recursion_inc();
4309         HARD_TX_LOCK(dev, txq, smp_processor_id());
4310         if (!netif_xmit_frozen_or_drv_stopped(txq))
4311                 ret = netdev_start_xmit(skb, dev, txq, false);
4312         HARD_TX_UNLOCK(dev, txq);
4313         dev_xmit_recursion_dec();
4314
4315         local_bh_enable();
4316         return ret;
4317 drop:
4318         dev_core_stats_tx_dropped_inc(dev);
4319         kfree_skb_list(skb);
4320         return NET_XMIT_DROP;
4321 }
4322 EXPORT_SYMBOL(__dev_direct_xmit);
4323
4324 /*************************************************************************
4325  *                      Receiver routines
4326  *************************************************************************/
4327
4328 int netdev_max_backlog __read_mostly = 1000;
4329 EXPORT_SYMBOL(netdev_max_backlog);
4330
4331 int netdev_tstamp_prequeue __read_mostly = 1;
4332 unsigned int sysctl_skb_defer_max __read_mostly = 64;
4333 int netdev_budget __read_mostly = 300;
4334 /* Must be at least 2 jiffies to guarantee a 1 jiffy timeout */
4335 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4336 int weight_p __read_mostly = 64;           /* old backlog weight */
4337 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
4338 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
4339 int dev_rx_weight __read_mostly = 64;
4340 int dev_tx_weight __read_mostly = 64;
4341
4342 /* Called with irqs disabled */
4343 static inline void ____napi_schedule(struct softnet_data *sd,
4344                                      struct napi_struct *napi)
4345 {
4346         struct task_struct *thread;
4347
4348         lockdep_assert_irqs_disabled();
4349
4350         if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4351                 /* Paired with smp_mb__before_atomic() in
4352                  * napi_enable()/dev_set_threaded().
4353                  * Use READ_ONCE() to guarantee a complete
4354                  * read on napi->thread. Only call
4355                  * wake_up_process() when it's not NULL.
4356                  */
4357                 thread = READ_ONCE(napi->thread);
4358                 if (thread) {
4359                         /* Avoid doing set_bit() if the thread is in
4360                          * INTERRUPTIBLE state, because napi_thread_wait()
4361                          * makes sure to proceed with napi polling
4362                          * if the thread is explicitly woken from here.
4363                          */
4364                         if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4365                                 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4366                         wake_up_process(thread);
4367                         return;
4368                 }
4369         }
4370
4371         list_add_tail(&napi->poll_list, &sd->poll_list);
4372         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4373 }
4374
4375 #ifdef CONFIG_RPS
4376
4377 /* One global table that all flow-based protocols share. */
4378 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4379 EXPORT_SYMBOL(rps_sock_flow_table);
4380 u32 rps_cpu_mask __read_mostly;
4381 EXPORT_SYMBOL(rps_cpu_mask);
4382
4383 struct static_key_false rps_needed __read_mostly;
4384 EXPORT_SYMBOL(rps_needed);
4385 struct static_key_false rfs_needed __read_mostly;
4386 EXPORT_SYMBOL(rfs_needed);
4387
4388 static struct rps_dev_flow *
4389 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4390             struct rps_dev_flow *rflow, u16 next_cpu)
4391 {
4392         if (next_cpu < nr_cpu_ids) {
4393 #ifdef CONFIG_RFS_ACCEL
4394                 struct netdev_rx_queue *rxqueue;
4395                 struct rps_dev_flow_table *flow_table;
4396                 struct rps_dev_flow *old_rflow;
4397                 u32 flow_id;
4398                 u16 rxq_index;
4399                 int rc;
4400
4401                 /* Should we steer this flow to a different hardware queue? */
4402                 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4403                     !(dev->features & NETIF_F_NTUPLE))
4404                         goto out;
4405                 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4406                 if (rxq_index == skb_get_rx_queue(skb))
4407                         goto out;
4408
4409                 rxqueue = dev->_rx + rxq_index;
4410                 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4411                 if (!flow_table)
4412                         goto out;
4413                 flow_id = skb_get_hash(skb) & flow_table->mask;
4414                 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4415                                                         rxq_index, flow_id);
4416                 if (rc < 0)
4417                         goto out;
4418                 old_rflow = rflow;
4419                 rflow = &flow_table->flows[flow_id];
4420                 rflow->filter = rc;
4421                 if (old_rflow->filter == rflow->filter)
4422                         old_rflow->filter = RPS_NO_FILTER;
4423         out:
4424 #endif
4425                 rflow->last_qtail =
4426                         per_cpu(softnet_data, next_cpu).input_queue_head;
4427         }
4428
4429         rflow->cpu = next_cpu;
4430         return rflow;
4431 }
4432
4433 /*
4434  * get_rps_cpu is called from netif_receive_skb and returns the target
4435  * CPU from the RPS map of the receiving queue for a given skb.
4436  * rcu_read_lock must be held on entry.
4437  */
4438 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4439                        struct rps_dev_flow **rflowp)
4440 {
4441         const struct rps_sock_flow_table *sock_flow_table;
4442         struct netdev_rx_queue *rxqueue = dev->_rx;
4443         struct rps_dev_flow_table *flow_table;
4444         struct rps_map *map;
4445         int cpu = -1;
4446         u32 tcpu;
4447         u32 hash;
4448
4449         if (skb_rx_queue_recorded(skb)) {
4450                 u16 index = skb_get_rx_queue(skb);
4451
4452                 if (unlikely(index >= dev->real_num_rx_queues)) {
4453                         WARN_ONCE(dev->real_num_rx_queues > 1,
4454                                   "%s received packet on queue %u, but number "
4455                                   "of RX queues is %u\n",
4456                                   dev->name, index, dev->real_num_rx_queues);
4457                         goto done;
4458                 }
4459                 rxqueue += index;
4460         }
4461
4462         /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4463
4464         flow_table = rcu_dereference(rxqueue->rps_flow_table);
4465         map = rcu_dereference(rxqueue->rps_map);
4466         if (!flow_table && !map)
4467                 goto done;
4468
4469         skb_reset_network_header(skb);
4470         hash = skb_get_hash(skb);
4471         if (!hash)
4472                 goto done;
4473
4474         sock_flow_table = rcu_dereference(rps_sock_flow_table);
4475         if (flow_table && sock_flow_table) {
4476                 struct rps_dev_flow *rflow;
4477                 u32 next_cpu;
4478                 u32 ident;
4479
4480                 /* First check the global flow table for a match */
4481                 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4482                 if ((ident ^ hash) & ~rps_cpu_mask)
4483                         goto try_rps;
4484
4485                 next_cpu = ident & rps_cpu_mask;
4486
4487                 /* OK, now we know there is a match; we can look at
4488                  * the local (per receive queue) flow table.
4489                  */
4490                 rflow = &flow_table->flows[hash & flow_table->mask];
4491                 tcpu = rflow->cpu;
4492
4493                 /*
4494                  * If the desired CPU (where last recvmsg was done) is
4495                  * different from current CPU (one in the rx-queue flow
4496                  * table entry), switch if one of the following holds:
4497                  *   - Current CPU is unset (>= nr_cpu_ids).
4498                  *   - Current CPU is offline.
4499                  *   - The current CPU's queue tail has advanced beyond the
4500                  *     last packet that was enqueued using this table entry.
4501                  *     This guarantees that all previous packets for the flow
4502                  *     have been dequeued, thus preserving in order delivery.
4503                  */
4504                 if (unlikely(tcpu != next_cpu) &&
4505                     (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4506                      ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4507                       rflow->last_qtail)) >= 0)) {
4508                         tcpu = next_cpu;
4509                         rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4510                 }
4511
4512                 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4513                         *rflowp = rflow;
4514                         cpu = tcpu;
4515                         goto done;
4516                 }
4517         }
4518
4519 try_rps:
4520
4521         if (map) {
4522                 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4523                 if (cpu_online(tcpu)) {
4524                         cpu = tcpu;
4525                         goto done;
4526                 }
4527         }
4528
4529 done:
4530         return cpu;
4531 }
4532
4533 #ifdef CONFIG_RFS_ACCEL
4534
4535 /**
4536  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4537  * @dev: Device on which the filter was set
4538  * @rxq_index: RX queue index
4539  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4540  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4541  *
4542  * Drivers that implement ndo_rx_flow_steer() should periodically call
4543  * this function for each installed filter and remove the filters for
4544  * which it returns %true.
4545  */
4546 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4547                          u32 flow_id, u16 filter_id)
4548 {
4549         struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4550         struct rps_dev_flow_table *flow_table;
4551         struct rps_dev_flow *rflow;
4552         bool expire = true;
4553         unsigned int cpu;
4554
4555         rcu_read_lock();
4556         flow_table = rcu_dereference(rxqueue->rps_flow_table);
4557         if (flow_table && flow_id <= flow_table->mask) {
4558                 rflow = &flow_table->flows[flow_id];
4559                 cpu = READ_ONCE(rflow->cpu);
4560                 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4561                     ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4562                            rflow->last_qtail) <
4563                      (int)(10 * flow_table->mask)))
4564                         expire = false;
4565         }
4566         rcu_read_unlock();
4567         return expire;
4568 }
4569 EXPORT_SYMBOL(rps_may_expire_flow);
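
/* Example (hypothetical driver, a sketch): a periodic worker walking the
 * filters it installed via ndo_rx_flow_steer() and removing the ones the
 * stack has lost interest in. foo_priv, foo_filter and foo_remove_filter
 * are made-up names used only for illustration.
 *
 *	static void foo_expire_rfs_filters(struct foo_priv *priv)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < priv->n_filters; i++) {
 *			struct foo_filter *f = &priv->filters[i];
 *
 *			if (!f->installed)
 *				continue;
 *			if (rps_may_expire_flow(priv->netdev, f->rxq_index,
 *						f->flow_id, f->filter_id))
 *				foo_remove_filter(priv, f);
 *		}
 *	}
 */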
4570
4571 #endif /* CONFIG_RFS_ACCEL */
4572
4573 /* Called from hardirq (IPI) context */
4574 static void rps_trigger_softirq(void *data)
4575 {
4576         struct softnet_data *sd = data;
4577
4578         ____napi_schedule(sd, &sd->backlog);
4579         sd->received_rps++;
4580 }
4581
4582 #endif /* CONFIG_RPS */
4583
4584 /* Called from hardirq (IPI) context */
4585 static void trigger_rx_softirq(void *data)
4586 {
4587         struct softnet_data *sd = data;
4588
4589         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4590         smp_store_release(&sd->defer_ipi_scheduled, 0);
4591 }
4592
4593 /*
4594  * Check if this softnet_data structure belongs to another CPU.
4595  * If so, queue it on our IPI list and return 1.
4596  * If not, return 0.
4597  */
4598 static int napi_schedule_rps(struct softnet_data *sd)
4599 {
4600         struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4601
4602 #ifdef CONFIG_RPS
4603         if (sd != mysd) {
4604                 sd->rps_ipi_next = mysd->rps_ipi_list;
4605                 mysd->rps_ipi_list = sd;
4606
4607                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4608                 return 1;
4609         }
4610 #endif /* CONFIG_RPS */
4611         __napi_schedule_irqoff(&mysd->backlog);
4612         return 0;
4613 }
4614
4615 #ifdef CONFIG_NET_FLOW_LIMIT
4616 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4617 #endif
4618
4619 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4620 {
4621 #ifdef CONFIG_NET_FLOW_LIMIT
4622         struct sd_flow_limit *fl;
4623         struct softnet_data *sd;
4624         unsigned int old_flow, new_flow;
4625
4626         if (qlen < (netdev_max_backlog >> 1))
4627                 return false;
4628
4629         sd = this_cpu_ptr(&softnet_data);
4630
4631         rcu_read_lock();
4632         fl = rcu_dereference(sd->flow_limit);
4633         if (fl) {
4634                 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4635                 old_flow = fl->history[fl->history_head];
4636                 fl->history[fl->history_head] = new_flow;
4637
4638                 fl->history_head++;
4639                 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4640
4641                 if (likely(fl->buckets[old_flow]))
4642                         fl->buckets[old_flow]--;
4643
4644                 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4645                         fl->count++;
4646                         rcu_read_unlock();
4647                         return true;
4648                 }
4649         }
4650         rcu_read_unlock();
4651 #endif
4652         return false;
4653 }
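
/* Worked example for the accounting above: history[] remembers the hash
 * bucket of the last FLOW_LIMIT_HISTORY enqueued packets and buckets[]
 * counts how many of those fall into each bucket. A packet is dropped once
 * its own bucket accounts for more than half of the recent history, so a
 * single elephant flow is throttled once the backlog is more than half
 * full while smaller flows keep getting through. The feature is enabled
 * per CPU via the net.core.flow_limit_cpu_bitmap sysctl and the table is
 * sized via net.core.flow_limit_table_len.
 */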
4654
4655 /*
4656  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4657  * queue (may be a remote CPU queue).
4658  */
4659 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4660                               unsigned int *qtail)
4661 {
4662         enum skb_drop_reason reason;
4663         struct softnet_data *sd;
4664         unsigned long flags;
4665         unsigned int qlen;
4666
4667         reason = SKB_DROP_REASON_NOT_SPECIFIED;
4668         sd = &per_cpu(softnet_data, cpu);
4669
4670         rps_lock_irqsave(sd, &flags);
4671         if (!netif_running(skb->dev))
4672                 goto drop;
4673         qlen = skb_queue_len(&sd->input_pkt_queue);
4674         if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4675                 if (qlen) {
4676 enqueue:
4677                         __skb_queue_tail(&sd->input_pkt_queue, skb);
4678                         input_queue_tail_incr_save(sd, qtail);
4679                         rps_unlock_irq_restore(sd, &flags);
4680                         return NET_RX_SUCCESS;
4681                 }
4682
4683                 /* Schedule NAPI for backlog device
4684                  * We can use a non-atomic operation since we own the queue lock
4685                  */
4686                 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
4687                         napi_schedule_rps(sd);
4688                 goto enqueue;
4689         }
4690         reason = SKB_DROP_REASON_CPU_BACKLOG;
4691
4692 drop:
4693         sd->dropped++;
4694         rps_unlock_irq_restore(sd, &flags);
4695
4696         dev_core_stats_rx_dropped_inc(skb->dev);
4697         kfree_skb_reason(skb, reason);
4698         return NET_RX_DROP;
4699 }
4700
4701 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4702 {
4703         struct net_device *dev = skb->dev;
4704         struct netdev_rx_queue *rxqueue;
4705
4706         rxqueue = dev->_rx;
4707
4708         if (skb_rx_queue_recorded(skb)) {
4709                 u16 index = skb_get_rx_queue(skb);
4710
4711                 if (unlikely(index >= dev->real_num_rx_queues)) {
4712                         WARN_ONCE(dev->real_num_rx_queues > 1,
4713                                   "%s received packet on queue %u, but number "
4714                                   "of RX queues is %u\n",
4715                                   dev->name, index, dev->real_num_rx_queues);
4716
4717                         return rxqueue; /* Return first rxqueue */
4718                 }
4719                 rxqueue += index;
4720         }
4721         return rxqueue;
4722 }
4723
4724 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4725                              struct bpf_prog *xdp_prog)
4726 {
4727         void *orig_data, *orig_data_end, *hard_start;
4728         struct netdev_rx_queue *rxqueue;
4729         bool orig_bcast, orig_host;
4730         u32 mac_len, frame_sz;
4731         __be16 orig_eth_type;
4732         struct ethhdr *eth;
4733         u32 metalen, act;
4734         int off;
4735
4736         /* The XDP program wants to see the packet starting at the MAC
4737          * header.
4738          */
4739         mac_len = skb->data - skb_mac_header(skb);
4740         hard_start = skb->data - skb_headroom(skb);
4741
4742         /* SKB "head" area always has tailroom for skb_shared_info */
4743         frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4744         frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4745
4746         rxqueue = netif_get_rxqueue(skb);
4747         xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4748         xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4749                          skb_headlen(skb) + mac_len, true);
4750
4751         orig_data_end = xdp->data_end;
4752         orig_data = xdp->data;
4753         eth = (struct ethhdr *)xdp->data;
4754         orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4755         orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4756         orig_eth_type = eth->h_proto;
4757
4758         act = bpf_prog_run_xdp(xdp_prog, xdp);
4759
4760         /* check if bpf_xdp_adjust_head was used */
4761         off = xdp->data - orig_data;
4762         if (off) {
4763                 if (off > 0)
4764                         __skb_pull(skb, off);
4765                 else if (off < 0)
4766                         __skb_push(skb, -off);
4767
4768                 skb->mac_header += off;
4769                 skb_reset_network_header(skb);
4770         }
4771
4772         /* check if bpf_xdp_adjust_tail was used */
4773         off = xdp->data_end - orig_data_end;
4774         if (off != 0) {
4775                 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4776                 skb->len += off; /* positive on grow, negative on shrink */
4777         }
4778
4779         /* check if XDP changed the eth hdr such that the SKB needs an update */
4780         eth = (struct ethhdr *)xdp->data;
4781         if ((orig_eth_type != eth->h_proto) ||
4782             (orig_host != ether_addr_equal_64bits(eth->h_dest,
4783                                                   skb->dev->dev_addr)) ||
4784             (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4785                 __skb_push(skb, ETH_HLEN);
4786                 skb->pkt_type = PACKET_HOST;
4787                 skb->protocol = eth_type_trans(skb, skb->dev);
4788         }
4789
4790         /* Redirect/Tx gives an L2 packet; code that will reuse the skb must
4791          * __skb_pull it before calling us again on the redirect path. We do not
4792          * call do_redirect here, as we leave that up to the caller.
4793          *
4794          * Caller is responsible for managing lifetime of skb (i.e. calling
4795          * kfree_skb in response to actions it cannot handle/XDP_DROP).
4796          */
4797         switch (act) {
4798         case XDP_REDIRECT:
4799         case XDP_TX:
4800                 __skb_push(skb, mac_len);
4801                 break;
4802         case XDP_PASS:
4803                 metalen = xdp->data - xdp->data_meta;
4804                 if (metalen)
4805                         skb_metadata_set(skb, metalen);
4806                 break;
4807         }
4808
4809         return act;
4810 }
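
/* Example (a sketch, not from this tree): a program exercising the
 * metadata path above; on XDP_PASS the reserved bytes become skb metadata
 * via skb_metadata_set(). Plain libbpf-style BPF C, illustration only.
 *
 *	SEC("xdp")
 *	int xdp_mark(struct xdp_md *ctx)
 *	{
 *		__u32 *meta;
 *
 *		if (bpf_xdp_adjust_meta(ctx, -4))	-- reserve 4 meta bytes
 *			return XDP_PASS;
 *		meta = (void *)(long)ctx->data_meta;
 *		if ((void *)(meta + 1) > (void *)(long)ctx->data)
 *			return XDP_PASS;		-- verifier bounds check
 *		*meta = 0xdeadbeef;
 *		return XDP_PASS;
 *	}
 */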
4811
4812 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4813                                      struct xdp_buff *xdp,
4814                                      struct bpf_prog *xdp_prog)
4815 {
4816         u32 act = XDP_DROP;
4817
4818         /* Reinjected packets coming from act_mirred or similar should
4819          * not get XDP generic processing.
4820          */
4821         if (skb_is_redirected(skb))
4822                 return XDP_PASS;
4823
4824         /* XDP packets must be linear and must have sufficient headroom
4825          * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4826          * native XDP provides, thus we need to do it here as well.
4827          */
4828         if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4829             skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4830                 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4831                 int troom = skb->tail + skb->data_len - skb->end;
4832
4833                 /* In case we have to expand the head and also linearize,
4834                  * let's do the pskb_expand_head() work just once here.
4835                  */
4836                 if (pskb_expand_head(skb,
4837                                      hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4838                                      troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4839                         goto do_drop;
4840                 if (skb_linearize(skb))
4841                         goto do_drop;
4842         }
4843
4844         act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
4845         switch (act) {
4846         case XDP_REDIRECT:
4847         case XDP_TX:
4848         case XDP_PASS:
4849                 break;
4850         default:
4851                 bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
4852                 fallthrough;
4853         case XDP_ABORTED:
4854                 trace_xdp_exception(skb->dev, xdp_prog, act);
4855                 fallthrough;
4856         case XDP_DROP:
4857         do_drop:
4858                 kfree_skb(skb);
4859                 break;
4860         }
4861
4862         return act;
4863 }
4864
4865 /* When doing generic XDP we have to bypass the qdisc layer and the
4866  * network taps in order to match in-driver-XDP behavior.
4867  */
4868 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4869 {
4870         struct net_device *dev = skb->dev;
4871         struct netdev_queue *txq;
4872         bool free_skb = true;
4873         int cpu, rc;
4874
4875         txq = netdev_core_pick_tx(dev, skb, NULL);
4876         cpu = smp_processor_id();
4877         HARD_TX_LOCK(dev, txq, cpu);
4878         if (!netif_xmit_stopped(txq)) {
4879                 rc = netdev_start_xmit(skb, dev, txq, 0);
4880                 if (dev_xmit_complete(rc))
4881                         free_skb = false;
4882         }
4883         HARD_TX_UNLOCK(dev, txq);
4884         if (free_skb) {
4885                 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4886                 kfree_skb(skb);
4887         }
4888 }
4889
4890 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4891
4892 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4893 {
4894         if (xdp_prog) {
4895                 struct xdp_buff xdp;
4896                 u32 act;
4897                 int err;
4898
4899                 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4900                 if (act != XDP_PASS) {
4901                         switch (act) {
4902                         case XDP_REDIRECT:
4903                                 err = xdp_do_generic_redirect(skb->dev, skb,
4904                                                               &xdp, xdp_prog);
4905                                 if (err)
4906                                         goto out_redir;
4907                                 break;
4908                         case XDP_TX:
4909                                 generic_xdp_tx(skb, xdp_prog);
4910                                 break;
4911                         }
4912                         return XDP_DROP;
4913                 }
4914         }
4915         return XDP_PASS;
4916 out_redir:
4917         kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
4918         return XDP_DROP;
4919 }
4920 EXPORT_SYMBOL_GPL(do_xdp_generic);
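
/* Usage note: this generic (skb-based) XDP path runs when a program is
 * attached in xdpgeneric mode, for instance with iproute2:
 *
 *	ip link set dev eth0 xdpgeneric obj prog.o sec xdp
 *
 * Native (in-driver) XDP is preferred where available; generic mode mainly
 * serves testing and NICs whose drivers lack XDP support.
 */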
4921
4922 static int netif_rx_internal(struct sk_buff *skb)
4923 {
4924         int ret;
4925
4926         net_timestamp_check(netdev_tstamp_prequeue, skb);
4927
4928         trace_netif_rx(skb);
4929
4930 #ifdef CONFIG_RPS
4931         if (static_branch_unlikely(&rps_needed)) {
4932                 struct rps_dev_flow voidflow, *rflow = &voidflow;
4933                 int cpu;
4934
4935                 rcu_read_lock();
4936
4937                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
4938                 if (cpu < 0)
4939                         cpu = smp_processor_id();
4940
4941                 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4942
4943                 rcu_read_unlock();
4944         } else
4945 #endif
4946         {
4947                 unsigned int qtail;
4948
4949                 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
4950         }
4951         return ret;
4952 }
4953
4954 /**
4955  *      __netif_rx      -       Slightly optimized version of netif_rx
4956  *      @skb: buffer to post
4957  *
4958  *      This behaves as netif_rx except that it does not disable bottom halves.
4959  *      As a result this function may only be invoked from the interrupt context
4960  *      (either hard or soft interrupt).
4961  */
4962 int __netif_rx(struct sk_buff *skb)
4963 {
4964         int ret;
4965
4966         lockdep_assert_once(hardirq_count() | softirq_count());
4967
4968         trace_netif_rx_entry(skb);
4969         ret = netif_rx_internal(skb);
4970         trace_netif_rx_exit(ret);
4971         return ret;
4972 }
4973 EXPORT_SYMBOL(__netif_rx);
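
/* Example (a sketch): __netif_rx() suits virtual drivers whose xmit path
 * already runs in BH context, loopback-style:
 *
 *	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		skb_orphan(skb);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		__netif_rx(skb);	-- no local_bh_disable() needed here
 *		return NETDEV_TX_OK;
 *	}
 */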
4974
4975 /**
4976  *      netif_rx        -       post buffer to the network code
4977  *      @skb: buffer to post
4978  *
4979  *      This function receives a packet from a device driver and queues it for
4980  *      the upper (protocol) levels to process via the backlog NAPI device. It
4981  *      always succeeds. The buffer may be dropped during processing for
4982  *      congestion control or by the protocol layers.
4983  *      The network buffer is passed via the backlog NAPI device. Modern NIC
4984  *      drivers should use NAPI and GRO.
4985  *      This function can be used from interrupt and from process context. The
4986  *      caller from process context must not disable interrupts before invoking
4987  *      this function.
4988  *
4989  *      return values:
4990  *      NET_RX_SUCCESS  (no congestion)
4991  *      NET_RX_DROP     (packet was dropped)
4992  *
4993  */
4994 int netif_rx(struct sk_buff *skb)
4995 {
4996         bool need_bh_off = !(hardirq_count() | softirq_count());
4997         int ret;
4998
4999         if (need_bh_off)
5000                 local_bh_disable();
5001         trace_netif_rx_entry(skb);
5002         ret = netif_rx_internal(skb);
5003         trace_netif_rx_exit(ret);
5004         if (need_bh_off)
5005                 local_bh_enable();
5006         return ret;
5007 }
5008 EXPORT_SYMBOL(netif_rx);
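
/* Example (hypothetical non-NAPI driver, a sketch): the classic call site
 * is an RX interrupt handler; netif_rx() only queues to the backlog and
 * the real processing happens later in NET_RX_SOFTIRQ. foo_fetch_frame()
 * is a made-up helper.
 *
 *	static irqreturn_t foo_rx_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = foo_fetch_frame(dev);
 *
 *		if (!skb)
 *			return IRQ_NONE;
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *		return IRQ_HANDLED;
 *	}
 */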
5009
5010 static __latent_entropy void net_tx_action(struct softirq_action *h)
5011 {
5012         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5013
5014         if (sd->completion_queue) {
5015                 struct sk_buff *clist;
5016
5017                 local_irq_disable();
5018                 clist = sd->completion_queue;
5019                 sd->completion_queue = NULL;
5020                 local_irq_enable();
5021
5022                 while (clist) {
5023                         struct sk_buff *skb = clist;
5024
5025                         clist = clist->next;
5026
5027                         WARN_ON(refcount_read(&skb->users));
5028                         if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
5029                                 trace_consume_skb(skb);
5030                         else
5031                                 trace_kfree_skb(skb, net_tx_action,
5032                                                 SKB_DROP_REASON_NOT_SPECIFIED);
5033
5034                         if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5035                                 __kfree_skb(skb);
5036                         else
5037                                 __kfree_skb_defer(skb);
5038                 }
5039         }
5040
5041         if (sd->output_queue) {
5042                 struct Qdisc *head;
5043
5044                 local_irq_disable();
5045                 head = sd->output_queue;
5046                 sd->output_queue = NULL;
5047                 sd->output_queue_tailp = &sd->output_queue;
5048                 local_irq_enable();
5049
5050                 rcu_read_lock();
5051
5052                 while (head) {
5053                         struct Qdisc *q = head;
5054                         spinlock_t *root_lock = NULL;
5055
5056                         head = head->next_sched;
5057
5058                         /* We need to make sure head->next_sched is read
5059                          * before clearing __QDISC_STATE_SCHED
5060                          */
5061                         smp_mb__before_atomic();
5062
5063                         if (!(q->flags & TCQ_F_NOLOCK)) {
5064                                 root_lock = qdisc_lock(q);
5065                                 spin_lock(root_lock);
5066                         } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5067                                                      &q->state))) {
5068                                 /* There is a synchronize_net() between
5069                                  * STATE_DEACTIVATED flag being set and
5070                                  * qdisc_reset()/some_qdisc_is_busy() in
5071                                  * dev_deactivate(), so we can safely bail out
5072                                  * early here to avoid data race between
5073                                  * qdisc_deactivate() and some_qdisc_is_busy()
5074                                  * for lockless qdisc.
5075                                  */
5076                                 clear_bit(__QDISC_STATE_SCHED, &q->state);
5077                                 continue;
5078                         }
5079
5080                         clear_bit(__QDISC_STATE_SCHED, &q->state);
5081                         qdisc_run(q);
5082                         if (root_lock)
5083                                 spin_unlock(root_lock);
5084                 }
5085
5086                 rcu_read_unlock();
5087         }
5088
5089         xfrm_dev_backlog(sd);
5090 }
5091
5092 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5093 /* This hook is defined here for ATM LANE */
5094 int (*br_fdb_test_addr_hook)(struct net_device *dev,
5095                              unsigned char *addr) __read_mostly;
5096 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5097 #endif
5098
5099 static inline struct sk_buff *
5100 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
5101                    struct net_device *orig_dev, bool *another)
5102 {
5103 #ifdef CONFIG_NET_CLS_ACT
5104         struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
5105         struct tcf_result cl_res;
5106
5107         /* If there's at least one ingress present somewhere (so
5108          * we get here via enabled static key), remaining devices
5109          * that are not configured with an ingress qdisc will bail
5110          * out here.
5111          */
5112         if (!miniq)
5113                 return skb;
5114
5115         if (*pt_prev) {
5116                 *ret = deliver_skb(skb, *pt_prev, orig_dev);
5117                 *pt_prev = NULL;
5118         }
5119
5120         qdisc_skb_cb(skb)->pkt_len = skb->len;
5121         tc_skb_cb(skb)->mru = 0;
5122         tc_skb_cb(skb)->post_ct = false;
5123         skb->tc_at_ingress = 1;
5124         mini_qdisc_bstats_cpu_update(miniq, skb);
5125
5126         switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
5127         case TC_ACT_OK:
5128         case TC_ACT_RECLASSIFY:
5129                 skb->tc_index = TC_H_MIN(cl_res.classid);
5130                 break;
5131         case TC_ACT_SHOT:
5132                 mini_qdisc_qstats_cpu_drop(miniq);
5133                 kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS);
5134                 return NULL;
5135         case TC_ACT_STOLEN:
5136         case TC_ACT_QUEUED:
5137         case TC_ACT_TRAP:
5138                 consume_skb(skb);
5139                 return NULL;
5140         case TC_ACT_REDIRECT:
5141                 /* skb_mac_header check was done by cls/act_bpf, so
5142                  * we can safely push the L2 header back before
5143                  * redirecting to another netdev
5144                  */
5145                 __skb_push(skb, skb->mac_len);
5146                 if (skb_do_redirect(skb) == -EAGAIN) {
5147                         __skb_pull(skb, skb->mac_len);
5148                         *another = true;
5149                         break;
5150                 }
5151                 return NULL;
5152         case TC_ACT_CONSUMED:
5153                 return NULL;
5154         default:
5155                 break;
5156         }
5157 #endif /* CONFIG_NET_CLS_ACT */
5158         return skb;
5159 }
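
/* Configuration note: this hook only runs once an ingress (or clsact)
 * qdisc is installed on a device, which enables ingress_needed_key. A
 * minimal iproute2 setup might be:
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall action drop
 *
 * after which every frame received on eth0 takes the TC_ACT_SHOT branch
 * above and is dropped with SKB_DROP_REASON_TC_INGRESS.
 */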
5160
5161 /**
5162  *      netdev_is_rx_handler_busy - check if receive handler is registered
5163  *      @dev: device to check
5164  *
5165  *      Check if a receive handler is already registered for a given device.
5166  *      Return true if there is one.
5167  *
5168  *      The caller must hold the rtnl_mutex.
5169  */
5170 bool netdev_is_rx_handler_busy(struct net_device *dev)
5171 {
5172         ASSERT_RTNL();
5173         return dev && rtnl_dereference(dev->rx_handler);
5174 }
5175 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5176
5177 /**
5178  *      netdev_rx_handler_register - register receive handler
5179  *      @dev: device to register a handler for
5180  *      @rx_handler: receive handler to register
5181  *      @rx_handler_data: data pointer that is used by rx handler
5182  *
5183  *      Register a receive handler for a device. This handler will then be
5184  *      called from __netif_receive_skb. A negative errno code is returned
5185  *      on a failure.
5186  *
5187  *      The caller must hold the rtnl_mutex.
5188  *
5189  *      For a general description of rx_handler, see enum rx_handler_result.
5190  */
5191 int netdev_rx_handler_register(struct net_device *dev,
5192                                rx_handler_func_t *rx_handler,
5193                                void *rx_handler_data)
5194 {
5195         if (netdev_is_rx_handler_busy(dev))
5196                 return -EBUSY;
5197
5198         if (dev->priv_flags & IFF_NO_RX_HANDLER)
5199                 return -EINVAL;
5200
5201         /* Note: rx_handler_data must be set before rx_handler */
5202         rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5203         rcu_assign_pointer(dev->rx_handler, rx_handler);
5204
5205         return 0;
5206 }
5207 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
5208
5209 /**
5210  *      netdev_rx_handler_unregister - unregister receive handler
5211  *      @dev: device to unregister a handler from
5212  *
5213  *      Unregister a receive handler from a device.
5214  *
5215  *      The caller must hold the rtnl_mutex.
5216  */
5217 void netdev_rx_handler_unregister(struct net_device *dev)
5218 {
5219
5220         ASSERT_RTNL();
5221         RCU_INIT_POINTER(dev->rx_handler, NULL);
5222         /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
5223          * section has a guarantee to see a non NULL rx_handler_data
5224          * as well.
5225          */
5226         synchronize_net();
5227         RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5228 }
5229 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
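
/* Example (hypothetical, a sketch): how a stacked device (bridge- or
 * bonding-like) might claim frames from a lower device. foo_port and
 * foo_wants() are made-up; the handler itself is invoked from
 * __netif_receive_skb_core() under rcu_read_lock().
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct foo_port *port =
 *			rcu_dereference(skb->dev->rx_handler_data);
 *
 *		if (!foo_wants(port, skb))
 *			return RX_HANDLER_PASS;	-- normal delivery continues
 *		skb->dev = port->upper_dev;
 *		return RX_HANDLER_ANOTHER;	-- reprocess on the upper dev
 *	}
 *
 * Registration happens under rtnl_lock():
 *
 *	err = netdev_rx_handler_register(lower_dev, foo_handle_frame, port);
 */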
5230
5231 /*
5232  * Limit the use of PFMEMALLOC reserves to those protocols that implement
5233  * the special handling of PFMEMALLOC skbs.
5234  */
5235 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5236 {
5237         switch (skb->protocol) {
5238         case htons(ETH_P_ARP):
5239         case htons(ETH_P_IP):
5240         case htons(ETH_P_IPV6):
5241         case htons(ETH_P_8021Q):
5242         case htons(ETH_P_8021AD):
5243                 return true;
5244         default:
5245                 return false;
5246         }
5247 }
5248
5249 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5250                              int *ret, struct net_device *orig_dev)
5251 {
5252         if (nf_hook_ingress_active(skb)) {
5253                 int ingress_retval;
5254
5255                 if (*pt_prev) {
5256                         *ret = deliver_skb(skb, *pt_prev, orig_dev);
5257                         *pt_prev = NULL;
5258                 }
5259
5260                 rcu_read_lock();
5261                 ingress_retval = nf_hook_ingress(skb);
5262                 rcu_read_unlock();
5263                 return ingress_retval;
5264         }
5265         return 0;
5266 }
5267
5268 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5269                                     struct packet_type **ppt_prev)
5270 {
5271         struct packet_type *ptype, *pt_prev;
5272         rx_handler_func_t *rx_handler;
5273         struct sk_buff *skb = *pskb;
5274         struct net_device *orig_dev;
5275         bool deliver_exact = false;
5276         int ret = NET_RX_DROP;
5277         __be16 type;
5278
5279         net_timestamp_check(!netdev_tstamp_prequeue, skb);
5280
5281         trace_netif_receive_skb(skb);
5282
5283         orig_dev = skb->dev;
5284
5285         skb_reset_network_header(skb);
5286         if (!skb_transport_header_was_set(skb))
5287                 skb_reset_transport_header(skb);
5288         skb_reset_mac_len(skb);
5289
5290         pt_prev = NULL;
5291
5292 another_round:
5293         skb->skb_iif = skb->dev->ifindex;
5294
5295         __this_cpu_inc(softnet_data.processed);
5296
5297         if (static_branch_unlikely(&generic_xdp_needed_key)) {
5298                 int ret2;
5299
5300                 migrate_disable();
5301                 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5302                 migrate_enable();
5303
5304                 if (ret2 != XDP_PASS) {
5305                         ret = NET_RX_DROP;
5306                         goto out;
5307                 }
5308         }
5309
5310         if (eth_type_vlan(skb->protocol)) {
5311                 skb = skb_vlan_untag(skb);
5312                 if (unlikely(!skb))
5313                         goto out;
5314         }
5315
5316         if (skb_skip_tc_classify(skb))
5317                 goto skip_classify;
5318
5319         if (pfmemalloc)
5320                 goto skip_taps;
5321
5322         list_for_each_entry_rcu(ptype, &ptype_all, list) {
5323                 if (pt_prev)
5324                         ret = deliver_skb(skb, pt_prev, orig_dev);
5325                 pt_prev = ptype;
5326         }
5327
5328         list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5329                 if (pt_prev)
5330                         ret = deliver_skb(skb, pt_prev, orig_dev);
5331                 pt_prev = ptype;
5332         }
5333
5334 skip_taps:
5335 #ifdef CONFIG_NET_INGRESS
5336         if (static_branch_unlikely(&ingress_needed_key)) {
5337                 bool another = false;
5338
5339                 nf_skip_egress(skb, true);
5340                 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5341                                          &another);
5342                 if (another)
5343                         goto another_round;
5344                 if (!skb)
5345                         goto out;
5346
5347                 nf_skip_egress(skb, false);
5348                 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5349                         goto out;
5350         }
5351 #endif
5352         skb_reset_redirect(skb);
5353 skip_classify:
5354         if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5355                 goto drop;
5356
5357         if (skb_vlan_tag_present(skb)) {
5358                 if (pt_prev) {
5359                         ret = deliver_skb(skb, pt_prev, orig_dev);
5360                         pt_prev = NULL;
5361                 }
5362                 if (vlan_do_receive(&skb))
5363                         goto another_round;
5364                 else if (unlikely(!skb))
5365                         goto out;
5366         }
5367
5368         rx_handler = rcu_dereference(skb->dev->rx_handler);
5369         if (rx_handler) {
5370                 if (pt_prev) {
5371                         ret = deliver_skb(skb, pt_prev, orig_dev);
5372                         pt_prev = NULL;
5373                 }
5374                 switch (rx_handler(&skb)) {
5375                 case RX_HANDLER_CONSUMED:
5376                         ret = NET_RX_SUCCESS;
5377                         goto out;
5378                 case RX_HANDLER_ANOTHER:
5379                         goto another_round;
5380                 case RX_HANDLER_EXACT:
5381                         deliver_exact = true;
5382                         break;
5383                 case RX_HANDLER_PASS:
5384                         break;
5385                 default:
5386                         BUG();
5387                 }
5388         }
5389
5390         if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5391 check_vlan_id:
5392                 if (skb_vlan_tag_get_id(skb)) {
5393                         /* Vlan id is non-zero and vlan_do_receive() above couldn't
5394                          * find the vlan device.
5395                          */
5396                         skb->pkt_type = PACKET_OTHERHOST;
5397                 } else if (eth_type_vlan(skb->protocol)) {
5398                         /* Outer header is 802.1P with vlan 0, inner header is
5399                          * 802.1Q or 802.1AD and vlan_do_receive() above could
5400                          * not find vlan dev for vlan id 0.
5401                          */
5402                         __vlan_hwaccel_clear_tag(skb);
5403                         skb = skb_vlan_untag(skb);
5404                         if (unlikely(!skb))
5405                                 goto out;
5406                         if (vlan_do_receive(&skb))
5407                                 /* After stripping off the 802.1P header with vlan 0,
5408                                  * a vlan dev is found for the inner header.
5409                                  */
5410                                 goto another_round;
5411                         else if (unlikely(!skb))
5412                                 goto out;
5413                         else
5414                                 /* We have stripped the outer 802.1P vlan 0 header
5415                                  * but could not find a vlan dev.
5416                                  * Check again for a vlan id to set OTHERHOST.
5417                                  */
5418                                 goto check_vlan_id;
5419                 }
5420                 /* Note: we might in the future use prio bits
5421                  * and set skb->priority like in vlan_do_receive()
5422                  * For the time being, just ignore Priority Code Point
5423                  */
5424                 __vlan_hwaccel_clear_tag(skb);
5425         }
5426
5427         type = skb->protocol;
5428
5429         /* deliver only exact match when indicated */
5430         if (likely(!deliver_exact)) {
5431                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5432                                        &ptype_base[ntohs(type) &
5433                                                    PTYPE_HASH_MASK]);
5434         }
5435
5436         deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5437                                &orig_dev->ptype_specific);
5438
5439         if (unlikely(skb->dev != orig_dev)) {
5440                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5441                                        &skb->dev->ptype_specific);
5442         }
5443
5444         if (pt_prev) {
5445                 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5446                         goto drop;
5447                 *ppt_prev = pt_prev;
5448         } else {
5449 drop:
5450                 if (!deliver_exact)
5451                         dev_core_stats_rx_dropped_inc(skb->dev);
5452                 else
5453                         dev_core_stats_rx_nohandler_inc(skb->dev);
5454                 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
5455                 /* Jamal, now you will not be able to escape explaining
5456                  * to me how you were going to use this. :-)
5457                  */
5458                 ret = NET_RX_DROP;
5459         }
5460
5461 out:
5462         /* The invariant here is that if *ppt_prev is not NULL
5463          * then skb should also be non-NULL.
5464          *
5465          * Apparently *ppt_prev assignment above holds this invariant due to
5466          * skb dereferencing near it.
5467          */
5468         *pskb = skb;
5469         return ret;
5470 }
5471
5472 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5473 {
5474         struct net_device *orig_dev = skb->dev;
5475         struct packet_type *pt_prev = NULL;
5476         int ret;
5477
5478         ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5479         if (pt_prev)
5480                 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5481                                          skb->dev, pt_prev, orig_dev);
5482         return ret;
5483 }
5484
5485 /**
5486  *      netif_receive_skb_core - special purpose version of netif_receive_skb
5487  *      @skb: buffer to process
5488  *
5489  *      More direct receive version of netif_receive_skb().  It should
5490  *      only be used by callers that have a need to skip RPS and Generic XDP.
5491  *      Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5492  *
5493  *      This function may only be called from softirq context and interrupts
5494  *      should be enabled.
5495  *
5496  *      Return values (usually ignored):
5497  *      NET_RX_SUCCESS: no congestion
5498  *      NET_RX_DROP: packet was dropped
5499  */
5500 int netif_receive_skb_core(struct sk_buff *skb)
5501 {
5502         int ret;
5503
5504         rcu_read_lock();
5505         ret = __netif_receive_skb_one_core(skb, false);
5506         rcu_read_unlock();
5507
5508         return ret;
5509 }
5510 EXPORT_SYMBOL(netif_receive_skb_core);
5511
5512 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5513                                                   struct packet_type *pt_prev,
5514                                                   struct net_device *orig_dev)
5515 {
5516         struct sk_buff *skb, *next;
5517
5518         if (!pt_prev)
5519                 return;
5520         if (list_empty(head))
5521                 return;
5522         if (pt_prev->list_func != NULL)
5523                 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5524                                    ip_list_rcv, head, pt_prev, orig_dev);
5525         else
5526                 list_for_each_entry_safe(skb, next, head, list) {
5527                         skb_list_del_init(skb);
5528                         pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5529                 }
5530 }
5531
5532 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5533 {
5534         /* Fast-path assumptions:
5535          * - There is no RX handler.
5536          * - Only one packet_type matches.
5537          * If either of these fails, we will end up doing some per-packet
5538          * processing in-line, then handling the 'last ptype' for the whole
5539          * sublist.  This can't cause out-of-order delivery to any single ptype,
5540          * because the 'last ptype' must be constant across the sublist, and all
5541          * other ptypes are handled per-packet.
5542          */
5543         /* Current (common) ptype of sublist */
5544         struct packet_type *pt_curr = NULL;
5545         /* Current (common) orig_dev of sublist */
5546         struct net_device *od_curr = NULL;
5547         struct list_head sublist;
5548         struct sk_buff *skb, *next;
5549
5550         INIT_LIST_HEAD(&sublist);
5551         list_for_each_entry_safe(skb, next, head, list) {
5552                 struct net_device *orig_dev = skb->dev;
5553                 struct packet_type *pt_prev = NULL;
5554
5555                 skb_list_del_init(skb);
5556                 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5557                 if (!pt_prev)
5558                         continue;
5559                 if (pt_curr != pt_prev || od_curr != orig_dev) {
5560                         /* dispatch old sublist */
5561                         __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5562                         /* start new sublist */
5563                         INIT_LIST_HEAD(&sublist);
5564                         pt_curr = pt_prev;
5565                         od_curr = orig_dev;
5566                 }
5567                 list_add_tail(&skb->list, &sublist);
5568         }
5569
5570         /* dispatch final sublist */
5571         __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5572 }
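
/* Worked example for the sublist logic above: for a burst of
 * { v4, v4, v4, arp, v4 } received on one device, the loop dispatches
 * three sublists: the first three IPv4 skbs reach ip_rcv's ptype in a
 * single __netif_receive_skb_list_ptype() call (hitting ip_list_rcv()),
 * then the ARP packet alone, then the final IPv4 skb. Per-ptype ordering
 * is preserved because the pending sublist is flushed whenever the
 * (ptype, orig_dev) pair changes.
 */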
5573
5574 static int __netif_receive_skb(struct sk_buff *skb)
5575 {
5576         int ret;
5577
5578         if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5579                 unsigned int noreclaim_flag;
5580
5581                 /*
5582                  * PFMEMALLOC skbs are special, they should
5583                  * - be delivered to SOCK_MEMALLOC sockets only
5584                  * - stay away from userspace
5585                  * - have bounded memory usage
5586                  *
5587                  * Use PF_MEMALLOC as this saves us from propagating the allocation
5588                  * context down to all allocation sites.
5589                  */
5590                 noreclaim_flag = memalloc_noreclaim_save();
5591                 ret = __netif_receive_skb_one_core(skb, true);
5592                 memalloc_noreclaim_restore(noreclaim_flag);
5593         } else
5594                 ret = __netif_receive_skb_one_core(skb, false);
5595
5596         return ret;
5597 }
5598
5599 static void __netif_receive_skb_list(struct list_head *head)
5600 {
5601         unsigned long noreclaim_flag = 0;
5602         struct sk_buff *skb, *next;
5603         bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5604
5605         list_for_each_entry_safe(skb, next, head, list) {
5606                 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5607                         struct list_head sublist;
5608
5609                         /* Handle the previous sublist */
5610                         list_cut_before(&sublist, head, &skb->list);
5611                         if (!list_empty(&sublist))
5612                                 __netif_receive_skb_list_core(&sublist, pfmemalloc);
5613                         pfmemalloc = !pfmemalloc;
5614                         /* See comments in __netif_receive_skb */
5615                         if (pfmemalloc)
5616                                 noreclaim_flag = memalloc_noreclaim_save();
5617                         else
5618                                 memalloc_noreclaim_restore(noreclaim_flag);
5619                 }
5620         }
5621         /* Handle the remaining sublist */
5622         if (!list_empty(head))
5623                 __netif_receive_skb_list_core(head, pfmemalloc);
5624         /* Restore pflags */
5625         if (pfmemalloc)
5626                 memalloc_noreclaim_restore(noreclaim_flag);
5627 }
5628
5629 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5630 {
5631         struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5632         struct bpf_prog *new = xdp->prog;
5633         int ret = 0;
5634
5635         switch (xdp->command) {
5636         case XDP_SETUP_PROG:
5637                 rcu_assign_pointer(dev->xdp_prog, new);
5638                 if (old)
5639                         bpf_prog_put(old);
5640
5641                 if (old && !new) {
5642                         static_branch_dec(&generic_xdp_needed_key);
5643                 } else if (new && !old) {
5644                         static_branch_inc(&generic_xdp_needed_key);
5645                         dev_disable_lro(dev);
5646                         dev_disable_gro_hw(dev);
5647                 }
5648                 break;
5649
5650         default:
5651                 ret = -EINVAL;
5652                 break;
5653         }
5654
5655         return ret;
5656 }
5657
5658 static int netif_receive_skb_internal(struct sk_buff *skb)
5659 {
5660         int ret;
5661
5662         net_timestamp_check(netdev_tstamp_prequeue, skb);
5663
5664         if (skb_defer_rx_timestamp(skb))
5665                 return NET_RX_SUCCESS;
5666
5667         rcu_read_lock();
5668 #ifdef CONFIG_RPS
5669         if (static_branch_unlikely(&rps_needed)) {
5670                 struct rps_dev_flow voidflow, *rflow = &voidflow;
5671                 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5672
5673                 if (cpu >= 0) {
5674                         ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5675                         rcu_read_unlock();
5676                         return ret;
5677                 }
5678         }
5679 #endif
5680         ret = __netif_receive_skb(skb);
5681         rcu_read_unlock();
5682         return ret;
5683 }
5684
5685 void netif_receive_skb_list_internal(struct list_head *head)
5686 {
5687         struct sk_buff *skb, *next;
5688         struct list_head sublist;
5689
5690         INIT_LIST_HEAD(&sublist);
5691         list_for_each_entry_safe(skb, next, head, list) {
5692                 net_timestamp_check(netdev_tstamp_prequeue, skb);
5693                 skb_list_del_init(skb);
5694                 if (!skb_defer_rx_timestamp(skb))
5695                         list_add_tail(&skb->list, &sublist);
5696         }
5697         list_splice_init(&sublist, head);
5698
5699         rcu_read_lock();
5700 #ifdef CONFIG_RPS
5701         if (static_branch_unlikely(&rps_needed)) {
5702                 list_for_each_entry_safe(skb, next, head, list) {
5703                         struct rps_dev_flow voidflow, *rflow = &voidflow;
5704                         int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5705
5706                         if (cpu >= 0) {
5707                                 /* Will be handled, remove from list */
5708                                 skb_list_del_init(skb);
5709                                 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5710                         }
5711                 }
5712         }
5713 #endif
5714         __netif_receive_skb_list(head);
5715         rcu_read_unlock();
5716 }
5717
5718 /**
5719  *      netif_receive_skb - process receive buffer from network
5720  *      @skb: buffer to process
5721  *
5722  *      netif_receive_skb() is the main receive data processing function.
5723  *      It always succeeds. The buffer may be dropped during processing
5724  *      for congestion control or by the protocol layers.
5725  *
5726  *      This function may only be called from softirq context and interrupts
5727  *      should be enabled.
5728  *
5729  *      Return values (usually ignored):
5730  *      NET_RX_SUCCESS: no congestion
5731  *      NET_RX_DROP: packet was dropped
5732  */
5733 int netif_receive_skb(struct sk_buff *skb)
5734 {
5735         int ret;
5736
5737         trace_netif_receive_skb_entry(skb);
5738
5739         ret = netif_receive_skb_internal(skb);
5740         trace_netif_receive_skb_exit(ret);
5741
5742         return ret;
5743 }
5744 EXPORT_SYMBOL(netif_receive_skb);
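
/* Example (hypothetical NAPI driver, a sketch): netif_receive_skb() is
 * typically called from a poll handler; a driver wanting GRO would call
 * napi_gro_receive() instead. foo_next_rx_skb() is a made-up helper.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = foo_next_rx_skb(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */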
5745
5746 /**
5747  *      netif_receive_skb_list - process many receive buffers from network
5748  *      @head: list of skbs to process.
5749  *
5750  *      Since the return value of netif_receive_skb() is normally ignored, and
5751  *      wouldn't be meaningful for a list, this function returns void.
5752  *
5753  *      This function may only be called from softirq context and interrupts
5754  *      should be enabled.
5755  */
5756 void netif_receive_skb_list(struct list_head *head)
5757 {
5758         struct sk_buff *skb;
5759
5760         if (list_empty(head))
5761                 return;
5762         if (trace_netif_receive_skb_list_entry_enabled()) {
5763                 list_for_each_entry(skb, head, list)
5764                         trace_netif_receive_skb_list_entry(skb);
5765         }
5766         netif_receive_skb_list_internal(head);
5767         trace_netif_receive_skb_list_exit(0);
5768 }
5769 EXPORT_SYMBOL(netif_receive_skb_list);
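
/* Example (a sketch): batching amortizes per-packet costs. A caller
 * collects skbs on a list and hands them over in one call
 * (foo_next_rx_skb() is a made-up helper):
 *
 *	LIST_HEAD(rx_list);
 *	struct sk_buff *skb;
 *
 *	while ((skb = foo_next_rx_skb()))
 *		list_add_tail(&skb->list, &rx_list);
 *	netif_receive_skb_list(&rx_list);
 */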
5770
5771 static DEFINE_PER_CPU(struct work_struct, flush_works);
5772
5773 /* Network device is going away, flush any packets still pending */
5774 static void flush_backlog(struct work_struct *work)
5775 {
5776         struct sk_buff *skb, *tmp;
5777         struct softnet_data *sd;
5778
5779         local_bh_disable();
5780         sd = this_cpu_ptr(&softnet_data);
5781
5782         rps_lock_irq_disable(sd);
5783         skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5784                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5785                         __skb_unlink(skb, &sd->input_pkt_queue);
5786                         dev_kfree_skb_irq(skb);
5787                         input_queue_head_incr(sd);
5788                 }
5789         }
5790         rps_unlock_irq_enable(sd);
5791
5792         skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5793                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5794                         __skb_unlink(skb, &sd->process_queue);
5795                         kfree_skb(skb);
5796                         input_queue_head_incr(sd);
5797                 }
5798         }
5799         local_bh_enable();
5800 }
5801
5802 static bool flush_required(int cpu)
5803 {
5804 #if IS_ENABLED(CONFIG_RPS)
5805         struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5806         bool do_flush;
5807
5808         rps_lock_irq_disable(sd);
5809
5810         /* as insertion into process_queue happens with the rps lock held,
5811          * process_queue access may race only with dequeue
5812          */
5813         do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5814                    !skb_queue_empty_lockless(&sd->process_queue);
5815         rps_unlock_irq_enable(sd);
5816
5817         return do_flush;
5818 #endif
5819         /* without RPS we can't safely check input_pkt_queue: during a
5820          * concurrent remote skb_queue_splice() we can detect as empty both
5821          * input_pkt_queue and process_queue even if the latter could end-up
5822          * containing a lot of packets.
5823          */
5824         return true;
5825 }
5826
5827 static void flush_all_backlogs(void)
5828 {
5829         static cpumask_t flush_cpus;
5830         unsigned int cpu;
5831
5832         /* since we are under rtnl lock protection we can use static data
5833          * for the cpumask and avoid allocating on stack the possibly
5834          * large mask
5835          */
5836         ASSERT_RTNL();
5837
5838         cpus_read_lock();
5839
5840         cpumask_clear(&flush_cpus);
5841         for_each_online_cpu(cpu) {
5842                 if (flush_required(cpu)) {
5843                         queue_work_on(cpu, system_highpri_wq,
5844                                       per_cpu_ptr(&flush_works, cpu));
5845                         cpumask_set_cpu(cpu, &flush_cpus);
5846                 }
5847         }
5848
5849         /* we can have in flight packet[s] on the cpus we are not flushing,
5850          * synchronize_net() in unregister_netdevice_many() will take care of
5851          * them
5852          */
5853         for_each_cpu(cpu, &flush_cpus)
5854                 flush_work(per_cpu_ptr(&flush_works, cpu));
5855
5856         cpus_read_unlock();
5857 }
5858
5859 static void net_rps_send_ipi(struct softnet_data *remsd)
5860 {
5861 #ifdef CONFIG_RPS
5862         while (remsd) {
5863                 struct softnet_data *next = remsd->rps_ipi_next;
5864
5865                 if (cpu_online(remsd->cpu))
5866                         smp_call_function_single_async(remsd->cpu, &remsd->csd);
5867                 remsd = next;
5868         }
5869 #endif
5870 }
5871
5872 /*
5873  * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
5874  * Note: called with local irq disabled, but exits with local irq enabled.
5875  */
5876 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5877 {
5878 #ifdef CONFIG_RPS
5879         struct softnet_data *remsd = sd->rps_ipi_list;
5880
5881         if (remsd) {
5882                 sd->rps_ipi_list = NULL;
5883
5884                 local_irq_enable();
5885
5886                 /* Send pending IPIs to kick RPS processing on remote cpus. */
5887                 net_rps_send_ipi(remsd);
5888         } else
5889 #endif
5890                 local_irq_enable();
5891 }
5892
5893 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5894 {
5895 #ifdef CONFIG_RPS
5896         return sd->rps_ipi_list != NULL;
5897 #else
5898         return false;
5899 #endif
5900 }
5901
5902 static int process_backlog(struct napi_struct *napi, int quota)
5903 {
5904         struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5905         bool again = true;
5906         int work = 0;
5907
5908         /* Check if we have pending IPIs; it's better to send them now
5909          * than to wait for net_rx_action() to end.
5910          */
5911         if (sd_has_rps_ipi_waiting(sd)) {
5912                 local_irq_disable();
5913                 net_rps_action_and_irq_enable(sd);
5914         }
5915
5916         napi->weight = dev_rx_weight;
5917         while (again) {
5918                 struct sk_buff *skb;
5919
5920                 while ((skb = __skb_dequeue(&sd->process_queue))) {
5921                         rcu_read_lock();
5922                         __netif_receive_skb(skb);
5923                         rcu_read_unlock();
5924                         input_queue_head_incr(sd);
5925                         if (++work >= quota)
5926                                 return work;
5927
5928                 }
5929
5930                 rps_lock_irq_disable(sd);
5931                 if (skb_queue_empty(&sd->input_pkt_queue)) {
5932                         /*
5933                          * Inline a custom version of __napi_complete().
5934                          * Only the current cpu owns and manipulates this napi,
5935                          * and NAPI_STATE_SCHED is the only possible flag set
5936                          * on backlog.
5937                          * We can use a plain write instead of clear_bit(),
5938                          * and we don't need an smp_mb() memory barrier.
5939                          */
5940                         napi->state = 0;
5941                         again = false;
5942                 } else {
5943                         skb_queue_splice_tail_init(&sd->input_pkt_queue,
5944                                                    &sd->process_queue);
5945                 }
5946                 rps_unlock_irq_enable(sd);
5947         }
5948
5949         return work;
5950 }
5951
5952 /**
5953  * __napi_schedule - schedule for receive
5954  * @n: entry to schedule
5955  *
5956  * The entry's receive function will be scheduled to run.
5957  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
5958  */
5959 void __napi_schedule(struct napi_struct *n)
5960 {
5961         unsigned long flags;
5962
5963         local_irq_save(flags);
5964         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5965         local_irq_restore(flags);
5966 }
5967 EXPORT_SYMBOL(__napi_schedule);
5968
5969 /**
5970  *      napi_schedule_prep - check if napi can be scheduled
5971  *      @n: napi context
5972  *
5973  * Test if NAPI routine is already running, and if not mark
5974  * it as running.  This is used as a condition variable to
5975  * ensure only one NAPI poll instance runs.  We also make
5976  * sure there is no pending NAPI disable.
5977  */
5978 bool napi_schedule_prep(struct napi_struct *n)
5979 {
5980         unsigned long val, new;
5981
5982         do {
5983                 val = READ_ONCE(n->state);
5984                 if (unlikely(val & NAPIF_STATE_DISABLE))
5985                         return false;
5986                 new = val | NAPIF_STATE_SCHED;
5987
5988                 /* Sets STATE_MISSED bit if STATE_SCHED was already set
5989                  * This was suggested by Alexander Duyck, as the compiler
5990                  * emits better code than:
5991                  * if (val & NAPIF_STATE_SCHED)
5992                  *     new |= NAPIF_STATE_MISSED;
5993                  */
5994                 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5995                                                    NAPIF_STATE_MISSED;
5996         } while (cmpxchg(&n->state, val, new) != val);
5997
5998         return !(val & NAPIF_STATE_SCHED);
5999 }
6000 EXPORT_SYMBOL(napi_schedule_prep);
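
/* Example (hypothetical driver, a sketch): the usual interrupt-handler
 * pairing of napi_schedule_prep() with __napi_schedule(); most drivers
 * simply call napi_schedule(), which wraps exactly this sequence.
 * foo_mask_ring_irq() is a made-up helper.
 *
 *	static irqreturn_t foo_msix_handler(int irq, void *data)
 *	{
 *		struct foo_ring *ring = data;
 *
 *		foo_mask_ring_irq(ring);
 *		if (napi_schedule_prep(&ring->napi))
 *			__napi_schedule(&ring->napi);
 *		return IRQ_HANDLED;
 *	}
 */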
6001
6002 /**
6003  * __napi_schedule_irqoff - schedule for receive
6004  * @n: entry to schedule
6005  *
6006  * Variant of __napi_schedule() assuming hard irqs are masked.
6007  *
6008  * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
6009  * because the interrupt disabled assumption might not be true
6010  * due to force-threaded interrupts and spinlock substitution.
6011  */
6012 void __napi_schedule_irqoff(struct napi_struct *n)
6013 {
6014         if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6015                 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6016         else
6017                 __napi_schedule(n);
6018 }
6019 EXPORT_SYMBOL(__napi_schedule_irqoff);
6020
6021 bool napi_complete_done(struct napi_struct *n, int work_done)
6022 {
6023         unsigned long flags, val, new, timeout = 0;
6024         bool ret = true;
6025
6026         /*
6027          * 1) Don't let napi dequeue from the cpu poll list
6028          *    just in case it's running on a different CPU.
6029          * 2) If we are busy polling, do nothing here, we have
6030          *    the guarantee we will be called later.
6031          */
6032         if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6033                                  NAPIF_STATE_IN_BUSY_POLL)))
6034                 return false;
6035
6036         if (work_done) {
6037                 if (n->gro_bitmask)
6038                         timeout = READ_ONCE(n->dev->gro_flush_timeout);
6039                 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6040         }
6041         if (n->defer_hard_irqs_count > 0) {
6042                 n->defer_hard_irqs_count--;
6043                 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6044                 if (timeout)
6045                         ret = false;
6046         }
6047         if (n->gro_bitmask) {
6048                 /* When the NAPI instance uses a timeout and keeps postponing
6049                  * it, we need to somehow bound the time packets are kept in
6050                  * the GRO layer.
6051                  */
6052                 napi_gro_flush(n, !!timeout);
6053         }
6054
6055         gro_normal_list(n);
6056
6057         if (unlikely(!list_empty(&n->poll_list))) {
6058                 /* If n->poll_list is not empty, we need to mask irqs */
6059                 local_irq_save(flags);
6060                 list_del_init(&n->poll_list);
6061                 local_irq_restore(flags);
6062         }
6063
6064         do {
6065                 val = READ_ONCE(n->state);
6066
6067                 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6068
6069                 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6070                               NAPIF_STATE_SCHED_THREADED |
6071                               NAPIF_STATE_PREFER_BUSY_POLL);
6072
6073                 /* If STATE_MISSED was set, leave STATE_SCHED set,
6074                  * because we will call napi->poll() one more time.
6075                  * This C code was suggested by Alexander Duyck to help gcc.
6076                  */
6077                 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6078                                                     NAPIF_STATE_SCHED;
6079         } while (cmpxchg(&n->state, val, new) != val);
6080
6081         if (unlikely(val & NAPIF_STATE_MISSED)) {
6082                 __napi_schedule(n);
6083                 return false;
6084         }
6085
6086         if (timeout)
6087                 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6088                               HRTIMER_MODE_REL_PINNED);
6089         return ret;
6090 }
6091 EXPORT_SYMBOL(napi_complete_done);
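/*
 * Illustrative sketch (editor's addition): how a driver's poll()
 * callback typically consumes napi_complete_done()'s return value.
 * The foo_* identifiers are hypothetical.
 */
#if 0	/* example only */
static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_clean_rx_ring(napi, budget);

	/* Under budget: try to complete. Re-arm the device IRQ only if
	 * napi_complete_done() returned true, i.e. no gro_flush_timeout
	 * deferral and no NAPIF_STATE_MISSED rescheduling is pending.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		foo_irq_enable(napi);

	return work_done;
}
#endif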
6092
6093 /* must be called under rcu_read_lock(), as we don't take a reference */
6094 static struct napi_struct *napi_by_id(unsigned int napi_id)
6095 {
6096         unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6097         struct napi_struct *napi;
6098
6099         hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6100                 if (napi->napi_id == napi_id)
6101                         return napi;
6102
6103         return NULL;
6104 }
6105
6106 #if defined(CONFIG_NET_RX_BUSY_POLL)
6107
6108 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6109 {
6110         if (!skip_schedule) {
6111                 gro_normal_list(napi);
6112                 __napi_schedule(napi);
6113                 return;
6114         }
6115
6116         if (napi->gro_bitmask) {
6117                 /* Flush packets that are too old.
6118                  * If HZ < 1000, flush all packets.
6119                  */
6120                 napi_gro_flush(napi, HZ >= 1000);
6121         }
6122
6123         gro_normal_list(napi);
6124         clear_bit(NAPI_STATE_SCHED, &napi->state);
6125 }
6126
6127 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
6128                            u16 budget)
6129 {
6130         bool skip_schedule = false;
6131         unsigned long timeout;
6132         int rc;
6133
6134         /* Busy polling means there is a high chance device driver hard irq
6135          * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6136          * set in napi_schedule_prep().
6137          * Since we are about to call napi->poll() once more, we can safely
6138          * clear NAPI_STATE_MISSED.
6139          *
6140          * Note: x86 could use a single "lock and ..." instruction
6141          * to perform these two clear_bit() calls.
6142          */
6143         clear_bit(NAPI_STATE_MISSED, &napi->state);
6144         clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6145
6146         local_bh_disable();
6147
6148         if (prefer_busy_poll) {
6149                 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6150                 timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6151                 if (napi->defer_hard_irqs_count && timeout) {
6152                         hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6153                         skip_schedule = true;
6154                 }
6155         }
6156
6157         /* All we really want here is to re-enable device interrupts.
6158          * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6159          */
6160         rc = napi->poll(napi, budget);
6161         /* We can't gro_normal_list() here, because napi->poll() might have
6162          * rearmed the napi (napi_complete_done()) in which case it could
6163          * already be running on another CPU.
6164          */
6165         trace_napi_poll(napi, rc, budget);
6166         netpoll_poll_unlock(have_poll_lock);
6167         if (rc == budget)
6168                 __busy_poll_stop(napi, skip_schedule);
6169         local_bh_enable();
6170 }
6171
6172 void napi_busy_loop(unsigned int napi_id,
6173                     bool (*loop_end)(void *, unsigned long),
6174                     void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6175 {
6176         unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6177         int (*napi_poll)(struct napi_struct *napi, int budget);
6178         void *have_poll_lock = NULL;
6179         struct napi_struct *napi;
6180
6181 restart:
6182         napi_poll = NULL;
6183
6184         rcu_read_lock();
6185
6186         napi = napi_by_id(napi_id);
6187         if (!napi)
6188                 goto out;
6189
6190         preempt_disable();
6191         for (;;) {
6192                 int work = 0;
6193
6194                 local_bh_disable();
6195                 if (!napi_poll) {
6196                         unsigned long val = READ_ONCE(napi->state);
6197
6198                         /* If multiple threads are competing for this napi,
6199                          * we avoid dirtying napi->state as much as we can.
6200                          */
6201                         if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6202                                    NAPIF_STATE_IN_BUSY_POLL)) {
6203                                 if (prefer_busy_poll)
6204                                         set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6205                                 goto count;
6206                         }
6207                         if (cmpxchg(&napi->state, val,
6208                                     val | NAPIF_STATE_IN_BUSY_POLL |
6209                                           NAPIF_STATE_SCHED) != val) {
6210                                 if (prefer_busy_poll)
6211                                         set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6212                                 goto count;
6213                         }
6214                         have_poll_lock = netpoll_poll_lock(napi);
6215                         napi_poll = napi->poll;
6216                 }
6217                 work = napi_poll(napi, budget);
6218                 trace_napi_poll(napi, work, budget);
6219                 gro_normal_list(napi);
6220 count:
6221                 if (work > 0)
6222                         __NET_ADD_STATS(dev_net(napi->dev),
6223                                         LINUX_MIB_BUSYPOLLRXPACKETS, work);
6224                 local_bh_enable();
6225
6226                 if (!loop_end || loop_end(loop_end_arg, start_time))
6227                         break;
6228
6229                 if (unlikely(need_resched())) {
6230                         if (napi_poll)
6231                                 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6232                         preempt_enable();
6233                         rcu_read_unlock();
6234                         cond_resched();
6235                         if (loop_end(loop_end_arg, start_time))
6236                                 return;
6237                         goto restart;
6238                 }
6239                 cpu_relax();
6240         }
6241         if (napi_poll)
6242                 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6243         preempt_enable();
6244 out:
6245         rcu_read_unlock();
6246 }
6247 EXPORT_SYMBOL(napi_busy_loop);
6248
6249 #endif /* CONFIG_NET_RX_BUSY_POLL */
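/*
 * Illustrative sketch (editor's addition): one way napi_busy_loop() is
 * reached is a socket opted into busy polling with the SO_BUSY_POLL
 * option; the 50 below is an arbitrary example time in microseconds.
 */
#if 0	/* userspace example only */
#include <sys/socket.h>

static int enable_busy_poll(int fd)
{
	int usecs = 50;	/* busy poll up to 50us per blocking receive */

	return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
			  &usecs, sizeof(usecs));
}
#endif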
6250
6251 static void napi_hash_add(struct napi_struct *napi)
6252 {
6253         if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6254                 return;
6255
6256         spin_lock(&napi_hash_lock);
6257
6258         /* 0..NR_CPUS range is reserved for sender_cpu use */
6259         do {
6260                 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6261                         napi_gen_id = MIN_NAPI_ID;
6262         } while (napi_by_id(napi_gen_id));
6263         napi->napi_id = napi_gen_id;
6264
6265         hlist_add_head_rcu(&napi->napi_hash_node,
6266                            &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6267
6268         spin_unlock(&napi_hash_lock);
6269 }
6270
6271 /* Warning: the caller is responsible for making sure an RCU grace period
6272  * has elapsed before freeing the memory containing @napi.
6273  */
6274 static void napi_hash_del(struct napi_struct *napi)
6275 {
6276         spin_lock(&napi_hash_lock);
6277
6278         hlist_del_init_rcu(&napi->napi_hash_node);
6279
6280         spin_unlock(&napi_hash_lock);
6281 }
6282
6283 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6284 {
6285         struct napi_struct *napi;
6286
6287         napi = container_of(timer, struct napi_struct, timer);
6288
6289         /* Note: we use a relaxed variant of napi_schedule_prep(), not setting
6290          * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6291          */
6292         if (!napi_disable_pending(napi) &&
6293             !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6294                 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6295                 __napi_schedule_irqoff(napi);
6296         }
6297
6298         return HRTIMER_NORESTART;
6299 }
6300
6301 static void init_gro_hash(struct napi_struct *napi)
6302 {
6303         int i;
6304
6305         for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6306                 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6307                 napi->gro_hash[i].count = 0;
6308         }
6309         napi->gro_bitmask = 0;
6310 }
6311
6312 int dev_set_threaded(struct net_device *dev, bool threaded)
6313 {
6314         struct napi_struct *napi;
6315         int err = 0;
6316
6317         if (dev->threaded == threaded)
6318                 return 0;
6319
6320         if (threaded) {
6321                 list_for_each_entry(napi, &dev->napi_list, dev_list) {
6322                         if (!napi->thread) {
6323                                 err = napi_kthread_create(napi);
6324                                 if (err) {
6325                                         threaded = false;
6326                                         break;
6327                                 }
6328                         }
6329                 }
6330         }
6331
6332         dev->threaded = threaded;
6333
6334         /* Make sure kthread is created before THREADED bit
6335          * is set.
6336          */
6337         smp_mb__before_atomic();
6338
6339         /* Setting/unsetting threaded mode on a napi might not immediately
6340          * take effect if the current napi instance is actively being
6341          * polled. In this case, the switch between threaded mode and
6342          * softirq mode will happen in the next round of napi_schedule().
6343          * This should not cause hiccups/stalls to the live traffic.
6344          */
6345         list_for_each_entry(napi, &dev->napi_list, dev_list) {
6346                 if (threaded)
6347                         set_bit(NAPI_STATE_THREADED, &napi->state);
6348                 else
6349                         clear_bit(NAPI_STATE_THREADED, &napi->state);
6350         }
6351
6352         return err;
6353 }
6354 EXPORT_SYMBOL(dev_set_threaded);
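/*
 * Illustrative sketch (editor's addition): threaded NAPI can be toggled
 * from a driver or, via the in-tree sysfs attribute
 * /sys/class/net/<iface>/threaded, by an administrator. The sysfs path
 * holds RTNL, so the sketch does the same; foo_* is hypothetical.
 */
#if 0	/* example only */
static void foo_enable_threaded_napi(struct net_device *dev)
{
	rtnl_lock();
	if (dev_set_threaded(dev, true))
		netdev_warn(dev, "failed to switch to threaded NAPI\n");
	rtnl_unlock();
}
#endif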
6355
6356 /* Double check that napi_get_frags() allocates skbs with
6357  * skb->head being backed by slab, not a page fragment.
6358  * This is to make sure bug fixed in 3226b158e67c
6359  * ("net: avoid 32 x truesize under-estimation for tiny skbs")
6360  * does not accidentally come back.
6361  */
6362 static void napi_get_frags_check(struct napi_struct *napi)
6363 {
6364         struct sk_buff *skb;
6365
6366         local_bh_disable();
6367         skb = napi_get_frags(napi);
6368         WARN_ON_ONCE(skb && skb->head_frag);
6369         napi_free_frags(napi);
6370         local_bh_enable();
6371 }
6372
6373 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
6374                            int (*poll)(struct napi_struct *, int), int weight)
6375 {
6376         if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6377                 return;
6378
6379         INIT_LIST_HEAD(&napi->poll_list);
6380         INIT_HLIST_NODE(&napi->napi_hash_node);
6381         hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6382         napi->timer.function = napi_watchdog;
6383         init_gro_hash(napi);
6384         napi->skb = NULL;
6385         INIT_LIST_HEAD(&napi->rx_list);
6386         napi->rx_count = 0;
6387         napi->poll = poll;
6388         if (weight > NAPI_POLL_WEIGHT)
6389                 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6390                                 weight);
6391         napi->weight = weight;
6392         napi->dev = dev;
6393 #ifdef CONFIG_NETPOLL
6394         napi->poll_owner = -1;
6395 #endif
6396         set_bit(NAPI_STATE_SCHED, &napi->state);
6397         set_bit(NAPI_STATE_NPSVC, &napi->state);
6398         list_add_rcu(&napi->dev_list, &dev->napi_list);
6399         napi_hash_add(napi);
6400         napi_get_frags_check(napi);
6401         /* Create kthread for this napi if dev->threaded is set.
6402          * Clear dev->threaded if kthread creation failed so that
6403          * threaded mode will not be enabled in napi_enable().
6404          */
6405         if (dev->threaded && napi_kthread_create(napi))
6406                 dev->threaded = 0;
6407 }
6408 EXPORT_SYMBOL(netif_napi_add_weight);
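/*
 * Illustrative sketch (editor's addition): the common driver-side call
 * is the netif_napi_add() wrapper, which supplies the default weight.
 * The foo_* identifiers are hypothetical.
 */
#if 0	/* example only */
static void foo_setup_queue(struct net_device *dev, struct foo_queue *q)
{
	/* The napi is registered with SCHED and NPSVC set, so it stays
	 * unusable until napi_enable() clears those bits.
	 */
	netif_napi_add(dev, &q->napi, foo_poll);
}
#endif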
6409
6410 void napi_disable(struct napi_struct *n)
6411 {
6412         unsigned long val, new;
6413
6414         might_sleep();
6415         set_bit(NAPI_STATE_DISABLE, &n->state);
6416
6417         for ( ; ; ) {
6418                 val = READ_ONCE(n->state);
6419                 if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
6420                         usleep_range(20, 200);
6421                         continue;
6422                 }
6423
6424                 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
6425                 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
6426
6427                 if (cmpxchg(&n->state, val, new) == val)
6428                         break;
6429         }
6430
6431         hrtimer_cancel(&n->timer);
6432
6433         clear_bit(NAPI_STATE_DISABLE, &n->state);
6434 }
6435 EXPORT_SYMBOL(napi_disable);
6436
6437 /**
6438  *      napi_enable - enable NAPI scheduling
6439  *      @n: NAPI context
6440  *
6441  * Resume NAPI from being scheduled on this context.
6442  * Must be paired with napi_disable.
6443  */
6444 void napi_enable(struct napi_struct *n)
6445 {
6446         unsigned long val, new;
6447
6448         do {
6449                 val = READ_ONCE(n->state);
6450                 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
6451
6452                 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
6453                 if (n->dev->threaded && n->thread)
6454                         new |= NAPIF_STATE_THREADED;
6455         } while (cmpxchg(&n->state, val, new) != val);
6456 }
6457 EXPORT_SYMBOL(napi_enable);
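/*
 * Illustrative sketch (editor's addition): napi_enable()/napi_disable()
 * are normally paired across a device's open/stop path. The foo_*
 * identifiers are hypothetical.
 */
#if 0	/* example only */
static int foo_open(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);	/* clears SCHED/NPSVC set at add time */
	foo_irq_enable(priv);
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	foo_irq_disable(priv);
	napi_disable(&priv->napi);	/* may sleep until polling stops */
	return 0;
}
#endif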
6458
6459 static void flush_gro_hash(struct napi_struct *napi)
6460 {
6461         int i;
6462
6463         for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6464                 struct sk_buff *skb, *n;
6465
6466                 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6467                         kfree_skb(skb);
6468                 napi->gro_hash[i].count = 0;
6469         }
6470 }
6471
6472 /* Must be called in process context */
6473 void __netif_napi_del(struct napi_struct *napi)
6474 {
6475         if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6476                 return;
6477
6478         napi_hash_del(napi);
6479         list_del_rcu(&napi->dev_list);
6480         napi_free_frags(napi);
6481
6482         flush_gro_hash(napi);
6483         napi->gro_bitmask = 0;
6484
6485         if (napi->thread) {
6486                 kthread_stop(napi->thread);
6487                 napi->thread = NULL;
6488         }
6489 }
6490 EXPORT_SYMBOL(__netif_napi_del);
6491
6492 static int __napi_poll(struct napi_struct *n, bool *repoll)
6493 {
6494         int work, weight;
6495
6496         weight = n->weight;
6497
6498         /* This NAPI_STATE_SCHED test is for avoiding a race
6499          * with netpoll's poll_napi().  Only the entity which
6500          * obtains the lock and sees NAPI_STATE_SCHED set will
6501          * actually make the ->poll() call.  Therefore we avoid
6502          * accidentally calling ->poll() when NAPI is not scheduled.
6503          */
6504         work = 0;
6505         if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6506                 work = n->poll(n, weight);
6507                 trace_napi_poll(n, work, weight);
6508         }
6509
6510         if (unlikely(work > weight))
6511                 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6512                                 n->poll, work, weight);
6513
6514         if (likely(work < weight))
6515                 return work;
6516
6517         /* Drivers must not modify the NAPI state if they
6518          * consume the entire weight.  In such cases this code
6519          * still "owns" the NAPI instance and therefore can
6520          * move the instance around on the list at-will.
6521          */
6522         if (unlikely(napi_disable_pending(n))) {
6523                 napi_complete(n);
6524                 return work;
6525         }
6526
6527         /* The NAPI context has more processing work, but busy-polling
6528          * is preferred. Exit early.
6529          */
6530         if (napi_prefer_busy_poll(n)) {
6531                 if (napi_complete_done(n, work)) {
6532                         /* If timeout is not set, we need to make sure
6533                          * that the NAPI is re-scheduled.
6534                          */
6535                         napi_schedule(n);
6536                 }
6537                 return work;
6538         }
6539
6540         if (n->gro_bitmask) {
6541                 /* Flush packets that are too old.
6542                  * If HZ < 1000, flush all packets.
6543                  */
6544                 napi_gro_flush(n, HZ >= 1000);
6545         }
6546
6547         gro_normal_list(n);
6548
6549         /* Some drivers may have called napi_schedule
6550          * prior to exhausting their budget.
6551          */
6552         if (unlikely(!list_empty(&n->poll_list))) {
6553                 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6554                              n->dev ? n->dev->name : "backlog");
6555                 return work;
6556         }
6557
6558         *repoll = true;
6559
6560         return work;
6561 }
6562
6563 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6564 {
6565         bool do_repoll = false;
6566         void *have;
6567         int work;
6568
6569         list_del_init(&n->poll_list);
6570
6571         have = netpoll_poll_lock(n);
6572
6573         work = __napi_poll(n, &do_repoll);
6574
6575         if (do_repoll)
6576                 list_add_tail(&n->poll_list, repoll);
6577
6578         netpoll_poll_unlock(have);
6579
6580         return work;
6581 }
6582
6583 static int napi_thread_wait(struct napi_struct *napi)
6584 {
6585         bool woken = false;
6586
6587         set_current_state(TASK_INTERRUPTIBLE);
6588
6589         while (!kthread_should_stop()) {
6590                 /* Testing SCHED_THREADED bit here to make sure the current
6591                  * kthread owns this napi and could poll on this napi.
6592                  * Testing SCHED bit is not enough because SCHED bit might be
6593                  * set by some other busy poll thread or by napi_disable().
6594                  */
6595                 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
6596                         WARN_ON(!list_empty(&napi->poll_list));
6597                         __set_current_state(TASK_RUNNING);
6598                         return 0;
6599                 }
6600
6601                 schedule();
6602                 /* woken being true indicates this thread owns this napi. */
6603                 woken = true;
6604                 set_current_state(TASK_INTERRUPTIBLE);
6605         }
6606         __set_current_state(TASK_RUNNING);
6607
6608         return -1;
6609 }
6610
6611 static int napi_threaded_poll(void *data)
6612 {
6613         struct napi_struct *napi = data;
6614         void *have;
6615
6616         while (!napi_thread_wait(napi)) {
6617                 for (;;) {
6618                         bool repoll = false;
6619
6620                         local_bh_disable();
6621
6622                         have = netpoll_poll_lock(napi);
6623                         __napi_poll(napi, &repoll);
6624                         netpoll_poll_unlock(have);
6625
6626                         local_bh_enable();
6627
6628                         if (!repoll)
6629                                 break;
6630
6631                         cond_resched();
6632                 }
6633         }
6634         return 0;
6635 }
6636
6637 static void skb_defer_free_flush(struct softnet_data *sd)
6638 {
6639         struct sk_buff *skb, *next;
6640         unsigned long flags;
6641
6642         /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
6643         if (!READ_ONCE(sd->defer_list))
6644                 return;
6645
6646         spin_lock_irqsave(&sd->defer_lock, flags);
6647         skb = sd->defer_list;
6648         sd->defer_list = NULL;
6649         sd->defer_count = 0;
6650         spin_unlock_irqrestore(&sd->defer_lock, flags);
6651
6652         while (skb != NULL) {
6653                 next = skb->next;
6654                 napi_consume_skb(skb, 1);
6655                 skb = next;
6656         }
6657 }
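/*
 * Editor's note: sd->defer_list is filled by skb_attempt_defer_free()
 * (in skbuff.c), which hands skbs freed on another CPU back to the CPU
 * that allocated them; the WRITE_ONCE() paired with the READ_ONCE()
 * above lives there.
 */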
6658
6659 static __latent_entropy void net_rx_action(struct softirq_action *h)
6660 {
6661         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6662         unsigned long time_limit = jiffies +
6663                 usecs_to_jiffies(netdev_budget_usecs);
6664         int budget = netdev_budget;
6665         LIST_HEAD(list);
6666         LIST_HEAD(repoll);
6667
6668         local_irq_disable();
6669         list_splice_init(&sd->poll_list, &list);
6670         local_irq_enable();
6671
6672         for (;;) {
6673                 struct napi_struct *n;
6674
6675                 skb_defer_free_flush(sd);
6676
6677                 if (list_empty(&list)) {
6678                         if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
6679                                 goto end;
6680                         break;
6681                 }
6682
6683                 n = list_first_entry(&list, struct napi_struct, poll_list);
6684                 budget -= napi_poll(n, &repoll);
6685
6686                 /* If the softirq window is exhausted then punt.
6687                  * Allow this to run for 2 jiffies, which allows
6688                  * an average latency of 1.5/HZ.
6689                  */
6690                 if (unlikely(budget <= 0 ||
6691                              time_after_eq(jiffies, time_limit))) {
6692                         sd->time_squeeze++;
6693                         break;
6694                 }
6695         }
6696
6697         local_irq_disable();
6698
6699         list_splice_tail_init(&sd->poll_list, &list);
6700         list_splice_tail(&repoll, &list);
6701         list_splice(&list, &sd->poll_list);
6702         if (!list_empty(&sd->poll_list))
6703                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6704
6705         net_rps_action_and_irq_enable(sd);
6706 end:;
6707 }
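/*
 * Editor's note: the two limits consumed above are the sysctls
 * net.core.netdev_budget (packet budget, 300 by default) and
 * net.core.netdev_budget_usecs (time budget); when either runs out with
 * work still pending, the squeeze is visible in /proc/net/softnet_stat
 * via sd->time_squeeze.
 */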
6708
6709 struct netdev_adjacent {
6710         struct net_device *dev;
6711         netdevice_tracker dev_tracker;
6712
6713         /* upper master flag, there can only be one master device per list */
6714         bool master;
6715
6716         /* lookup ignore flag */
6717         bool ignore;
6718
6719         /* counter for the number of times this device was added to us */
6720         u16 ref_nr;
6721
6722         /* private field for the users */
6723         void *private;
6724
6725         struct list_head list;
6726         struct rcu_head rcu;
6727 };
6728
6729 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6730                                                  struct list_head *adj_list)
6731 {
6732         struct netdev_adjacent *adj;
6733
6734         list_for_each_entry(adj, adj_list, list) {
6735                 if (adj->dev == adj_dev)
6736                         return adj;
6737         }
6738         return NULL;
6739 }
6740
6741 static int ____netdev_has_upper_dev(struct net_device *upper_dev,
6742                                     struct netdev_nested_priv *priv)
6743 {
6744         struct net_device *dev = (struct net_device *)priv->data;
6745
6746         return upper_dev == dev;
6747 }
6748
6749 /**
6750  * netdev_has_upper_dev - Check if device is linked to an upper device
6751  * @dev: device
6752  * @upper_dev: upper device to check
6753  *
6754  * Find out if a device is linked to the specified upper device and return true
6755  * in case it is. Note that this checks only the immediate upper device,
6756  * not the complete stack of devices. The caller must hold the RTNL lock.
6757  */
6758 bool netdev_has_upper_dev(struct net_device *dev,
6759                           struct net_device *upper_dev)
6760 {
6761         struct netdev_nested_priv priv = {
6762                 .data = (void *)upper_dev,
6763         };
6764
6765         ASSERT_RTNL();
6766
6767         return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6768                                              &priv);
6769 }
6770 EXPORT_SYMBOL(netdev_has_upper_dev);
6771
6772 /**
6773  * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6774  * @dev: device
6775  * @upper_dev: upper device to check
6776  *
6777  * Find out if a device is linked to the specified upper device and return true
6778  * in case it is. Note that this checks the entire upper device chain.
6779  * The caller must hold the RCU read lock.
6780  */
6781
6782 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6783                                   struct net_device *upper_dev)
6784 {
6785         struct netdev_nested_priv priv = {
6786                 .data = (void *)upper_dev,
6787         };
6788
6789         return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6790                                                &priv);
6791 }
6792 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6793
6794 /**
6795  * netdev_has_any_upper_dev - Check if device is linked to some device
6796  * @dev: device
6797  *
6798  * Find out if a device is linked to an upper device and return true in case
6799  * it is. The caller must hold the RTNL lock.
6800  */
6801 bool netdev_has_any_upper_dev(struct net_device *dev)
6802 {
6803         ASSERT_RTNL();
6804
6805         return !list_empty(&dev->adj_list.upper);
6806 }
6807 EXPORT_SYMBOL(netdev_has_any_upper_dev);
6808
6809 /**
6810  * netdev_master_upper_dev_get - Get master upper device
6811  * @dev: device
6812  *
6813  * Find a master upper device and return a pointer to it, or NULL in case
6814  * it's not there. The caller must hold the RTNL lock.
6815  */
6816 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6817 {
6818         struct netdev_adjacent *upper;
6819
6820         ASSERT_RTNL();
6821
6822         if (list_empty(&dev->adj_list.upper))
6823                 return NULL;
6824
6825         upper = list_first_entry(&dev->adj_list.upper,
6826                                  struct netdev_adjacent, list);
6827         if (likely(upper->master))
6828                 return upper->dev;
6829         return NULL;
6830 }
6831 EXPORT_SYMBOL(netdev_master_upper_dev_get);
6832
6833 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6834 {
6835         struct netdev_adjacent *upper;
6836
6837         ASSERT_RTNL();
6838
6839         if (list_empty(&dev->adj_list.upper))
6840                 return NULL;
6841
6842         upper = list_first_entry(&dev->adj_list.upper,
6843                                  struct netdev_adjacent, list);
6844         if (likely(upper->master) && !upper->ignore)
6845                 return upper->dev;
6846         return NULL;
6847 }
6848
6849 /**
6850  * netdev_has_any_lower_dev - Check if device is linked to some device
6851  * @dev: device
6852  *
6853  * Find out if a device is linked to a lower device and return true in case
6854  * it is. The caller must hold the RTNL lock.
6855  */
6856 static bool netdev_has_any_lower_dev(struct net_device *dev)
6857 {
6858         ASSERT_RTNL();
6859
6860         return !list_empty(&dev->adj_list.lower);
6861 }
6862
6863 void *netdev_adjacent_get_private(struct list_head *adj_list)
6864 {
6865         struct netdev_adjacent *adj;
6866
6867         adj = list_entry(adj_list, struct netdev_adjacent, list);
6868
6869         return adj->private;
6870 }
6871 EXPORT_SYMBOL(netdev_adjacent_get_private);
6872
6873 /**
6874  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6875  * @dev: device
6876  * @iter: list_head ** of the current position
6877  *
6878  * Gets the next device from the dev's upper list, starting from iter
6879  * position. The caller must hold RCU read lock.
6880  */
6881 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6882                                                  struct list_head **iter)
6883 {
6884         struct netdev_adjacent *upper;
6885
6886         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6887
6888         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6889
6890         if (&upper->list == &dev->adj_list.upper)
6891                 return NULL;
6892
6893         *iter = &upper->list;
6894
6895         return upper->dev;
6896 }
6897 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6898
6899 static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
6900                                                   struct list_head **iter,
6901                                                   bool *ignore)
6902 {
6903         struct netdev_adjacent *upper;
6904
6905         upper = list_entry((*iter)->next, struct netdev_adjacent, list);
6906
6907         if (&upper->list == &dev->adj_list.upper)
6908                 return NULL;
6909
6910         *iter = &upper->list;
6911         *ignore = upper->ignore;
6912
6913         return upper->dev;
6914 }
6915
6916 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6917                                                     struct list_head **iter)
6918 {
6919         struct netdev_adjacent *upper;
6920
6921         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6922
6923         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6924
6925         if (&upper->list == &dev->adj_list.upper)
6926                 return NULL;
6927
6928         *iter = &upper->list;
6929
6930         return upper->dev;
6931 }
6932
6933 static int __netdev_walk_all_upper_dev(struct net_device *dev,
6934                                        int (*fn)(struct net_device *dev,
6935                                          struct netdev_nested_priv *priv),
6936                                        struct netdev_nested_priv *priv)
6937 {
6938         struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6939         struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6940         int ret, cur = 0;
6941         bool ignore;
6942
6943         now = dev;
6944         iter = &dev->adj_list.upper;
6945
6946         while (1) {
6947                 if (now != dev) {
6948                         ret = fn(now, priv);
6949                         if (ret)
6950                                 return ret;
6951                 }
6952
6953                 next = NULL;
6954                 while (1) {
6955                         udev = __netdev_next_upper_dev(now, &iter, &ignore);
6956                         if (!udev)
6957                                 break;
6958                         if (ignore)
6959                                 continue;
6960
6961                         next = udev;
6962                         niter = &udev->adj_list.upper;
6963                         dev_stack[cur] = now;
6964                         iter_stack[cur++] = iter;
6965                         break;
6966                 }
6967
6968                 if (!next) {
6969                         if (!cur)
6970                                 return 0;
6971                         next = dev_stack[--cur];
6972                         niter = iter_stack[cur];
6973                 }
6974
6975                 now = next;
6976                 iter = niter;
6977         }
6978
6979         return 0;
6980 }
6981
6982 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
6983                                   int (*fn)(struct net_device *dev,
6984                                             struct netdev_nested_priv *priv),
6985                                   struct netdev_nested_priv *priv)
6986 {
6987         struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6988         struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6989         int ret, cur = 0;
6990
6991         now = dev;
6992         iter = &dev->adj_list.upper;
6993
6994         while (1) {
6995                 if (now != dev) {
6996                         ret = fn(now, priv);
6997                         if (ret)
6998                                 return ret;
6999                 }
7000
7001                 next = NULL;
7002                 while (1) {
7003                         udev = netdev_next_upper_dev_rcu(now, &iter);
7004                         if (!udev)
7005                                 break;
7006
7007                         next = udev;
7008                         niter = &udev->adj_list.upper;
7009                         dev_stack[cur] = now;
7010                         iter_stack[cur++] = iter;
7011                         break;
7012                 }
7013
7014                 if (!next) {
7015                         if (!cur)
7016                                 return 0;
7017                         next = dev_stack[--cur];
7018                         niter = iter_stack[cur];
7019                 }
7020
7021                 now = next;
7022                 iter = niter;
7023         }
7024
7025         return 0;
7026 }
7027 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
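/*
 * Illustrative sketch (editor's addition): a minimal callback for the
 * walker above, counting upper devices through netdev_nested_priv.data.
 * The foo_* identifiers are hypothetical.
 */
#if 0	/* example only */
static int foo_count_upper(struct net_device *upper,
			   struct netdev_nested_priv *priv)
{
	(*(int *)priv->data)++;
	return 0;	/* a non-zero return would abort the walk */
}

static int foo_upper_count(struct net_device *dev)
{
	int count = 0;
	struct netdev_nested_priv priv = { .data = (void *)&count };

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, foo_count_upper, &priv);
	rcu_read_unlock();
	return count;
}
#endif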
7028
7029 static bool __netdev_has_upper_dev(struct net_device *dev,
7030                                    struct net_device *upper_dev)
7031 {
7032         struct netdev_nested_priv priv = {
7033                 .flags = 0,
7034                 .data = (void *)upper_dev,
7035         };
7036
7037         ASSERT_RTNL();
7038
7039         return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7040                                            &priv);
7041 }
7042
7043 /**
7044  * netdev_lower_get_next_private - Get the next ->private from the
7045  *                                 lower neighbour list
7046  * @dev: device
7047  * @iter: list_head ** of the current position
7048  *
7049  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7050  * list, starting from iter position. The caller must either hold the
7051  * RTNL lock or its own locking that guarantees that the neighbour lower
7052  * list will remain unchanged.
7053  */
7054 void *netdev_lower_get_next_private(struct net_device *dev,
7055                                     struct list_head **iter)
7056 {
7057         struct netdev_adjacent *lower;
7058
7059         lower = list_entry(*iter, struct netdev_adjacent, list);
7060
7061         if (&lower->list == &dev->adj_list.lower)
7062                 return NULL;
7063
7064         *iter = lower->list.next;
7065
7066         return lower->private;
7067 }
7068 EXPORT_SYMBOL(netdev_lower_get_next_private);
7069
7070 /**
7071  * netdev_lower_get_next_private_rcu - Get the next ->private from the
7072  *                                     lower neighbour list, RCU
7073  *                                     variant
7074  * @dev: device
7075  * @iter: list_head ** of the current position
7076  *
7077  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7078  * list, starting from iter position. The caller must hold RCU read lock.
7079  */
7080 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7081                                         struct list_head **iter)
7082 {
7083         struct netdev_adjacent *lower;
7084
7085         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
7086
7087         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7088
7089         if (&lower->list == &dev->adj_list.lower)
7090                 return NULL;
7091
7092         *iter = &lower->list;
7093
7094         return lower->private;
7095 }
7096 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7097
7098 /**
7099  * netdev_lower_get_next - Get the next device from the lower neighbour
7100  *                         list
7101  * @dev: device
7102  * @iter: list_head ** of the current position
7103  *
7104  * Gets the next netdev_adjacent from the dev's lower neighbour
7105  * list, starting from iter position. The caller must hold the RTNL lock or
7106  * its own locking that guarantees that the neighbour lower
7107  * list will remain unchanged.
7108  */
7109 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7110 {
7111         struct netdev_adjacent *lower;
7112
7113         lower = list_entry(*iter, struct netdev_adjacent, list);
7114
7115         if (&lower->list == &dev->adj_list.lower)
7116                 return NULL;
7117
7118         *iter = lower->list.next;
7119
7120         return lower->dev;
7121 }
7122 EXPORT_SYMBOL(netdev_lower_get_next);
7123
7124 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7125                                                 struct list_head **iter)
7126 {
7127         struct netdev_adjacent *lower;
7128
7129         lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7130
7131         if (&lower->list == &dev->adj_list.lower)
7132                 return NULL;
7133
7134         *iter = &lower->list;
7135
7136         return lower->dev;
7137 }
7138
7139 static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7140                                                   struct list_head **iter,
7141                                                   bool *ignore)
7142 {
7143         struct netdev_adjacent *lower;
7144
7145         lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7146
7147         if (&lower->list == &dev->adj_list.lower)
7148                 return NULL;
7149
7150         *iter = &lower->list;
7151         *ignore = lower->ignore;
7152
7153         return lower->dev;
7154 }
7155
7156 int netdev_walk_all_lower_dev(struct net_device *dev,
7157                               int (*fn)(struct net_device *dev,
7158                                         struct netdev_nested_priv *priv),
7159                               struct netdev_nested_priv *priv)
7160 {
7161         struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7162         struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7163         int ret, cur = 0;
7164
7165         now = dev;
7166         iter = &dev->adj_list.lower;
7167
7168         while (1) {
7169                 if (now != dev) {
7170                         ret = fn(now, priv);
7171                         if (ret)
7172                                 return ret;
7173                 }
7174
7175                 next = NULL;
7176                 while (1) {
7177                         ldev = netdev_next_lower_dev(now, &iter);
7178                         if (!ldev)
7179                                 break;
7180
7181                         next = ldev;
7182                         niter = &ldev->adj_list.lower;
7183                         dev_stack[cur] = now;
7184                         iter_stack[cur++] = iter;
7185                         break;
7186                 }
7187
7188                 if (!next) {
7189                         if (!cur)
7190                                 return 0;
7191                         next = dev_stack[--cur];
7192                         niter = iter_stack[cur];
7193                 }
7194
7195                 now = next;
7196                 iter = niter;
7197         }
7198
7199         return 0;
7200 }
7201 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7202
7203 static int __netdev_walk_all_lower_dev(struct net_device *dev,
7204                                        int (*fn)(struct net_device *dev,
7205                                          struct netdev_nested_priv *priv),
7206                                        struct netdev_nested_priv *priv)
7207 {
7208         struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7209         struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7210         int ret, cur = 0;
7211         bool ignore;
7212
7213         now = dev;
7214         iter = &dev->adj_list.lower;
7215
7216         while (1) {
7217                 if (now != dev) {
7218                         ret = fn(now, priv);
7219                         if (ret)
7220                                 return ret;
7221                 }
7222
7223                 next = NULL;
7224                 while (1) {
7225                         ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7226                         if (!ldev)
7227                                 break;
7228                         if (ignore)
7229                                 continue;
7230
7231                         next = ldev;
7232                         niter = &ldev->adj_list.lower;
7233                         dev_stack[cur] = now;
7234                         iter_stack[cur++] = iter;
7235                         break;
7236                 }
7237
7238                 if (!next) {
7239                         if (!cur)
7240                                 return 0;
7241                         next = dev_stack[--cur];
7242                         niter = iter_stack[cur];
7243                 }
7244
7245                 now = next;
7246                 iter = niter;
7247         }
7248
7249         return 0;
7250 }
7251
7252 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7253                                              struct list_head **iter)
7254 {
7255         struct netdev_adjacent *lower;
7256
7257         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7258         if (&lower->list == &dev->adj_list.lower)
7259                 return NULL;
7260
7261         *iter = &lower->list;
7262
7263         return lower->dev;
7264 }
7265 EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7266
7267 static u8 __netdev_upper_depth(struct net_device *dev)
7268 {
7269         struct net_device *udev;
7270         struct list_head *iter;
7271         u8 max_depth = 0;
7272         bool ignore;
7273
7274         for (iter = &dev->adj_list.upper,
7275              udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7276              udev;
7277              udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7278                 if (ignore)
7279                         continue;
7280                 if (max_depth < udev->upper_level)
7281                         max_depth = udev->upper_level;
7282         }
7283
7284         return max_depth;
7285 }
7286
7287 static u8 __netdev_lower_depth(struct net_device *dev)
7288 {
7289         struct net_device *ldev;
7290         struct list_head *iter;
7291         u8 max_depth = 0;
7292         bool ignore;
7293
7294         for (iter = &dev->adj_list.lower,
7295              ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7296              ldev;
7297              ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7298                 if (ignore)
7299                         continue;
7300                 if (max_depth < ldev->lower_level)
7301                         max_depth = ldev->lower_level;
7302         }
7303
7304         return max_depth;
7305 }
7306
7307 static int __netdev_update_upper_level(struct net_device *dev,
7308                                        struct netdev_nested_priv *__unused)
7309 {
7310         dev->upper_level = __netdev_upper_depth(dev) + 1;
7311         return 0;
7312 }
7313
7314 #ifdef CONFIG_LOCKDEP
7315 static LIST_HEAD(net_unlink_list);
7316
7317 static void net_unlink_todo(struct net_device *dev)
7318 {
7319         if (list_empty(&dev->unlink_list))
7320                 list_add_tail(&dev->unlink_list, &net_unlink_list);
7321 }
7322 #endif
7323
7324 static int __netdev_update_lower_level(struct net_device *dev,
7325                                        struct netdev_nested_priv *priv)
7326 {
7327         dev->lower_level = __netdev_lower_depth(dev) + 1;
7328
7329 #ifdef CONFIG_LOCKDEP
7330         if (!priv)
7331                 return 0;
7332
7333         if (priv->flags & NESTED_SYNC_IMM)
7334                 dev->nested_level = dev->lower_level - 1;
7335         if (priv->flags & NESTED_SYNC_TODO)
7336                 net_unlink_todo(dev);
7337 #endif
7338         return 0;
7339 }
7340
7341 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7342                                   int (*fn)(struct net_device *dev,
7343                                             struct netdev_nested_priv *priv),
7344                                   struct netdev_nested_priv *priv)
7345 {
7346         struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7347         struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7348         int ret, cur = 0;
7349
7350         now = dev;
7351         iter = &dev->adj_list.lower;
7352
7353         while (1) {
7354                 if (now != dev) {
7355                         ret = fn(now, priv);
7356                         if (ret)
7357                                 return ret;
7358                 }
7359
7360                 next = NULL;
7361                 while (1) {
7362                         ldev = netdev_next_lower_dev_rcu(now, &iter);
7363                         if (!ldev)
7364                                 break;
7365
7366                         next = ldev;
7367                         niter = &ldev->adj_list.lower;
7368                         dev_stack[cur] = now;
7369                         iter_stack[cur++] = iter;
7370                         break;
7371                 }
7372
7373                 if (!next) {
7374                         if (!cur)
7375                                 return 0;
7376                         next = dev_stack[--cur];
7377                         niter = iter_stack[cur];
7378                 }
7379
7380                 now = next;
7381                 iter = niter;
7382         }
7383
7384         return 0;
7385 }
7386 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7387
7388 /**
7389  * netdev_lower_get_first_private_rcu - Get the first ->private from the
7390  *                                     lower neighbour list, RCU
7391  *                                     variant
7392  * @dev: device
7393  *
7394  * Gets the first netdev_adjacent->private from the dev's lower neighbour
7395  * list. The caller must hold RCU read lock.
7396  */
7397 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7398 {
7399         struct netdev_adjacent *lower;
7400
7401         lower = list_first_or_null_rcu(&dev->adj_list.lower,
7402                         struct netdev_adjacent, list);
7403         if (lower)
7404                 return lower->private;
7405         return NULL;
7406 }
7407 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7408
7409 /**
7410  * netdev_master_upper_dev_get_rcu - Get master upper device
7411  * @dev: device
7412  *
7413  * Find a master upper device and return a pointer to it, or NULL in case
7414  * it's not there. The caller must hold the RCU read lock.
7415  */
7416 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7417 {
7418         struct netdev_adjacent *upper;
7419
7420         upper = list_first_or_null_rcu(&dev->adj_list.upper,
7421                                        struct netdev_adjacent, list);
7422         if (upper && likely(upper->master))
7423                 return upper->dev;
7424         return NULL;
7425 }
7426 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
7427
7428 static int netdev_adjacent_sysfs_add(struct net_device *dev,
7429                               struct net_device *adj_dev,
7430                               struct list_head *dev_list)
7431 {
7432         char linkname[IFNAMSIZ+7];
7433
7434         sprintf(linkname, dev_list == &dev->adj_list.upper ?
7435                 "upper_%s" : "lower_%s", adj_dev->name);
7436         return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7437                                  linkname);
7438 }
7439 static void netdev_adjacent_sysfs_del(struct net_device *dev,
7440                                char *name,
7441                                struct list_head *dev_list)
7442 {
7443         char linkname[IFNAMSIZ+7];
7444
7445         sprintf(linkname, dev_list == &dev->adj_list.upper ?
7446                 "upper_%s" : "lower_%s", name);
7447         sysfs_remove_link(&(dev->dev.kobj), linkname);
7448 }
7449
7450 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7451                                                  struct net_device *adj_dev,
7452                                                  struct list_head *dev_list)
7453 {
7454         return (dev_list == &dev->adj_list.upper ||
7455                 dev_list == &dev->adj_list.lower) &&
7456                 net_eq(dev_net(dev), dev_net(adj_dev));
7457 }
7458
7459 static int __netdev_adjacent_dev_insert(struct net_device *dev,
7460                                         struct net_device *adj_dev,
7461                                         struct list_head *dev_list,
7462                                         void *private, bool master)
7463 {
7464         struct netdev_adjacent *adj;
7465         int ret;
7466
7467         adj = __netdev_find_adj(adj_dev, dev_list);
7468
7469         if (adj) {
7470                 adj->ref_nr += 1;
7471                 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7472                          dev->name, adj_dev->name, adj->ref_nr);
7473
7474                 return 0;
7475         }
7476
7477         adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7478         if (!adj)
7479                 return -ENOMEM;
7480
7481         adj->dev = adj_dev;
7482         adj->master = master;
7483         adj->ref_nr = 1;
7484         adj->private = private;
7485         adj->ignore = false;
7486         netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);
7487
7488         pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7489                  dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7490
7491         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7492                 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7493                 if (ret)
7494                         goto free_adj;
7495         }
7496
7497         /* Ensure that the master link is always the first item in the list. */
7498         if (master) {
7499                 ret = sysfs_create_link(&(dev->dev.kobj),
7500                                         &(adj_dev->dev.kobj), "master");
7501                 if (ret)
7502                         goto remove_symlinks;
7503
7504                 list_add_rcu(&adj->list, dev_list);
7505         } else {
7506                 list_add_tail_rcu(&adj->list, dev_list);
7507         }
7508
7509         return 0;
7510
7511 remove_symlinks:
7512         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7513                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7514 free_adj:
7515         netdev_put(adj_dev, &adj->dev_tracker);
7516         kfree(adj);
7517
7518         return ret;
7519 }
7520
7521 static void __netdev_adjacent_dev_remove(struct net_device *dev,
7522                                          struct net_device *adj_dev,
7523                                          u16 ref_nr,
7524                                          struct list_head *dev_list)
7525 {
7526         struct netdev_adjacent *adj;
7527
7528         pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7529                  dev->name, adj_dev->name, ref_nr);
7530
7531         adj = __netdev_find_adj(adj_dev, dev_list);
7532
7533         if (!adj) {
7534                 pr_err("Adjacency does not exist for device %s from %s\n",
7535                        dev->name, adj_dev->name);
7536                 WARN_ON(1);
7537                 return;
7538         }
7539
7540         if (adj->ref_nr > ref_nr) {
7541                 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7542                          dev->name, adj_dev->name, ref_nr,
7543                          adj->ref_nr - ref_nr);
7544                 adj->ref_nr -= ref_nr;
7545                 return;
7546         }
7547
7548         if (adj->master)
7549                 sysfs_remove_link(&(dev->dev.kobj), "master");
7550
7551         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7552                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7553
7554         list_del_rcu(&adj->list);
7555         pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7556                  adj_dev->name, dev->name, adj_dev->name);
7557         netdev_put(adj_dev, &adj->dev_tracker);
7558         kfree_rcu(adj, rcu);
7559 }
7560
7561 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7562                                             struct net_device *upper_dev,
7563                                             struct list_head *up_list,
7564                                             struct list_head *down_list,
7565                                             void *private, bool master)
7566 {
7567         int ret;
7568
7569         ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7570                                            private, master);
7571         if (ret)
7572                 return ret;
7573
7574         ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7575                                            private, false);
7576         if (ret) {
7577                 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7578                 return ret;
7579         }
7580
7581         return 0;
7582 }
7583
7584 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7585                                                struct net_device *upper_dev,
7586                                                u16 ref_nr,
7587                                                struct list_head *up_list,
7588                                                struct list_head *down_list)
7589 {
7590         __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7591         __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7592 }
7593
7594 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7595                                                 struct net_device *upper_dev,
7596                                                 void *private, bool master)
7597 {
7598         return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7599                                                 &dev->adj_list.upper,
7600                                                 &upper_dev->adj_list.lower,
7601                                                 private, master);
7602 }
7603
7604 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7605                                                    struct net_device *upper_dev)
7606 {
7607         __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7608                                            &dev->adj_list.upper,
7609                                            &upper_dev->adj_list.lower);
7610 }
7611
7612 static int __netdev_upper_dev_link(struct net_device *dev,
7613                                    struct net_device *upper_dev, bool master,
7614                                    void *upper_priv, void *upper_info,
7615                                    struct netdev_nested_priv *priv,
7616                                    struct netlink_ext_ack *extack)
7617 {
7618         struct netdev_notifier_changeupper_info changeupper_info = {
7619                 .info = {
7620                         .dev = dev,
7621                         .extack = extack,
7622                 },
7623                 .upper_dev = upper_dev,
7624                 .master = master,
7625                 .linking = true,
7626                 .upper_info = upper_info,
7627         };
7628         struct net_device *master_dev;
7629         int ret = 0;
7630
7631         ASSERT_RTNL();
7632
7633         if (dev == upper_dev)
7634                 return -EBUSY;
7635
7636         /* To prevent loops, check that dev is not an upper device of upper_dev. */
7637         if (__netdev_has_upper_dev(upper_dev, dev))
7638                 return -EBUSY;
7639
7640         if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7641                 return -EMLINK;
7642
7643         if (!master) {
7644                 if (__netdev_has_upper_dev(dev, upper_dev))
7645                         return -EEXIST;
7646         } else {
7647                 master_dev = __netdev_master_upper_dev_get(dev);
7648                 if (master_dev)
7649                         return master_dev == upper_dev ? -EEXIST : -EBUSY;
7650         }
7651
7652         ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7653                                             &changeupper_info.info);
7654         ret = notifier_to_errno(ret);
7655         if (ret)
7656                 return ret;
7657
7658         ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7659                                                    master);
7660         if (ret)
7661                 return ret;
7662
7663         ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7664                                             &changeupper_info.info);
7665         ret = notifier_to_errno(ret);
7666         if (ret)
7667                 goto rollback;
7668
7669         __netdev_update_upper_level(dev, NULL);
7670         __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7671
7672         __netdev_update_lower_level(upper_dev, priv);
7673         __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7674                                     priv);
7675
7676         return 0;
7677
7678 rollback:
7679         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7680
7681         return ret;
7682 }
7683
7684 /**
7685  * netdev_upper_dev_link - Add a link to the upper device
7686  * @dev: device
7687  * @upper_dev: new upper device
7688  * @extack: netlink extended ack
7689  *
7690  * Adds a link to a device which is upper to this one. The caller must hold
7691  * the RTNL lock. On a failure a negative errno code is returned.
7692  * On success the reference counts are adjusted and the function
7693  * returns zero.
7694  */
7695 int netdev_upper_dev_link(struct net_device *dev,
7696                           struct net_device *upper_dev,
7697                           struct netlink_ext_ack *extack)
7698 {
7699         struct netdev_nested_priv priv = {
7700                 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7701                 .data = NULL,
7702         };
7703
7704         return __netdev_upper_dev_link(dev, upper_dev, false,
7705                                        NULL, NULL, &priv, extack);
7706 }
7707 EXPORT_SYMBOL(netdev_upper_dev_link);
7708
7709 /**
7710  * netdev_master_upper_dev_link - Add a master link to the upper device
7711  * @dev: device
7712  * @upper_dev: new upper device
7713  * @upper_priv: upper device private
7714  * @upper_info: upper info to be passed down via notifier
7715  * @extack: netlink extended ack
7716  *
7717  * Adds a link to a device which is upper to this one. In this case, only
7718  * one master upper device can be linked, although other non-master devices
7719  * might be linked as well. The caller must hold the RTNL lock.
7720  * On a failure a negative errno code is returned. On success the reference
7721  * counts are adjusted and the function returns zero.
7722  */
7723 int netdev_master_upper_dev_link(struct net_device *dev,
7724                                  struct net_device *upper_dev,
7725                                  void *upper_priv, void *upper_info,
7726                                  struct netlink_ext_ack *extack)
7727 {
7728         struct netdev_nested_priv priv = {
7729                 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7730                 .data = NULL,
7731         };
7732
7733         return __netdev_upper_dev_link(dev, upper_dev, true,
7734                                        upper_priv, upper_info, &priv, extack);
7735 }
7736 EXPORT_SYMBOL(netdev_master_upper_dev_link);
7737
7738 static void __netdev_upper_dev_unlink(struct net_device *dev,
7739                                       struct net_device *upper_dev,
7740                                       struct netdev_nested_priv *priv)
7741 {
7742         struct netdev_notifier_changeupper_info changeupper_info = {
7743                 .info = {
7744                         .dev = dev,
7745                 },
7746                 .upper_dev = upper_dev,
7747                 .linking = false,
7748         };
7749
7750         ASSERT_RTNL();
7751
7752         changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7753
7754         call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7755                                       &changeupper_info.info);
7756
7757         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7758
7759         call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7760                                       &changeupper_info.info);
7761
7762         __netdev_update_upper_level(dev, NULL);
7763         __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7764
7765         __netdev_update_lower_level(upper_dev, priv);
7766         __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7767                                     priv);
7768 }
7769
7770 /**
7771  * netdev_upper_dev_unlink - Removes a link to an upper device
7772  * @dev: device
7773  * @upper_dev: upper device to remove the link to
7774  *
7775  * Removes the link to a device which is upper to this one. The caller must hold
7776  * the RTNL lock.
7777  */
7778 void netdev_upper_dev_unlink(struct net_device *dev,
7779                              struct net_device *upper_dev)
7780 {
7781         struct netdev_nested_priv priv = {
7782                 .flags = NESTED_SYNC_TODO,
7783                 .data = NULL,
7784         };
7785
7786         __netdev_upper_dev_unlink(dev, upper_dev, &priv);
7787 }
7788 EXPORT_SYMBOL(netdev_upper_dev_unlink);
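
/*
 * Illustrative sketch (not part of dev.c): how a bonding-style driver
 * might use the linking API above. example_enslave() and
 * example_release() are hypothetical names; the
 * netdev_master_upper_dev_link()/netdev_upper_dev_unlink() calls and
 * the RTNL requirement are the real contract.
 */
static int __maybe_unused example_enslave(struct net_device *master,
					  struct net_device *slave,
					  struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();

	/* Make "master" the unique master upper device of "slave". */
	err = netdev_master_upper_dev_link(slave, master, NULL, NULL, extack);
	if (err)
		return err;

	/* Driver-specific slave setup would go here. */
	return 0;
}

static void __maybe_unused example_release(struct net_device *master,
					   struct net_device *slave)
{
	ASSERT_RTNL();

	/* Drops the adjacency references taken by the link above. */
	netdev_upper_dev_unlink(slave, master);
}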
7789
7790 static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
7791                                       struct net_device *lower_dev,
7792                                       bool val)
7793 {
7794         struct netdev_adjacent *adj;
7795
7796         adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7797         if (adj)
7798                 adj->ignore = val;
7799
7800         adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7801         if (adj)
7802                 adj->ignore = val;
7803 }
7804
7805 static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
7806                                         struct net_device *lower_dev)
7807 {
7808         __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
7809 }
7810
7811 static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
7812                                        struct net_device *lower_dev)
7813 {
7814         __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
7815 }
7816
7817 int netdev_adjacent_change_prepare(struct net_device *old_dev,
7818                                    struct net_device *new_dev,
7819                                    struct net_device *dev,
7820                                    struct netlink_ext_ack *extack)
7821 {
7822         struct netdev_nested_priv priv = {
7823                 .flags = 0,
7824                 .data = NULL,
7825         };
7826         int err;
7827
7828         if (!new_dev)
7829                 return 0;
7830
7831         if (old_dev && new_dev != old_dev)
7832                 netdev_adjacent_dev_disable(dev, old_dev);
7833         err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
7834                                       extack);
7835         if (err) {
7836                 if (old_dev && new_dev != old_dev)
7837                         netdev_adjacent_dev_enable(dev, old_dev);
7838                 return err;
7839         }
7840
7841         return 0;
7842 }
7843 EXPORT_SYMBOL(netdev_adjacent_change_prepare);
7844
7845 void netdev_adjacent_change_commit(struct net_device *old_dev,
7846                                    struct net_device *new_dev,
7847                                    struct net_device *dev)
7848 {
7849         struct netdev_nested_priv priv = {
7850                 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7851                 .data = NULL,
7852         };
7853
7854         if (!new_dev || !old_dev)
7855                 return;
7856
7857         if (new_dev == old_dev)
7858                 return;
7859
7860         netdev_adjacent_dev_enable(dev, old_dev);
7861         __netdev_upper_dev_unlink(old_dev, dev, &priv);
7862 }
7863 EXPORT_SYMBOL(netdev_adjacent_change_commit);
7864
7865 void netdev_adjacent_change_abort(struct net_device *old_dev,
7866                                   struct net_device *new_dev,
7867                                   struct net_device *dev)
7868 {
7869         struct netdev_nested_priv priv = {
7870                 .flags = 0,
7871                 .data = NULL,
7872         };
7873
7874         if (!new_dev)
7875                 return;
7876
7877         if (old_dev && new_dev != old_dev)
7878                 netdev_adjacent_dev_enable(dev, old_dev);
7879
7880         __netdev_upper_dev_unlink(new_dev, dev, &priv);
7881 }
7882 EXPORT_SYMBOL(netdev_adjacent_change_abort);
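
/*
 * Illustrative sketch (not part of dev.c): the prepare/commit/abort trio
 * above supports swapping the "active" lower device under an upper device
 * (active-backup style). example_change_active() and
 * example_driver_switch() are hypothetical; the three
 * netdev_adjacent_change_*() calls and their ordering are the real API.
 */
static int example_driver_switch(struct net_device *upper,
				 struct net_device *new_active)
{
	return 0;	/* hypothetical driver-specific work */
}

static int __maybe_unused example_change_active(struct net_device *upper,
						struct net_device *old_active,
						struct net_device *new_active,
						struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();

	err = netdev_adjacent_change_prepare(old_active, new_active, upper,
					     extack);
	if (err)
		return err;

	err = example_driver_switch(upper, new_active);
	if (err) {
		netdev_adjacent_change_abort(old_active, new_active, upper);
		return err;
	}

	netdev_adjacent_change_commit(old_active, new_active, upper);
	return 0;
}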
7883
7884 /**
7885  * netdev_bonding_info_change - Dispatch event about slave change
7886  * @dev: device
7887  * @bonding_info: info to dispatch
7888  *
7889  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
7890  * The caller must hold the RTNL lock.
7891  */
7892 void netdev_bonding_info_change(struct net_device *dev,
7893                                 struct netdev_bonding_info *bonding_info)
7894 {
7895         struct netdev_notifier_bonding_info info = {
7896                 .info.dev = dev,
7897         };
7898
7899         memcpy(&info.bonding_info, bonding_info,
7900                sizeof(struct netdev_bonding_info));
7901         call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7902                                       &info.info);
7903 }
7904 EXPORT_SYMBOL(netdev_bonding_info_change);
7905
7906 static int netdev_offload_xstats_enable_l3(struct net_device *dev,
7907                                            struct netlink_ext_ack *extack)
7908 {
7909         struct netdev_notifier_offload_xstats_info info = {
7910                 .info.dev = dev,
7911                 .info.extack = extack,
7912                 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
7913         };
7914         int err;
7915         int rc;
7916
7917         dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
7918                                          GFP_KERNEL);
7919         if (!dev->offload_xstats_l3)
7920                 return -ENOMEM;
7921
7922         rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
7923                                                   NETDEV_OFFLOAD_XSTATS_DISABLE,
7924                                                   &info.info);
7925         err = notifier_to_errno(rc);
7926         if (err)
7927                 goto free_stats;
7928
7929         return 0;
7930
7931 free_stats:
7932         kfree(dev->offload_xstats_l3);
7933         dev->offload_xstats_l3 = NULL;
7934         return err;
7935 }
7936
7937 int netdev_offload_xstats_enable(struct net_device *dev,
7938                                  enum netdev_offload_xstats_type type,
7939                                  struct netlink_ext_ack *extack)
7940 {
7941         ASSERT_RTNL();
7942
7943         if (netdev_offload_xstats_enabled(dev, type))
7944                 return -EALREADY;
7945
7946         switch (type) {
7947         case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
7948                 return netdev_offload_xstats_enable_l3(dev, extack);
7949         }
7950
7951         WARN_ON(1);
7952         return -EINVAL;
7953 }
7954 EXPORT_SYMBOL(netdev_offload_xstats_enable);
7955
7956 static void netdev_offload_xstats_disable_l3(struct net_device *dev)
7957 {
7958         struct netdev_notifier_offload_xstats_info info = {
7959                 .info.dev = dev,
7960                 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
7961         };
7962
7963         call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
7964                                       &info.info);
7965         kfree(dev->offload_xstats_l3);
7966         dev->offload_xstats_l3 = NULL;
7967 }
7968
7969 int netdev_offload_xstats_disable(struct net_device *dev,
7970                                   enum netdev_offload_xstats_type type)
7971 {
7972         ASSERT_RTNL();
7973
7974         if (!netdev_offload_xstats_enabled(dev, type))
7975                 return -EALREADY;
7976
7977         switch (type) {
7978         case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
7979                 netdev_offload_xstats_disable_l3(dev);
7980                 return 0;
7981         }
7982
7983         WARN_ON(1);
7984         return -EINVAL;
7985 }
7986 EXPORT_SYMBOL(netdev_offload_xstats_disable);
7987
7988 static void netdev_offload_xstats_disable_all(struct net_device *dev)
7989 {
7990         netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
7991 }
7992
7993 static struct rtnl_hw_stats64 *
7994 netdev_offload_xstats_get_ptr(const struct net_device *dev,
7995                               enum netdev_offload_xstats_type type)
7996 {
7997         switch (type) {
7998         case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
7999                 return dev->offload_xstats_l3;
8000         }
8001
8002         WARN_ON(1);
8003         return NULL;
8004 }
8005
8006 bool netdev_offload_xstats_enabled(const struct net_device *dev,
8007                                    enum netdev_offload_xstats_type type)
8008 {
8009         ASSERT_RTNL();
8010
8011         return netdev_offload_xstats_get_ptr(dev, type);
8012 }
8013 EXPORT_SYMBOL(netdev_offload_xstats_enabled);
8014
8015 struct netdev_notifier_offload_xstats_ru {
8016         bool used;
8017 };
8018
8019 struct netdev_notifier_offload_xstats_rd {
8020         struct rtnl_hw_stats64 stats;
8021         bool used;
8022 };
8023
8024 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
8025                                   const struct rtnl_hw_stats64 *src)
8026 {
8027         dest->rx_packets          += src->rx_packets;
8028         dest->tx_packets          += src->tx_packets;
8029         dest->rx_bytes            += src->rx_bytes;
8030         dest->tx_bytes            += src->tx_bytes;
8031         dest->rx_errors           += src->rx_errors;
8032         dest->tx_errors           += src->tx_errors;
8033         dest->rx_dropped          += src->rx_dropped;
8034         dest->tx_dropped          += src->tx_dropped;
8035         dest->multicast           += src->multicast;
8036 }
8037
8038 static int netdev_offload_xstats_get_used(struct net_device *dev,
8039                                           enum netdev_offload_xstats_type type,
8040                                           bool *p_used,
8041                                           struct netlink_ext_ack *extack)
8042 {
8043         struct netdev_notifier_offload_xstats_ru report_used = {};
8044         struct netdev_notifier_offload_xstats_info info = {
8045                 .info.dev = dev,
8046                 .info.extack = extack,
8047                 .type = type,
8048                 .report_used = &report_used,
8049         };
8050         int rc;
8051
8052         WARN_ON(!netdev_offload_xstats_enabled(dev, type));
8053         rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
8054                                            &info.info);
8055         *p_used = report_used.used;
8056         return notifier_to_errno(rc);
8057 }
8058
8059 static int netdev_offload_xstats_get_stats(struct net_device *dev,
8060                                            enum netdev_offload_xstats_type type,
8061                                            struct rtnl_hw_stats64 *p_stats,
8062                                            bool *p_used,
8063                                            struct netlink_ext_ack *extack)
8064 {
8065         struct netdev_notifier_offload_xstats_rd report_delta = {};
8066         struct netdev_notifier_offload_xstats_info info = {
8067                 .info.dev = dev,
8068                 .info.extack = extack,
8069                 .type = type,
8070                 .report_delta = &report_delta,
8071         };
8072         struct rtnl_hw_stats64 *stats;
8073         int rc;
8074
8075         stats = netdev_offload_xstats_get_ptr(dev, type);
8076         if (WARN_ON(!stats))
8077                 return -EINVAL;
8078
8079         rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
8080                                            &info.info);
8081
8082         /* Cache whatever we got, even if there was an error, otherwise the
8083          * successful stats retrievals would get lost.
8084          */
8085         netdev_hw_stats64_add(stats, &report_delta.stats);
8086
8087         if (p_stats)
8088                 *p_stats = *stats;
8089         *p_used = report_delta.used;
8090
8091         return notifier_to_errno(rc);
8092 }
8093
8094 int netdev_offload_xstats_get(struct net_device *dev,
8095                               enum netdev_offload_xstats_type type,
8096                               struct rtnl_hw_stats64 *p_stats, bool *p_used,
8097                               struct netlink_ext_ack *extack)
8098 {
8099         ASSERT_RTNL();
8100
8101         if (p_stats)
8102                 return netdev_offload_xstats_get_stats(dev, type, p_stats,
8103                                                        p_used, extack);
8104         else
8105                 return netdev_offload_xstats_get_used(dev, type, p_used,
8106                                                       extack);
8107 }
8108 EXPORT_SYMBOL(netdev_offload_xstats_get);
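
/*
 * Illustrative sketch (not part of dev.c): the consumer side of the
 * offload-xstats API above. Under RTNL, enable L3 stats collection, read
 * back the accumulated hardware counters, then disable collection.
 * example_read_l3_stats() is a hypothetical name.
 */
static int __maybe_unused example_read_l3_stats(struct net_device *dev,
						struct netlink_ext_ack *extack)
{
	struct rtnl_hw_stats64 stats;
	bool used;
	int err;

	ASSERT_RTNL();

	if (!netdev_offload_xstats_enabled(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
		err = netdev_offload_xstats_enable(dev,
						   NETDEV_OFFLOAD_XSTATS_TYPE_L3,
						   extack);
		if (err)
			return err;
	}

	err = netdev_offload_xstats_get(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
					&stats, &used, extack);
	if (err)
		return err;

	/* "used" says whether any notifiee actually reports these stats. */
	pr_debug("%s: L3 rx_packets %llu (used: %d)\n",
		 dev->name, stats.rx_packets, used);

	return netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
}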
8109
8110 void
8111 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
8112                                    const struct rtnl_hw_stats64 *stats)
8113 {
8114         report_delta->used = true;
8115         netdev_hw_stats64_add(&report_delta->stats, stats);
8116 }
8117 EXPORT_SYMBOL(netdev_offload_xstats_report_delta);
8118
8119 void
8120 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
8121 {
8122         report_used->used = true;
8123 }
8124 EXPORT_SYMBOL(netdev_offload_xstats_report_used);
8125
8126 void netdev_offload_xstats_push_delta(struct net_device *dev,
8127                                       enum netdev_offload_xstats_type type,
8128                                       const struct rtnl_hw_stats64 *p_stats)
8129 {
8130         struct rtnl_hw_stats64 *stats;
8131
8132         ASSERT_RTNL();
8133
8134         stats = netdev_offload_xstats_get_ptr(dev, type);
8135         if (WARN_ON(!stats))
8136                 return;
8137
8138         netdev_hw_stats64_add(stats, p_stats);
8139 }
8140 EXPORT_SYMBOL(netdev_offload_xstats_push_delta);
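
/*
 * Illustrative sketch (not part of dev.c): the driver side of the same
 * API. A driver offloading L3 forwarding would answer the
 * REPORT_USED/REPORT_DELTA notifications roughly like this.
 * example_netdevice_event() and example_hw_read_and_clear() are
 * hypothetical; the report helpers above are the real API.
 */
static void example_hw_read_and_clear(struct net_device *dev,
				      struct rtnl_hw_stats64 *stats)
{
	/* Hypothetical: read-and-clear the device's counters from hardware. */
	memset(stats, 0, sizeof(*stats));
}

static int __maybe_unused example_netdevice_event(struct notifier_block *nb,
						  unsigned long event, void *ptr)
{
	struct netdev_notifier_offload_xstats_info *info = ptr;
	struct rtnl_hw_stats64 stats;

	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
		if (info->type == NETDEV_OFFLOAD_XSTATS_TYPE_L3)
			netdev_offload_xstats_report_used(info->report_used);
		break;
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		if (info->type != NETDEV_OFFLOAD_XSTATS_TYPE_L3)
			break;
		example_hw_read_and_clear(info->info.dev, &stats);
		netdev_offload_xstats_report_delta(info->report_delta, &stats);
		break;
	}
	return NOTIFY_DONE;
}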
8141
8142 /**
8143  * netdev_get_xmit_slave - Get the xmit slave of master device
8144  * @dev: device
8145  * @skb: The packet
8146  * @all_slaves: assume all the slaves are active
8147  *
8148  * The reference counters are not incremented so the caller must be
8149  * careful with locks. The caller must hold the RCU read lock.
8150  * %NULL is returned if no slave is found.
8151  */
8152
8153 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8154                                          struct sk_buff *skb,
8155                                          bool all_slaves)
8156 {
8157         const struct net_device_ops *ops = dev->netdev_ops;
8158
8159         if (!ops->ndo_get_xmit_slave)
8160                 return NULL;
8161         return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8162 }
8163 EXPORT_SYMBOL(netdev_get_xmit_slave);
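
/*
 * Illustrative sketch (not part of dev.c): netdev_get_xmit_slave()
 * takes no reference on the returned slave, so the result is only valid
 * inside the RCU read-side section. example_xmit_slave_mtu() is a
 * hypothetical helper.
 */
static int __maybe_unused example_xmit_slave_mtu(struct net_device *bond_dev,
						 struct sk_buff *skb)
{
	struct net_device *slave;
	int mtu = 0;

	rcu_read_lock();
	slave = netdev_get_xmit_slave(bond_dev, skb, false);
	if (slave)
		/* Pairs with the WRITE_ONCE() in __dev_set_mtu(). */
		mtu = READ_ONCE(slave->mtu);
	rcu_read_unlock();

	return mtu;
}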
8164
8165 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8166                                                   struct sock *sk)
8167 {
8168         const struct net_device_ops *ops = dev->netdev_ops;
8169
8170         if (!ops->ndo_sk_get_lower_dev)
8171                 return NULL;
8172         return ops->ndo_sk_get_lower_dev(dev, sk);
8173 }
8174
8175 /**
8176  * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8177  * @dev: device
8178  * @sk: the socket
8179  *
8180  * %NULL is returned if no lower device is found.
8181  */
8182
8183 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
8184                                             struct sock *sk)
8185 {
8186         struct net_device *lower;
8187
8188         lower = netdev_sk_get_lower_dev(dev, sk);
8189         while (lower) {
8190                 dev = lower;
8191                 lower = netdev_sk_get_lower_dev(dev, sk);
8192         }
8193
8194         return dev;
8195 }
8196 EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
8197
8198 static void netdev_adjacent_add_links(struct net_device *dev)
8199 {
8200         struct netdev_adjacent *iter;
8201
8202         struct net *net = dev_net(dev);
8203
8204         list_for_each_entry(iter, &dev->adj_list.upper, list) {
8205                 if (!net_eq(net, dev_net(iter->dev)))
8206                         continue;
8207                 netdev_adjacent_sysfs_add(iter->dev, dev,
8208                                           &iter->dev->adj_list.lower);
8209                 netdev_adjacent_sysfs_add(dev, iter->dev,
8210                                           &dev->adj_list.upper);
8211         }
8212
8213         list_for_each_entry(iter, &dev->adj_list.lower, list) {
8214                 if (!net_eq(net, dev_net(iter->dev)))
8215                         continue;
8216                 netdev_adjacent_sysfs_add(iter->dev, dev,
8217                                           &iter->dev->adj_list.upper);
8218                 netdev_adjacent_sysfs_add(dev, iter->dev,
8219                                           &dev->adj_list.lower);
8220         }
8221 }
8222
8223 static void netdev_adjacent_del_links(struct net_device *dev)
8224 {
8225         struct netdev_adjacent *iter;
8226
8227         struct net *net = dev_net(dev);
8228
8229         list_for_each_entry(iter, &dev->adj_list.upper, list) {
8230                 if (!net_eq(net, dev_net(iter->dev)))
8231                         continue;
8232                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8233                                           &iter->dev->adj_list.lower);
8234                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8235                                           &dev->adj_list.upper);
8236         }
8237
8238         list_for_each_entry(iter, &dev->adj_list.lower, list) {
8239                 if (!net_eq(net, dev_net(iter->dev)))
8240                         continue;
8241                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8242                                           &iter->dev->adj_list.upper);
8243                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8244                                           &dev->adj_list.lower);
8245         }
8246 }
8247
8248 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
8249 {
8250         struct netdev_adjacent *iter;
8251
8252         struct net *net = dev_net(dev);
8253
8254         list_for_each_entry(iter, &dev->adj_list.upper, list) {
8255                 if (!net_eq(net, dev_net(iter->dev)))
8256                         continue;
8257                 netdev_adjacent_sysfs_del(iter->dev, oldname,
8258                                           &iter->dev->adj_list.lower);
8259                 netdev_adjacent_sysfs_add(iter->dev, dev,
8260                                           &iter->dev->adj_list.lower);
8261         }
8262
8263         list_for_each_entry(iter, &dev->adj_list.lower, list) {
8264                 if (!net_eq(net, dev_net(iter->dev)))
8265                         continue;
8266                 netdev_adjacent_sysfs_del(iter->dev, oldname,
8267                                           &iter->dev->adj_list.upper);
8268                 netdev_adjacent_sysfs_add(iter->dev, dev,
8269                                           &iter->dev->adj_list.upper);
8270         }
8271 }
8272
8273 void *netdev_lower_dev_get_private(struct net_device *dev,
8274                                    struct net_device *lower_dev)
8275 {
8276         struct netdev_adjacent *lower;
8277
8278         if (!lower_dev)
8279                 return NULL;
8280         lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8281         if (!lower)
8282                 return NULL;
8283
8284         return lower->private;
8285 }
8286 EXPORT_SYMBOL(netdev_lower_dev_get_private);
8287
8288
8289 /**
8290  * netdev_lower_state_changed - Dispatch event about lower device state change
8291  * @lower_dev: device
8292  * @lower_state_info: state to dispatch
8293  *
8294  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8295  * The caller must hold the RTNL lock.
8296  */
8297 void netdev_lower_state_changed(struct net_device *lower_dev,
8298                                 void *lower_state_info)
8299 {
8300         struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8301                 .info.dev = lower_dev,
8302         };
8303
8304         ASSERT_RTNL();
8305         changelowerstate_info.lower_state_info = lower_state_info;
8306         call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
8307                                       &changelowerstate_info.info);
8308 }
8309 EXPORT_SYMBOL(netdev_lower_state_changed);
8310
8311 static void dev_change_rx_flags(struct net_device *dev, int flags)
8312 {
8313         const struct net_device_ops *ops = dev->netdev_ops;
8314
8315         if (ops->ndo_change_rx_flags)
8316                 ops->ndo_change_rx_flags(dev, flags);
8317 }
8318
8319 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8320 {
8321         unsigned int old_flags = dev->flags;
8322         kuid_t uid;
8323         kgid_t gid;
8324
8325         ASSERT_RTNL();
8326
8327         dev->flags |= IFF_PROMISC;
8328         dev->promiscuity += inc;
8329         if (dev->promiscuity == 0) {
8330                 /*
8331                  * Avoid overflow.
8332                  * If inc causes overflow, leave promisc untouched and return an error.
8333                  */
8334                 if (inc < 0)
8335                         dev->flags &= ~IFF_PROMISC;
8336                 else {
8337                         dev->promiscuity -= inc;
8338                         netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
8339                         return -EOVERFLOW;
8340                 }
8341         }
8342         if (dev->flags != old_flags) {
8343                 pr_info("device %s %s promiscuous mode\n",
8344                         dev->name,
8345                         dev->flags & IFF_PROMISC ? "entered" : "left");
8346                 if (audit_enabled) {
8347                         current_uid_gid(&uid, &gid);
8348                         audit_log(audit_context(), GFP_ATOMIC,
8349                                   AUDIT_ANOM_PROMISCUOUS,
8350                                   "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8351                                   dev->name, (dev->flags & IFF_PROMISC),
8352                                   (old_flags & IFF_PROMISC),
8353                                   from_kuid(&init_user_ns, audit_get_loginuid(current)),
8354                                   from_kuid(&init_user_ns, uid),
8355                                   from_kgid(&init_user_ns, gid),
8356                                   audit_get_sessionid(current));
8357                 }
8358
8359                 dev_change_rx_flags(dev, IFF_PROMISC);
8360         }
8361         if (notify)
8362                 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
8363         return 0;
8364 }
8365
8366 /**
8367  *      dev_set_promiscuity     - update promiscuity count on a device
8368  *      @dev: device
8369  *      @inc: modifier
8370  *
8371  *      Add or remove promiscuity from a device. While the count in the device
8372  *      remains above zero the interface remains promiscuous. Once it hits zero
8373  *      the device reverts back to normal filtering operation. A negative inc
8374  *      value is used to drop promiscuity on the device.
8375  *      Return 0 if successful or a negative errno code on error.
8376  */
8377 int dev_set_promiscuity(struct net_device *dev, int inc)
8378 {
8379         unsigned int old_flags = dev->flags;
8380         int err;
8381
8382         err = __dev_set_promiscuity(dev, inc, true);
8383         if (err < 0)
8384                 return err;
8385         if (dev->flags != old_flags)
8386                 dev_set_rx_mode(dev);
8387         return err;
8388 }
8389 EXPORT_SYMBOL(dev_set_promiscuity);
8390
8391 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8392 {
8393         unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8394
8395         ASSERT_RTNL();
8396
8397         dev->flags |= IFF_ALLMULTI;
8398         dev->allmulti += inc;
8399         if (dev->allmulti == 0) {
8400                 /*
8401                  * Avoid overflow.
8402                  * If inc causes overflow, leave allmulti untouched and return an error.
8403                  */
8404                 if (inc < 0)
8405                         dev->flags &= ~IFF_ALLMULTI;
8406                 else {
8407                         dev->allmulti -= inc;
8408                         netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
8409                         return -EOVERFLOW;
8410                 }
8411         }
8412         if (dev->flags ^ old_flags) {
8413                 dev_change_rx_flags(dev, IFF_ALLMULTI);
8414                 dev_set_rx_mode(dev);
8415                 if (notify)
8416                         __dev_notify_flags(dev, old_flags,
8417                                            dev->gflags ^ old_gflags);
8418         }
8419         return 0;
8420 }
8421
8422 /**
8423  *      dev_set_allmulti        - update allmulti count on a device
8424  *      @dev: device
8425  *      @inc: modifier
8426  *
8427  *      Add or remove reception of all multicast frames to a device. While the
8428  *      count in the device remains above zero the interface remains listening
8429  *      to all multicast frames. Once it hits zero the device reverts back to normal
8430  *      filtering operation. A negative @inc value is used to drop the counter
8431  *      when releasing a resource needing all multicasts.
8432  *      Return 0 if successful or a negative errno code on error.
8433  */
8434
8435 int dev_set_allmulti(struct net_device *dev, int inc)
8436 {
8437         return __dev_set_allmulti(dev, inc, true);
8438 }
8439 EXPORT_SYMBOL(dev_set_allmulti);
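
/*
 * Illustrative sketch (not part of dev.c): dev_set_promiscuity() and
 * dev_set_allmulti() maintain reference counts, so every +1 must
 * eventually be matched by a -1. A packet-capture-style user would
 * bracket its lifetime like this; example_capture_start()/_stop() are
 * hypothetical names.
 */
static int __maybe_unused example_capture_start(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();

	err = dev_set_promiscuity(dev, 1);
	if (err)
		return err;

	err = dev_set_allmulti(dev, 1);
	if (err)
		dev_set_promiscuity(dev, -1);	/* roll back on failure */
	return err;
}

static void __maybe_unused example_capture_stop(struct net_device *dev)
{
	ASSERT_RTNL();

	dev_set_allmulti(dev, -1);
	dev_set_promiscuity(dev, -1);
}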
8440
8441 /*
8442  *      Upload unicast and multicast address lists to device and
8443  *      configure RX filtering. When the device doesn't support unicast
8444  *      filtering it is put in promiscuous mode while unicast addresses
8445  *      are present.
8446  */
8447 void __dev_set_rx_mode(struct net_device *dev)
8448 {
8449         const struct net_device_ops *ops = dev->netdev_ops;
8450
8451         /* dev_open will call this function so the list will stay sane. */
8452         if (!(dev->flags&IFF_UP))
8453                 return;
8454
8455         if (!netif_device_present(dev))
8456                 return;
8457
8458         if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8459                 /* Unicast address changes may only happen under the rtnl,
8460                  * therefore calling __dev_set_promiscuity here is safe.
8461                  */
8462                 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8463                         __dev_set_promiscuity(dev, 1, false);
8464                         dev->uc_promisc = true;
8465                 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8466                         __dev_set_promiscuity(dev, -1, false);
8467                         dev->uc_promisc = false;
8468                 }
8469         }
8470
8471         if (ops->ndo_set_rx_mode)
8472                 ops->ndo_set_rx_mode(dev);
8473 }
8474
8475 void dev_set_rx_mode(struct net_device *dev)
8476 {
8477         netif_addr_lock_bh(dev);
8478         __dev_set_rx_mode(dev);
8479         netif_addr_unlock_bh(dev);
8480 }
8481
8482 /**
8483  *      dev_get_flags - get flags reported to userspace
8484  *      @dev: device
8485  *
8486  *      Get the combination of flag bits exported through APIs to userspace.
8487  */
8488 unsigned int dev_get_flags(const struct net_device *dev)
8489 {
8490         unsigned int flags;
8491
8492         flags = (dev->flags & ~(IFF_PROMISC |
8493                                 IFF_ALLMULTI |
8494                                 IFF_RUNNING |
8495                                 IFF_LOWER_UP |
8496                                 IFF_DORMANT)) |
8497                 (dev->gflags & (IFF_PROMISC |
8498                                 IFF_ALLMULTI));
8499
8500         if (netif_running(dev)) {
8501                 if (netif_oper_up(dev))
8502                         flags |= IFF_RUNNING;
8503                 if (netif_carrier_ok(dev))
8504                         flags |= IFF_LOWER_UP;
8505                 if (netif_dormant(dev))
8506                         flags |= IFF_DORMANT;
8507         }
8508
8509         return flags;
8510 }
8511 EXPORT_SYMBOL(dev_get_flags);
8512
8513 int __dev_change_flags(struct net_device *dev, unsigned int flags,
8514                        struct netlink_ext_ack *extack)
8515 {
8516         unsigned int old_flags = dev->flags;
8517         int ret;
8518
8519         ASSERT_RTNL();
8520
8521         /*
8522          *      Set the flags on our device.
8523          */
8524
8525         dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8526                                IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8527                                IFF_AUTOMEDIA)) |
8528                      (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8529                                     IFF_ALLMULTI));
8530
8531         /*
8532          *      Load in the correct multicast list now that the flags have changed.
8533          */
8534
8535         if ((old_flags ^ flags) & IFF_MULTICAST)
8536                 dev_change_rx_flags(dev, IFF_MULTICAST);
8537
8538         dev_set_rx_mode(dev);
8539
8540         /*
8541          *      Have we downed the interface? We handle IFF_UP ourselves
8542          *      according to user attempts to set it, rather than blindly
8543          *      setting it.
8544          */
8545
8546         ret = 0;
8547         if ((old_flags ^ flags) & IFF_UP) {
8548                 if (old_flags & IFF_UP)
8549                         __dev_close(dev);
8550                 else
8551                         ret = __dev_open(dev, extack);
8552         }
8553
8554         if ((flags ^ dev->gflags) & IFF_PROMISC) {
8555                 int inc = (flags & IFF_PROMISC) ? 1 : -1;
8556                 unsigned int old_flags = dev->flags;
8557
8558                 dev->gflags ^= IFF_PROMISC;
8559
8560                 if (__dev_set_promiscuity(dev, inc, false) >= 0)
8561                         if (dev->flags != old_flags)
8562                                 dev_set_rx_mode(dev);
8563         }
8564
8565         /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8566          * is important. Some (broken) drivers set IFF_PROMISC when
8567          * IFF_ALLMULTI is requested, without asking us and without reporting it.
8568          */
8569         if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8570                 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8571
8572                 dev->gflags ^= IFF_ALLMULTI;
8573                 __dev_set_allmulti(dev, inc, false);
8574         }
8575
8576         return ret;
8577 }
8578
8579 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8580                         unsigned int gchanges)
8581 {
8582         unsigned int changes = dev->flags ^ old_flags;
8583
8584         if (gchanges)
8585                 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
8586
8587         if (changes & IFF_UP) {
8588                 if (dev->flags & IFF_UP)
8589                         call_netdevice_notifiers(NETDEV_UP, dev);
8590                 else
8591                         call_netdevice_notifiers(NETDEV_DOWN, dev);
8592         }
8593
8594         if (dev->flags & IFF_UP &&
8595             (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8596                 struct netdev_notifier_change_info change_info = {
8597                         .info = {
8598                                 .dev = dev,
8599                         },
8600                         .flags_changed = changes,
8601                 };
8602
8603                 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8604         }
8605 }
8606
8607 /**
8608  *      dev_change_flags - change device settings
8609  *      @dev: device
8610  *      @flags: device state flags
8611  *      @extack: netlink extended ack
8612  *
8613  *      Change settings on device based state flags. The flags are
8614  *      in the userspace exported format.
8615  */
8616 int dev_change_flags(struct net_device *dev, unsigned int flags,
8617                      struct netlink_ext_ack *extack)
8618 {
8619         int ret;
8620         unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8621
8622         ret = __dev_change_flags(dev, flags, extack);
8623         if (ret < 0)
8624                 return ret;
8625
8626         changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8627         __dev_notify_flags(dev, old_flags, changes);
8628         return ret;
8629 }
8630 EXPORT_SYMBOL(dev_change_flags);
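
/*
 * Illustrative sketch (not part of dev.c): dev_change_flags() takes
 * flags in the userspace format produced by dev_get_flags(), so
 * bringing an interface up is a read-modify-write of that value under
 * RTNL. example_bring_up() is a hypothetical helper.
 */
static int __maybe_unused example_bring_up(struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	unsigned int flags;

	ASSERT_RTNL();

	flags = dev_get_flags(dev);
	if (flags & IFF_UP)
		return 0;

	return dev_change_flags(dev, flags | IFF_UP, extack);
}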
8631
8632 int __dev_set_mtu(struct net_device *dev, int new_mtu)
8633 {
8634         const struct net_device_ops *ops = dev->netdev_ops;
8635
8636         if (ops->ndo_change_mtu)
8637                 return ops->ndo_change_mtu(dev, new_mtu);
8638
8639         /* Pairs with all the lockless reads of dev->mtu in the stack */
8640         WRITE_ONCE(dev->mtu, new_mtu);
8641         return 0;
8642 }
8643 EXPORT_SYMBOL(__dev_set_mtu);
8644
8645 int dev_validate_mtu(struct net_device *dev, int new_mtu,
8646                      struct netlink_ext_ack *extack)
8647 {
8648         /* MTU must be positive, and in range */
8649         if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8650                 NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8651                 return -EINVAL;
8652         }
8653
8654         if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8655                 NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8656                 return -EINVAL;
8657         }
8658         return 0;
8659 }
8660
8661 /**
8662  *      dev_set_mtu_ext - Change maximum transfer unit
8663  *      @dev: device
8664  *      @new_mtu: new transfer unit
8665  *      @extack: netlink extended ack
8666  *
8667  *      Change the maximum transfer size of the network device.
8668  */
8669 int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8670                     struct netlink_ext_ack *extack)
8671 {
8672         int err, orig_mtu;
8673
8674         if (new_mtu == dev->mtu)
8675                 return 0;
8676
8677         err = dev_validate_mtu(dev, new_mtu, extack);
8678         if (err)
8679                 return err;
8680
8681         if (!netif_device_present(dev))
8682                 return -ENODEV;
8683
8684         err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8685         err = notifier_to_errno(err);
8686         if (err)
8687                 return err;
8688
8689         orig_mtu = dev->mtu;
8690         err = __dev_set_mtu(dev, new_mtu);
8691
8692         if (!err) {
8693                 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8694                                                    orig_mtu);
8695                 err = notifier_to_errno(err);
8696                 if (err) {
8697                         /* Set the MTU back and notify everyone again,
8698                          * so that they have a chance to revert the change.
8699                          */
8700                         __dev_set_mtu(dev, orig_mtu);
8701                         call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8702                                                      new_mtu);
8703                 }
8704         }
8705         return err;
8706 }
8707
8708 int dev_set_mtu(struct net_device *dev, int new_mtu)
8709 {
8710         struct netlink_ext_ack extack;
8711         int err;
8712
8713         memset(&extack, 0, sizeof(extack));
8714         err = dev_set_mtu_ext(dev, new_mtu, &extack);
8715         if (err && extack._msg)
8716                 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8717         return err;
8718 }
8719 EXPORT_SYMBOL(dev_set_mtu);
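
/*
 * Illustrative sketch (not part of dev.c): dev_set_mtu() already
 * validates against dev->min_mtu/dev->max_mtu and notifies the stack,
 * so a caller only needs to hold RTNL. Clamping first, as the
 * hypothetical example_set_mtu_clamped() does, avoids the ratelimited
 * error message for out-of-range requests.
 */
static int __maybe_unused example_set_mtu_clamped(struct net_device *dev,
						  int new_mtu)
{
	ASSERT_RTNL();

	if (new_mtu < (int)dev->min_mtu)
		new_mtu = dev->min_mtu;
	if (dev->max_mtu && new_mtu > (int)dev->max_mtu)
		new_mtu = dev->max_mtu;

	return dev_set_mtu(dev, new_mtu);
}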
8720
8721 /**
8722  *      dev_change_tx_queue_len - Change TX queue length of a netdevice
8723  *      @dev: device
8724  *      @new_len: new tx queue length
8725  */
8726 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8727 {
8728         unsigned int orig_len = dev->tx_queue_len;
8729         int res;
8730
8731         if (new_len != (unsigned int)new_len)
8732                 return -ERANGE;
8733
8734         if (new_len != orig_len) {
8735                 dev->tx_queue_len = new_len;
8736                 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8737                 res = notifier_to_errno(res);
8738                 if (res)
8739                         goto err_rollback;
8740                 res = dev_qdisc_change_tx_queue_len(dev);
8741                 if (res)
8742                         goto err_rollback;
8743         }
8744
8745         return 0;
8746
8747 err_rollback:
8748         netdev_err(dev, "refused to change device tx_queue_len\n");
8749         dev->tx_queue_len = orig_len;
8750         return res;
8751 }
8752
8753 /**
8754  *      dev_set_group - Change group this device belongs to
8755  *      @dev: device
8756  *      @new_group: group this device should belong to
8757  */
8758 void dev_set_group(struct net_device *dev, int new_group)
8759 {
8760         dev->group = new_group;
8761 }
8762
8763 /**
8764  *      dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8765  *      @dev: device
8766  *      @addr: new address
8767  *      @extack: netlink extended ack
8768  */
8769 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8770                               struct netlink_ext_ack *extack)
8771 {
8772         struct netdev_notifier_pre_changeaddr_info info = {
8773                 .info.dev = dev,
8774                 .info.extack = extack,
8775                 .dev_addr = addr,
8776         };
8777         int rc;
8778
8779         rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8780         return notifier_to_errno(rc);
8781 }
8782 EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8783
8784 /**
8785  *      dev_set_mac_address - Change Media Access Control Address
8786  *      @dev: device
8787  *      @sa: new address
8788  *      @extack: netlink extended ack
8789  *
8790  *      Change the hardware (MAC) address of the device
8791  */
8792 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8793                         struct netlink_ext_ack *extack)
8794 {
8795         const struct net_device_ops *ops = dev->netdev_ops;
8796         int err;
8797
8798         if (!ops->ndo_set_mac_address)
8799                 return -EOPNOTSUPP;
8800         if (sa->sa_family != dev->type)
8801                 return -EINVAL;
8802         if (!netif_device_present(dev))
8803                 return -ENODEV;
8804         err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8805         if (err)
8806                 return err;
8807         err = ops->ndo_set_mac_address(dev, sa);
8808         if (err)
8809                 return err;
8810         dev->addr_assign_type = NET_ADDR_SET;
8811         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8812         add_device_randomness(dev->dev_addr, dev->addr_len);
8813         return 0;
8814 }
8815 EXPORT_SYMBOL(dev_set_mac_address);
8816
8817 static DECLARE_RWSEM(dev_addr_sem);
8818
8819 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
8820                              struct netlink_ext_ack *extack)
8821 {
8822         int ret;
8823
8824         down_write(&dev_addr_sem);
8825         ret = dev_set_mac_address(dev, sa, extack);
8826         up_write(&dev_addr_sem);
8827         return ret;
8828 }
8829 EXPORT_SYMBOL(dev_set_mac_address_user);
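
/*
 * Illustrative sketch (not part of dev.c): callers hand
 * dev_set_mac_address_user() a struct sockaddr whose sa_family matches
 * dev->type and whose sa_data holds the raw hardware address.
 * example_set_ether_mac() is a hypothetical helper for an Ethernet
 * device; it assumes ETH_ALEN from <linux/if_ether.h> and ARPHRD_ETHER
 * from <linux/if_arp.h>.
 */
static int __maybe_unused example_set_ether_mac(struct net_device *dev,
						const u8 *mac,
						struct netlink_ext_ack *extack)
{
	struct sockaddr sa;

	ASSERT_RTNL();

	if (dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN)
		return -EINVAL;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, ETH_ALEN);
	return dev_set_mac_address_user(dev, &sa, extack);
}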
8830
8831 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
8832 {
8833         size_t size = sizeof(sa->sa_data);
8834         struct net_device *dev;
8835         int ret = 0;
8836
8837         down_read(&dev_addr_sem);
8838         rcu_read_lock();
8839
8840         dev = dev_get_by_name_rcu(net, dev_name);
8841         if (!dev) {
8842                 ret = -ENODEV;
8843                 goto unlock;
8844         }
8845         if (!dev->addr_len)
8846                 memset(sa->sa_data, 0, size);
8847         else
8848                 memcpy(sa->sa_data, dev->dev_addr,
8849                        min_t(size_t, size, dev->addr_len));
8850         sa->sa_family = dev->type;
8851
8852 unlock:
8853         rcu_read_unlock();
8854         up_read(&dev_addr_sem);
8855         return ret;
8856 }
8857 EXPORT_SYMBOL(dev_get_mac_address);
8858
8859 /**
8860  *      dev_change_carrier - Change device carrier
8861  *      @dev: device
8862  *      @new_carrier: new value
8863  *
8864  *      Change device carrier
8865  */
8866 int dev_change_carrier(struct net_device *dev, bool new_carrier)
8867 {
8868         const struct net_device_ops *ops = dev->netdev_ops;
8869
8870         if (!ops->ndo_change_carrier)
8871                 return -EOPNOTSUPP;
8872         if (!netif_device_present(dev))
8873                 return -ENODEV;
8874         return ops->ndo_change_carrier(dev, new_carrier);
8875 }
8876
8877 /**
8878  *      dev_get_phys_port_id - Get device physical port ID
8879  *      @dev: device
8880  *      @ppid: port ID
8881  *
8882  *      Get device physical port ID
8883  */
8884 int dev_get_phys_port_id(struct net_device *dev,
8885                          struct netdev_phys_item_id *ppid)
8886 {
8887         const struct net_device_ops *ops = dev->netdev_ops;
8888
8889         if (!ops->ndo_get_phys_port_id)
8890                 return -EOPNOTSUPP;
8891         return ops->ndo_get_phys_port_id(dev, ppid);
8892 }
8893
8894 /**
8895  *      dev_get_phys_port_name - Get device physical port name
8896  *      @dev: device
8897  *      @name: port name
8898  *      @len: limit of bytes to copy to name
8899  *
8900  *      Get device physical port name
8901  */
8902 int dev_get_phys_port_name(struct net_device *dev,
8903                            char *name, size_t len)
8904 {
8905         const struct net_device_ops *ops = dev->netdev_ops;
8906         int err;
8907
8908         if (ops->ndo_get_phys_port_name) {
8909                 err = ops->ndo_get_phys_port_name(dev, name, len);
8910                 if (err != -EOPNOTSUPP)
8911                         return err;
8912         }
8913         return devlink_compat_phys_port_name_get(dev, name, len);
8914 }
8915
8916 /**
8917  *      dev_get_port_parent_id - Get the device's port parent identifier
8918  *      @dev: network device
8919  *      @ppid: pointer to a storage for the port's parent identifier
8920  *      @recurse: allow/disallow recursion to lower devices
8921  *
8922  *      Get the device's port parent identifier
8923  */
8924 int dev_get_port_parent_id(struct net_device *dev,
8925                            struct netdev_phys_item_id *ppid,
8926                            bool recurse)
8927 {
8928         const struct net_device_ops *ops = dev->netdev_ops;
8929         struct netdev_phys_item_id first = { };
8930         struct net_device *lower_dev;
8931         struct list_head *iter;
8932         int err;
8933
8934         if (ops->ndo_get_port_parent_id) {
8935                 err = ops->ndo_get_port_parent_id(dev, ppid);
8936                 if (err != -EOPNOTSUPP)
8937                         return err;
8938         }
8939
8940         err = devlink_compat_switch_id_get(dev, ppid);
8941         if (!recurse || err != -EOPNOTSUPP)
8942                 return err;
8943
8944         netdev_for_each_lower_dev(dev, lower_dev, iter) {
8945                 err = dev_get_port_parent_id(lower_dev, ppid, true);
8946                 if (err)
8947                         break;
8948                 if (!first.id_len)
8949                         first = *ppid;
8950                 else if (memcmp(&first, ppid, sizeof(*ppid)))
8951                         return -EOPNOTSUPP;
8952         }
8953
8954         return err;
8955 }
8956 EXPORT_SYMBOL(dev_get_port_parent_id);
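
/*
 * Illustrative sketch (not part of dev.c): with @recurse true, a stacked
 * device (e.g. a bridge over switch ports) reports a parent ID only when
 * all its lower devices agree on one, which is what makes the ID usable
 * as a switch identity. example_print_switch_id() is a hypothetical
 * helper.
 */
static void __maybe_unused example_print_switch_id(struct net_device *dev)
{
	struct netdev_phys_item_id ppid;

	if (dev_get_port_parent_id(dev, &ppid, true))
		return;		/* no parent ID, or the lowers disagree */

	pr_debug("%s: switch id %*phN\n", dev->name, ppid.id_len, ppid.id);
}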
8957
8958 /**
8959  *      netdev_port_same_parent_id - Indicate if two network devices have
8960  *      the same port parent identifier
8961  *      @a: first network device
8962  *      @b: second network device
8963  */
8964 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
8965 {
8966         struct netdev_phys_item_id a_id = { };
8967         struct netdev_phys_item_id b_id = { };
8968
8969         if (dev_get_port_parent_id(a, &a_id, true) ||
8970             dev_get_port_parent_id(b, &b_id, true))
8971                 return false;
8972
8973         return netdev_phys_item_id_same(&a_id, &b_id);
8974 }
8975 EXPORT_SYMBOL(netdev_port_same_parent_id);
8976
8977 /**
8978  *      dev_change_proto_down - set carrier according to proto_down.
8979  *
8980  *      @dev: device
8981  *      @proto_down: new value
8982  */
8983 int dev_change_proto_down(struct net_device *dev, bool proto_down)
8984 {
8985         if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
8986                 return -EOPNOTSUPP;
8987         if (!netif_device_present(dev))
8988                 return -ENODEV;
8989         if (proto_down)
8990                 netif_carrier_off(dev);
8991         else
8992                 netif_carrier_on(dev);
8993         dev->proto_down = proto_down;
8994         return 0;
8995 }
8996
8997 /**
8998  *      dev_change_proto_down_reason - update proto_down reason bits
8999  *
9000  *      @dev: device
9001  *      @mask: proto down mask
9002  *      @value: proto down value
9003  */
9004 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9005                                   u32 value)
9006 {
9007         int b;
9008
9009         if (!mask) {
9010                 dev->proto_down_reason = value;
9011         } else {
9012                 for_each_set_bit(b, &mask, 32) {
9013                         if (value & (1 << b))
9014                                 dev->proto_down_reason |= BIT(b);
9015                         else
9016                                 dev->proto_down_reason &= ~BIT(b);
9017                 }
9018         }
9019 }
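
/*
 * Illustrative sketch (not part of dev.c): @mask selects which
 * proto_down_reason bits to touch and @value supplies their new state;
 * a zero mask replaces the whole word. The reason bit below is a
 * hypothetical example, as is example_mark_hw_fault().
 */
#define EXAMPLE_REASON_HW_FAULT	0	/* hypothetical bit number */

static void __maybe_unused example_mark_hw_fault(struct net_device *dev,
						 bool fault)
{
	/* Touch only the HW_FAULT bit; other reason bits are preserved. */
	dev_change_proto_down_reason(dev, BIT(EXAMPLE_REASON_HW_FAULT),
				     fault ? BIT(EXAMPLE_REASON_HW_FAULT) : 0);
}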
9020
9021 struct bpf_xdp_link {
9022         struct bpf_link link;
9023         struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
9024         int flags;
9025 };
9026
9027 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9028 {
9029         if (flags & XDP_FLAGS_HW_MODE)
9030                 return XDP_MODE_HW;
9031         if (flags & XDP_FLAGS_DRV_MODE)
9032                 return XDP_MODE_DRV;
9033         if (flags & XDP_FLAGS_SKB_MODE)
9034                 return XDP_MODE_SKB;
9035         return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9036 }
9037
9038 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9039 {
9040         switch (mode) {
9041         case XDP_MODE_SKB:
9042                 return generic_xdp_install;
9043         case XDP_MODE_DRV:
9044         case XDP_MODE_HW:
9045                 return dev->netdev_ops->ndo_bpf;
9046         default:
9047                 return NULL;
9048         }
9049 }
9050
9051 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9052                                          enum bpf_xdp_mode mode)
9053 {
9054         return dev->xdp_state[mode].link;
9055 }
9056
9057 static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9058                                      enum bpf_xdp_mode mode)
9059 {
9060         struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9061
9062         if (link)
9063                 return link->link.prog;
9064         return dev->xdp_state[mode].prog;
9065 }
9066
9067 u8 dev_xdp_prog_count(struct net_device *dev)
9068 {
9069         u8 count = 0;
9070         int i;
9071
9072         for (i = 0; i < __MAX_XDP_MODE; i++)
9073                 if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9074                         count++;
9075         return count;
9076 }
9077 EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
9078
9079 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9080 {
9081         struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9082
9083         return prog ? prog->aux->id : 0;
9084 }
9085
9086 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9087                              struct bpf_xdp_link *link)
9088 {
9089         dev->xdp_state[mode].link = link;
9090         dev->xdp_state[mode].prog = NULL;
9091 }
9092
9093 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9094                              struct bpf_prog *prog)
9095 {
9096         dev->xdp_state[mode].link = NULL;
9097         dev->xdp_state[mode].prog = prog;
9098 }
9099
9100 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9101                            bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9102                            u32 flags, struct bpf_prog *prog)
9103 {
9104         struct netdev_bpf xdp;
9105         int err;
9106
9107         memset(&xdp, 0, sizeof(xdp));
9108         xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9109         xdp.extack = extack;
9110         xdp.flags = flags;
9111         xdp.prog = prog;
9112
9113         /* Drivers assume refcnt is already incremented (i.e., prog pointer is
9114          * "moved" into driver), so they don't increment it on their own, but
9115          * they do decrement refcnt when program is detached or replaced.
9116          * Given net_device also owns link/prog, we need to bump refcnt here
9117          * to prevent drivers from underflowing it.
9118          */
9119         if (prog)
9120                 bpf_prog_inc(prog);
9121         err = bpf_op(dev, &xdp);
9122         if (err) {
9123                 if (prog)
9124                         bpf_prog_put(prog);
9125                 return err;
9126         }
9127
9128         if (mode != XDP_MODE_HW)
9129                 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9130
9131         return 0;
9132 }
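/*
 * Reference-count sketch (illustrative): on a successful attach the
 * program gains one reference here that is conceptually handed to the
 * driver, which drops it again when the program is later detached or
 * replaced:
 *
 *	bpf_prog_inc(prog);		// +1, now owned by the driver
 *	err = bpf_op(dev, &xdp);	// driver stores the prog pointer
 *	...				// driver bpf_prog_put()s on detach
 *
 * The reference held by net_device itself is accounted separately via
 * the dev_xdp_set_prog()/dev_xdp_set_link() bookkeeping.
 */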
9133
9134 static void dev_xdp_uninstall(struct net_device *dev)
9135 {
9136         struct bpf_xdp_link *link;
9137         struct bpf_prog *prog;
9138         enum bpf_xdp_mode mode;
9139         bpf_op_t bpf_op;
9140
9141         ASSERT_RTNL();
9142
9143         for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9144                 prog = dev_xdp_prog(dev, mode);
9145                 if (!prog)
9146                         continue;
9147
9148                 bpf_op = dev_xdp_bpf_op(dev, mode);
9149                 if (!bpf_op)
9150                         continue;
9151
9152                 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9153
9154                 /* auto-detach link from net device */
9155                 link = dev_xdp_link(dev, mode);
9156                 if (link)
9157                         link->dev = NULL;
9158                 else
9159                         bpf_prog_put(prog);
9160
9161                 dev_xdp_set_link(dev, mode, NULL);
9162         }
9163 }
9164
9165 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9166                           struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9167                           struct bpf_prog *old_prog, u32 flags)
9168 {
9169         unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9170         struct bpf_prog *cur_prog;
9171         struct net_device *upper;
9172         struct list_head *iter;
9173         enum bpf_xdp_mode mode;
9174         bpf_op_t bpf_op;
9175         int err;
9176
9177         ASSERT_RTNL();
9178
9179         /* either link or prog attachment, never both */
9180         if (link && (new_prog || old_prog))
9181                 return -EINVAL;
9182         /* link supports only XDP mode flags */
9183         if (link && (flags & ~XDP_FLAGS_MODES)) {
9184                 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9185                 return -EINVAL;
9186         }
9187         /* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9188         if (num_modes > 1) {
9189                 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
9190                 return -EINVAL;
9191         }
9192         /* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9193         if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9194                 NL_SET_ERR_MSG(extack,
9195                                "More than one program loaded, unset mode is ambiguous");
9196                 return -EINVAL;
9197         }
9198         /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9199         if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
9200                 NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
9201                 return -EINVAL;
9202         }
9203
9204         mode = dev_xdp_mode(dev, flags);
9205         /* can't replace attached link */
9206         if (dev_xdp_link(dev, mode)) {
9207                 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
9208                 return -EBUSY;
9209         }
9210
9211         /* don't allow if an upper device already has a program */
9212         netdev_for_each_upper_dev_rcu(dev, upper, iter) {
9213                 if (dev_xdp_prog_count(upper) > 0) {
9214                         NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
9215                         return -EEXIST;
9216                 }
9217         }
9218
9219         cur_prog = dev_xdp_prog(dev, mode);
9220         /* can't replace attached prog with link */
9221         if (link && cur_prog) {
9222                 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
9223                 return -EBUSY;
9224         }
9225         if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
9226                 NL_SET_ERR_MSG(extack, "Active program does not match expected");
9227                 return -EEXIST;
9228         }
9229
9230         /* put effective new program into new_prog */
9231         if (link)
9232                 new_prog = link->link.prog;
9233
9234         if (new_prog) {
9235                 bool offload = mode == XDP_MODE_HW;
9236                 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
9237                                                ? XDP_MODE_DRV : XDP_MODE_SKB;
9238
9239                 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
9240                         NL_SET_ERR_MSG(extack, "XDP program already attached");
9241                         return -EBUSY;
9242                 }
9243                 if (!offload && dev_xdp_prog(dev, other_mode)) {
9244                         NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
9245                         return -EEXIST;
9246                 }
9247                 if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) {
9248                         NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported");
9249                         return -EINVAL;
9250                 }
9251                 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9252                         NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
9253                         return -EINVAL;
9254                 }
9255                 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9256                         NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
9257                         return -EINVAL;
9258                 }
9259         }
9260
9261         /* don't call drivers if the effective program didn't change */
9262         if (new_prog != cur_prog) {
9263                 bpf_op = dev_xdp_bpf_op(dev, mode);
9264                 if (!bpf_op) {
9265                         NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
9266                         return -EOPNOTSUPP;
9267                 }
9268
9269                 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9270                 if (err)
9271                         return err;
9272         }
9273
9274         if (link)
9275                 dev_xdp_set_link(dev, mode, link);
9276         else
9277                 dev_xdp_set_prog(dev, mode, new_prog);
9278         if (cur_prog)
9279                 bpf_prog_put(cur_prog);
9280
9281         return 0;
9282 }
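/*
 * Examples of the checks above (illustrative): attaching a generic-mode
 * program while a native one is loaded fails with -EEXIST; passing
 * XDP_FLAGS_UPDATE_IF_NOEXIST while any program is attached in the
 * chosen mode fails with -EBUSY; and XDP_FLAGS_REPLACE with an
 * expected program that no longer matches the active one fails with
 * -EEXIST.
 */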
9283
9284 static int dev_xdp_attach_link(struct net_device *dev,
9285                                struct netlink_ext_ack *extack,
9286                                struct bpf_xdp_link *link)
9287 {
9288         return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9289 }
9290
9291 static int dev_xdp_detach_link(struct net_device *dev,
9292                                struct netlink_ext_ack *extack,
9293                                struct bpf_xdp_link *link)
9294 {
9295         enum bpf_xdp_mode mode;
9296         bpf_op_t bpf_op;
9297
9298         ASSERT_RTNL();
9299
9300         mode = dev_xdp_mode(dev, link->flags);
9301         if (dev_xdp_link(dev, mode) != link)
9302                 return -EINVAL;
9303
9304         bpf_op = dev_xdp_bpf_op(dev, mode);
9305         WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9306         dev_xdp_set_link(dev, mode, NULL);
9307         return 0;
9308 }
9309
9310 static void bpf_xdp_link_release(struct bpf_link *link)
9311 {
9312         struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9313
9314         rtnl_lock();
9315
9316         /* if racing with net_device teardown, xdp_link->dev might be
9317          * already NULL, in which case the link was already auto-detached
9318          */
9319         if (xdp_link->dev) {
9320                 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9321                 xdp_link->dev = NULL;
9322         }
9323
9324         rtnl_unlock();
9325 }
9326
9327 static int bpf_xdp_link_detach(struct bpf_link *link)
9328 {
9329         bpf_xdp_link_release(link);
9330         return 0;
9331 }
9332
9333 static void bpf_xdp_link_dealloc(struct bpf_link *link)
9334 {
9335         struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9336
9337         kfree(xdp_link);
9338 }
9339
9340 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
9341                                      struct seq_file *seq)
9342 {
9343         struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9344         u32 ifindex = 0;
9345
9346         rtnl_lock();
9347         if (xdp_link->dev)
9348                 ifindex = xdp_link->dev->ifindex;
9349         rtnl_unlock();
9350
9351         seq_printf(seq, "ifindex:\t%u\n", ifindex);
9352 }
9353
9354 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
9355                                        struct bpf_link_info *info)
9356 {
9357         struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9358         u32 ifindex = 0;
9359
9360         rtnl_lock();
9361         if (xdp_link->dev)
9362                 ifindex = xdp_link->dev->ifindex;
9363         rtnl_unlock();
9364
9365         info->xdp.ifindex = ifindex;
9366         return 0;
9367 }
9368
9369 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
9370                                struct bpf_prog *old_prog)
9371 {
9372         struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9373         enum bpf_xdp_mode mode;
9374         bpf_op_t bpf_op;
9375         int err = 0;
9376
9377         rtnl_lock();
9378
9379         /* link might have been auto-released already, so fail */
9380         if (!xdp_link->dev) {
9381                 err = -ENOLINK;
9382                 goto out_unlock;
9383         }
9384
9385         if (old_prog && link->prog != old_prog) {
9386                 err = -EPERM;
9387                 goto out_unlock;
9388         }
9389         old_prog = link->prog;
9390         if (old_prog->type != new_prog->type ||
9391             old_prog->expected_attach_type != new_prog->expected_attach_type) {
9392                 err = -EINVAL;
9393                 goto out_unlock;
9394         }
9395
9396         if (old_prog == new_prog) {
9397                 /* no-op, don't disturb drivers */
9398                 bpf_prog_put(new_prog);
9399                 goto out_unlock;
9400         }
9401
9402         mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9403         bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9404         err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9405                               xdp_link->flags, new_prog);
9406         if (err)
9407                 goto out_unlock;
9408
9409         old_prog = xchg(&link->prog, new_prog);
9410         bpf_prog_put(old_prog);
9411
9412 out_unlock:
9413         rtnl_unlock();
9414         return err;
9415 }
9416
9417 static const struct bpf_link_ops bpf_xdp_link_lops = {
9418         .release = bpf_xdp_link_release,
9419         .dealloc = bpf_xdp_link_dealloc,
9420         .detach = bpf_xdp_link_detach,
9421         .show_fdinfo = bpf_xdp_link_show_fdinfo,
9422         .fill_link_info = bpf_xdp_link_fill_link_info,
9423         .update_prog = bpf_xdp_link_update,
9424 };
9425
9426 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9427 {
9428         struct net *net = current->nsproxy->net_ns;
9429         struct bpf_link_primer link_primer;
9430         struct bpf_xdp_link *link;
9431         struct net_device *dev;
9432         int err, fd;
9433
9434         rtnl_lock();
9435         dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9436         if (!dev) {
9437                 rtnl_unlock();
9438                 return -EINVAL;
9439         }
9440
9441         link = kzalloc(sizeof(*link), GFP_USER);
9442         if (!link) {
9443                 err = -ENOMEM;
9444                 goto unlock;
9445         }
9446
9447         bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9448         link->dev = dev;
9449         link->flags = attr->link_create.flags;
9450
9451         err = bpf_link_prime(&link->link, &link_primer);
9452         if (err) {
9453                 kfree(link);
9454                 goto unlock;
9455         }
9456
9457         err = dev_xdp_attach_link(dev, NULL, link);
9458         rtnl_unlock();
9459
9460         if (err) {
9461                 link->dev = NULL;
9462                 bpf_link_cleanup(&link_primer);
9463                 goto out_put_dev;
9464         }
9465
9466         fd = bpf_link_settle(&link_primer);
9467         /* link itself doesn't hold dev's refcnt to not complicate shutdown */
9468         dev_put(dev);
9469         return fd;
9470
9471 unlock:
9472         rtnl_unlock();
9473
9474 out_put_dev:
9475         dev_put(dev);
9476         return err;
9477 }
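/*
 * Userspace sketch (illustrative, assuming libbpf): the BPF_LINK_CREATE
 * command that lands here can be issued with bpf_link_create(), which
 * takes the target ifindex where cgroup-style links take an fd:
 *
 *	int prog_fd = bpf_program__fd(prog);
 *	int link_fd = bpf_link_create(prog_fd, ifindex, BPF_XDP, NULL);
 *
 * Closing link_fd (or the owning process exiting) releases the link
 * and detaches the program via bpf_xdp_link_release() above.
 */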
9478
9479 /**
9480  *      dev_change_xdp_fd - set or clear a bpf program for a device rx path
9481  *      @dev: device
9482  *      @extack: netlink extended ack
9483  *      @fd: new program fd or negative value to clear
9484  *      @expected_fd: old program fd that userspace expects to replace or clear
9485  *      @flags: xdp-related flags
9486  *
9487  *      Set or clear a bpf program for a device
9488  */
9489 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9490                       int fd, int expected_fd, u32 flags)
9491 {
9492         enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9493         struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9494         int err;
9495
9496         ASSERT_RTNL();
9497
9498         if (fd >= 0) {
9499                 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9500                                                  mode != XDP_MODE_SKB);
9501                 if (IS_ERR(new_prog))
9502                         return PTR_ERR(new_prog);
9503         }
9504
9505         if (expected_fd >= 0) {
9506                 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9507                                                  mode != XDP_MODE_SKB);
9508                 if (IS_ERR(old_prog)) {
9509                         err = PTR_ERR(old_prog);
9510                         old_prog = NULL;
9511                         goto err_out;
9512                 }
9513         }
9514
9515         err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9516
9517 err_out:
9518         if (err && new_prog)
9519                 bpf_prog_put(new_prog);
9520         if (old_prog)
9521                 bpf_prog_put(old_prog);
9522         return err;
9523 }
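/*
 * Example (illustrative): the classic netlink attach path ends up here.
 * A negative @fd detaches whatever is attached in the chosen mode, and
 * a detach with XDP_FLAGS_REPLACE plus @expected_fd only succeeds if
 * the currently attached program is still the one userspace expects.
 */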
9524
9525 /**
9526  *      dev_new_index   -       allocate an ifindex
9527  *      @net: the applicable net namespace
9528  *
9529  *      Returns a suitable unique value for a new device interface
9530  *      number.  The caller must hold the rtnl semaphore or the
9531  *      dev_base_lock to be sure it remains unique.
9532  */
9533 static int dev_new_index(struct net *net)
9534 {
9535         int ifindex = net->ifindex;
9536
9537         for (;;) {
9538                 if (++ifindex <= 0)
9539                         ifindex = 1;
9540                 if (!__dev_get_by_index(net, ifindex))
9541                         return net->ifindex = ifindex;
9542         }
9543 }
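/*
 * Example (illustrative): with net->ifindex == 5 and index 6 unused,
 * this returns 6 and records it in net->ifindex.  Past INT_MAX the
 * counter wraps back to 1, and the __dev_get_by_index() probe skips
 * indices that are still in use instead of handing them out twice.
 */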
9544
9545 /* Delayed registration/unregistration */
9546 LIST_HEAD(net_todo_list);
9547 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9548
9549 static void net_set_todo(struct net_device *dev)
9550 {
9551         list_add_tail(&dev->todo_list, &net_todo_list);
9552         atomic_inc(&dev_net(dev)->dev_unreg_count);
9553 }
9554
9555 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9556         struct net_device *upper, netdev_features_t features)
9557 {
9558         netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9559         netdev_features_t feature;
9560         int feature_bit;
9561
9562         for_each_netdev_feature(upper_disables, feature_bit) {
9563                 feature = __NETIF_F_BIT(feature_bit);
9564                 if (!(upper->wanted_features & feature)
9565                     && (features & feature)) {
9566                         netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9567                                    &feature, upper->name);
9568                         features &= ~feature;
9569                 }
9570         }
9571
9572         return features;
9573 }
9574
9575 static void netdev_sync_lower_features(struct net_device *upper,
9576         struct net_device *lower, netdev_features_t features)
9577 {
9578         netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9579         netdev_features_t feature;
9580         int feature_bit;
9581
9582         for_each_netdev_feature(upper_disables, feature_bit) {
9583                 feature = __NETIF_F_BIT(feature_bit);
9584                 if (!(features & feature) && (lower->features & feature)) {
9585                         netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9586                                    &feature, lower->name);
9587                         lower->wanted_features &= ~feature;
9588                         __netdev_update_features(lower);
9589
9590                         if (unlikely(lower->features & feature))
9591                                 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9592                                             &feature, lower->name);
9593                         else
9594                                 netdev_features_change(lower);
9595                 }
9596         }
9597 }
9598
9599 static netdev_features_t netdev_fix_features(struct net_device *dev,
9600         netdev_features_t features)
9601 {
9602         /* Fix illegal checksum combinations */
9603         if ((features & NETIF_F_HW_CSUM) &&
9604             (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9605                 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9606                 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9607         }
9608
9609         /* TSO requires that SG is present as well. */
9610         if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9611                 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9612                 features &= ~NETIF_F_ALL_TSO;
9613         }
9614
9615         if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9616                                         !(features & NETIF_F_IP_CSUM)) {
9617                 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9618                 features &= ~NETIF_F_TSO;
9619                 features &= ~NETIF_F_TSO_ECN;
9620         }
9621
9622         if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9623                                          !(features & NETIF_F_IPV6_CSUM)) {
9624                 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9625                 features &= ~NETIF_F_TSO6;
9626         }
9627
9628         /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9629         if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9630                 features &= ~NETIF_F_TSO_MANGLEID;
9631
9632         /* TSO ECN requires that TSO is present as well. */
9633         if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9634                 features &= ~NETIF_F_TSO_ECN;
9635
9636         /* Software GSO depends on SG. */
9637         if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9638                 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9639                 features &= ~NETIF_F_GSO;
9640         }
9641
9642         /* GSO partial features require GSO partial be set */
9643         if ((features & dev->gso_partial_features) &&
9644             !(features & NETIF_F_GSO_PARTIAL)) {
9645                 netdev_dbg(dev,
9646                            "Dropping partially supported GSO features since no GSO partial.\n");
9647                 features &= ~dev->gso_partial_features;
9648         }
9649
9650         if (!(features & NETIF_F_RXCSUM)) {
9651                 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9652                  * successfully merged by hardware must also have the
9653                  * checksum verified by hardware.  If the user does not
9654                  * want to enable RXCSUM, logically, we should disable GRO_HW.
9655                  */
9656                 if (features & NETIF_F_GRO_HW) {
9657                         netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9658                         features &= ~NETIF_F_GRO_HW;
9659                 }
9660         }
9661
9662         /* LRO/HW-GRO features cannot be combined with RX-FCS */
9663         if (features & NETIF_F_RXFCS) {
9664                 if (features & NETIF_F_LRO) {
9665                         netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9666                         features &= ~NETIF_F_LRO;
9667                 }
9668
9669                 if (features & NETIF_F_GRO_HW) {
9670                         netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9671                         features &= ~NETIF_F_GRO_HW;
9672                 }
9673         }
9674
9675         if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
9676                 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
9677                 features &= ~NETIF_F_LRO;
9678         }
9679
9680         if (features & NETIF_F_HW_TLS_TX) {
9681                 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
9682                         (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9683                 bool hw_csum = features & NETIF_F_HW_CSUM;
9684
9685                 if (!ip_csum && !hw_csum) {
9686                         netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9687                         features &= ~NETIF_F_HW_TLS_TX;
9688                 }
9689         }
9690
9691         if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
9692                 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9693                 features &= ~NETIF_F_HW_TLS_RX;
9694         }
9695
9696         return features;
9697 }
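/*
 * Example (illustrative): requesting NETIF_F_TSO without NETIF_F_SG has
 * every TSO bit stripped by the second check above, and requesting
 * NETIF_F_GRO_HW with RXCSUM off loses GRO_HW, since hardware GRO
 * implies hardware-verified checksums.  The function never fails; it
 * silently returns the closest legal subset of the requested features.
 */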
9698
9699 int __netdev_update_features(struct net_device *dev)
9700 {
9701         struct net_device *upper, *lower;
9702         netdev_features_t features;
9703         struct list_head *iter;
9704         int err = -1;
9705
9706         ASSERT_RTNL();
9707
9708         features = netdev_get_wanted_features(dev);
9709
9710         if (dev->netdev_ops->ndo_fix_features)
9711                 features = dev->netdev_ops->ndo_fix_features(dev, features);
9712
9713         /* driver might be less strict about feature dependencies */
9714         features = netdev_fix_features(dev, features);
9715
9716         /* some features can't be enabled if they're off on an upper device */
9717         netdev_for_each_upper_dev_rcu(dev, upper, iter)
9718                 features = netdev_sync_upper_features(dev, upper, features);
9719
9720         if (dev->features == features)
9721                 goto sync_lower;
9722
9723         netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9724                 &dev->features, &features);
9725
9726         if (dev->netdev_ops->ndo_set_features)
9727                 err = dev->netdev_ops->ndo_set_features(dev, features);
9728         else
9729                 err = 0;
9730
9731         if (unlikely(err < 0)) {
9732                 netdev_err(dev,
9733                         "set_features() failed (%d); wanted %pNF, left %pNF\n",
9734                         err, &features, &dev->features);
9735                 /* return non-0 since some features might have changed and
9736                  * it's better to fire a spurious notification than miss it
9737                  */
9738                 return -1;
9739         }
9740
9741 sync_lower:
9742         /* some features must be disabled on lower devices when disabled
9743          * on an upper device (think: bonding master or bridge)
9744          */
9745         netdev_for_each_lower_dev(dev, lower, iter)
9746                 netdev_sync_lower_features(dev, lower, features);
9747
9748         if (!err) {
9749                 netdev_features_t diff = features ^ dev->features;
9750
9751                 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9752                         /* udp_tunnel_{get,drop}_rx_info both need
9753                          * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9754                          * device, or they won't do anything.
9755                          * Thus we need to update dev->features
9756                          * *before* calling udp_tunnel_get_rx_info,
9757                          * but *after* calling udp_tunnel_drop_rx_info.
9758                          */
9759                         if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9760                                 dev->features = features;
9761                                 udp_tunnel_get_rx_info(dev);
9762                         } else {
9763                                 udp_tunnel_drop_rx_info(dev);
9764                         }
9765                 }
9766
9767                 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9768                         if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9769                                 dev->features = features;
9770                                 err |= vlan_get_rx_ctag_filter_info(dev);
9771                         } else {
9772                                 vlan_drop_rx_ctag_filter_info(dev);
9773                         }
9774                 }
9775
9776                 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9777                         if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9778                                 dev->features = features;
9779                                 err |= vlan_get_rx_stag_filter_info(dev);
9780                         } else {
9781                                 vlan_drop_rx_stag_filter_info(dev);
9782                         }
9783                 }
9784
9785                 dev->features = features;
9786         }
9787
9788         return err < 0 ? 0 : 1;
9789 }
9790
9791 /**
9792  *      netdev_update_features - recalculate device features
9793  *      @dev: the device to check
9794  *
9795  *      Recalculate dev->features set and send notifications if it
9796  *      has changed. Should be called after driver or hardware dependent
9797  *      conditions might have changed that influence the features.
9798  */
9799 void netdev_update_features(struct net_device *dev)
9800 {
9801         if (__netdev_update_features(dev))
9802                 netdev_features_change(dev);
9803 }
9804 EXPORT_SYMBOL(netdev_update_features);
9805
9806 /**
9807  *      netdev_change_features - recalculate device features
9808  *      @dev: the device to check
9809  *
9810  *      Recalculate dev->features set and send notifications even
9811  *      if they have not changed. Should be called instead of
9812  *      netdev_update_features() if also dev->vlan_features might
9813  *      have changed to allow the changes to be propagated to stacked
9814  *      VLAN devices.
9815  */
9816 void netdev_change_features(struct net_device *dev)
9817 {
9818         __netdev_update_features(dev);
9819         netdev_features_change(dev);
9820 }
9821 EXPORT_SYMBOL(netdev_change_features);
9822
9823 /**
9824  *      netif_stacked_transfer_operstate -      transfer operstate
9825  *      @rootdev: the root or lower level device to transfer state from
9826  *      @dev: the device to transfer operstate to
9827  *
9828  *      Transfer operational state from root to device. This is normally
9829  *      called when a stacking relationship exists between the root
9830  *      device and the device (a leaf device).
9831  */
9832 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9833                                         struct net_device *dev)
9834 {
9835         if (rootdev->operstate == IF_OPER_DORMANT)
9836                 netif_dormant_on(dev);
9837         else
9838                 netif_dormant_off(dev);
9839
9840         if (rootdev->operstate == IF_OPER_TESTING)
9841                 netif_testing_on(dev);
9842         else
9843                 netif_testing_off(dev);
9844
9845         if (netif_carrier_ok(rootdev))
9846                 netif_carrier_on(dev);
9847         else
9848                 netif_carrier_off(dev);
9849 }
9850 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
9851
9852 static int netif_alloc_rx_queues(struct net_device *dev)
9853 {
9854         unsigned int i, count = dev->num_rx_queues;
9855         struct netdev_rx_queue *rx;
9856         size_t sz = count * sizeof(*rx);
9857         int err = 0;
9858
9859         BUG_ON(count < 1);
9860
9861         rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
9862         if (!rx)
9863                 return -ENOMEM;
9864
9865         dev->_rx = rx;
9866
9867         for (i = 0; i < count; i++) {
9868                 rx[i].dev = dev;
9869
9870                 /* XDP RX-queue setup */
9871                 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
9872                 if (err < 0)
9873                         goto err_rxq_info;
9874         }
9875         return 0;
9876
9877 err_rxq_info:
9878         /* Roll back successful registrations and free other resources */
9879         while (i--)
9880                 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
9881         kvfree(dev->_rx);
9882         dev->_rx = NULL;
9883         return err;
9884 }
9885
9886 static void netif_free_rx_queues(struct net_device *dev)
9887 {
9888         unsigned int i, count = dev->num_rx_queues;
9889
9890         /* netif_alloc_rx_queues() alloc failed, resources were already unregistered */
9891         if (!dev->_rx)
9892                 return;
9893
9894         for (i = 0; i < count; i++)
9895                 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
9896
9897         kvfree(dev->_rx);
9898 }
9899
9900 static void netdev_init_one_queue(struct net_device *dev,
9901                                   struct netdev_queue *queue, void *_unused)
9902 {
9903         /* Initialize queue lock */
9904         spin_lock_init(&queue->_xmit_lock);
9905         netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
9906         queue->xmit_lock_owner = -1;
9907         netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
9908         queue->dev = dev;
9909 #ifdef CONFIG_BQL
9910         dql_init(&queue->dql, HZ);
9911 #endif
9912 }
9913
9914 static void netif_free_tx_queues(struct net_device *dev)
9915 {
9916         kvfree(dev->_tx);
9917 }
9918
9919 static int netif_alloc_netdev_queues(struct net_device *dev)
9920 {
9921         unsigned int count = dev->num_tx_queues;
9922         struct netdev_queue *tx;
9923         size_t sz = count * sizeof(*tx);
9924
9925         if (count < 1 || count > 0xffff)
9926                 return -EINVAL;
9927
9928         tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
9929         if (!tx)
9930                 return -ENOMEM;
9931
9932         dev->_tx = tx;
9933
9934         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
9935         spin_lock_init(&dev->tx_global_lock);
9936
9937         return 0;
9938 }
9939
9940 void netif_tx_stop_all_queues(struct net_device *dev)
9941 {
9942         unsigned int i;
9943
9944         for (i = 0; i < dev->num_tx_queues; i++) {
9945                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
9946
9947                 netif_tx_stop_queue(txq);
9948         }
9949 }
9950 EXPORT_SYMBOL(netif_tx_stop_all_queues);
9951
9952 /**
9953  * register_netdevice() - register a network device
9954  * @dev: device to register
9955  *
9956  * Take a prepared network device structure and make it externally accessible.
9957  * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
9958  * Callers must hold the rtnl lock - you may want register_netdev()
9959  * instead of this.
9960  */
9961 int register_netdevice(struct net_device *dev)
9962 {
9963         int ret;
9964         struct net *net = dev_net(dev);
9965
9966         BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
9967                      NETDEV_FEATURE_COUNT);
9968         BUG_ON(dev_boot_phase);
9969         ASSERT_RTNL();
9970
9971         might_sleep();
9972
9973         /* When net_devices are persistent, this will be fatal. */
9974         BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
9975         BUG_ON(!net);
9976
9977         ret = ethtool_check_ops(dev->ethtool_ops);
9978         if (ret)
9979                 return ret;
9980
9981         spin_lock_init(&dev->addr_list_lock);
9982         netdev_set_addr_lockdep_class(dev);
9983
9984         ret = dev_get_valid_name(net, dev, dev->name);
9985         if (ret < 0)
9986                 goto out;
9987
9988         ret = -ENOMEM;
9989         dev->name_node = netdev_name_node_head_alloc(dev);
9990         if (!dev->name_node)
9991                 goto out;
9992
9993         /* Init, if this function is available */
9994         if (dev->netdev_ops->ndo_init) {
9995                 ret = dev->netdev_ops->ndo_init(dev);
9996                 if (ret) {
9997                         if (ret > 0)
9998                                 ret = -EIO;
9999                         goto err_free_name;
10000                 }
10001         }
10002
10003         if (((dev->hw_features | dev->features) &
10004              NETIF_F_HW_VLAN_CTAG_FILTER) &&
10005             (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10006              !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10007                 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10008                 ret = -EINVAL;
10009                 goto err_uninit;
10010         }
10011
10012         ret = -EBUSY;
10013         if (!dev->ifindex)
10014                 dev->ifindex = dev_new_index(net);
10015         else if (__dev_get_by_index(net, dev->ifindex))
10016                 goto err_uninit;
10017
10018         /* Transfer changeable features to wanted_features and enable
10019          * software offloads (GSO and GRO).
10020          */
10021         dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10022         dev->features |= NETIF_F_SOFT_FEATURES;
10023
10024         if (dev->udp_tunnel_nic_info) {
10025                 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10026                 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10027         }
10028
10029         dev->wanted_features = dev->features & dev->hw_features;
10030
10031         if (!(dev->flags & IFF_LOOPBACK))
10032                 dev->hw_features |= NETIF_F_NOCACHE_COPY;
10033
10034         /* If IPv4 TCP segmentation offload is supported we should also
10035          * allow the device to enable segmenting the frame with the option
10036          * of ignoring a static IP ID value.  This doesn't enable the
10037          * feature itself but allows the user to enable it later.
10038          */
10039         if (dev->hw_features & NETIF_F_TSO)
10040                 dev->hw_features |= NETIF_F_TSO_MANGLEID;
10041         if (dev->vlan_features & NETIF_F_TSO)
10042                 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10043         if (dev->mpls_features & NETIF_F_TSO)
10044                 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10045         if (dev->hw_enc_features & NETIF_F_TSO)
10046                 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10047
10048         /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
10049          */
10050         dev->vlan_features |= NETIF_F_HIGHDMA;
10051
10052         /* Make NETIF_F_SG inheritable to tunnel devices.
10053          */
10054         dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10055
10056         /* Make NETIF_F_SG inheritable to MPLS.
10057          */
10058         dev->mpls_features |= NETIF_F_SG;
10059
10060         ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
10061         ret = notifier_to_errno(ret);
10062         if (ret)
10063                 goto err_uninit;
10064
10065         ret = netdev_register_kobject(dev);
10066         write_lock(&dev_base_lock);
10067         dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
10068         write_unlock(&dev_base_lock);
10069         if (ret)
10070                 goto err_uninit;
10071
10072         __netdev_update_features(dev);
10073
10074         /*
10075          *      Default initial state at registration is that the
10076          *      device is present.
10077          */
10078
10079         set_bit(__LINK_STATE_PRESENT, &dev->state);
10080
10081         linkwatch_init_dev(dev);
10082
10083         dev_init_scheduler(dev);
10084
10085         netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10086         list_netdevice(dev);
10087
10088         add_device_randomness(dev->dev_addr, dev->addr_len);
10089
10090         /* If the device has a permanent device address, the driver should
10091          * set dev_addr, and addr_assign_type should be left at
10092          * NET_ADDR_PERM (the default value).
10093          */
10094         if (dev->addr_assign_type == NET_ADDR_PERM)
10095                 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10096
10097         /* Notify protocols, that a new device appeared. */
10098         ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10099         ret = notifier_to_errno(ret);
10100         if (ret) {
10101                 /* Expect explicit free_netdev() on failure */
10102                 dev->needs_free_netdev = false;
10103                 unregister_netdevice_queue(dev, NULL);
10104                 goto out;
10105         }
10106         /*
10107          *      Prevent userspace races by waiting until the network
10108          *      device is fully set up before sending notifications.
10109          */
10110         if (!dev->rtnl_link_ops ||
10111             dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10112                 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
10113
10114 out:
10115         return ret;
10116
10117 err_uninit:
10118         if (dev->netdev_ops->ndo_uninit)
10119                 dev->netdev_ops->ndo_uninit(dev);
10120         if (dev->priv_destructor)
10121                 dev->priv_destructor(dev);
10122 err_free_name:
10123         netdev_name_node_free(dev->name_node);
10124         goto out;
10125 }
10126 EXPORT_SYMBOL(register_netdevice);
10127
10128 /**
10129  *      init_dummy_netdev       - init a dummy network device for NAPI
10130  *      @dev: device to init
10131  *
10132  *      This takes a network device structure and initializes the minimum
10133  *      number of fields so it can be used to schedule NAPI polls without
10134  *      registering a full blown interface. This is to be used by drivers
10135  *      that need to tie several hardware interfaces to a single NAPI
10136  *      poll scheduler due to HW limitations.
10137  */
10138 int init_dummy_netdev(struct net_device *dev)
10139 {
10140         /* Clear everything. Note we don't initialize spinlocks
10141          * as they aren't supposed to be taken by any of the
10142          * NAPI code and this dummy netdev is supposed to be
10143          * only ever used for NAPI polls.
10144          */
10145         memset(dev, 0, sizeof(struct net_device));
10146
10147         /* make sure we BUG if trying to hit standard
10148          * register/unregister code path
10149          */
10150         dev->reg_state = NETREG_DUMMY;
10151
10152         /* NAPI wants this */
10153         INIT_LIST_HEAD(&dev->napi_list);
10154
10155         /* a dummy interface is started by default */
10156         set_bit(__LINK_STATE_PRESENT, &dev->state);
10157         set_bit(__LINK_STATE_START, &dev->state);
10158
10159         /* napi_busy_loop stats accounting wants this */
10160         dev_net_set(dev, &init_net);
10161
10162         /* Note: we don't allocate pcpu_refcnt for dummy devices,
10163          * because users of this 'device' don't need to change
10164          * its refcount.
10165          */
10166
10167         return 0;
10168 }
10169 EXPORT_SYMBOL_GPL(init_dummy_netdev);
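/*
 * Usage sketch (illustrative; the priv/my_poll names are made up and
 * the four-argument netif_napi_add() of this kernel generation is
 * assumed): a driver with several hardware interfaces behind one
 * net_device can hang its NAPI contexts off a private dummy device:
 *
 *	init_dummy_netdev(&priv->napi_dev);
 *	netif_napi_add(&priv->napi_dev, &priv->napi, my_poll,
 *		       NAPI_POLL_WEIGHT);
 *	napi_enable(&priv->napi);
 */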
10170
10171
10172 /**
10173  *      register_netdev - register a network device
10174  *      @dev: device to register
10175  *
10176  *      Take a completed network device structure and add it to the kernel
10177  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10178  *      chain. 0 is returned on success. A negative errno code is returned
10179  *      on a failure to set up the device, or if the name is a duplicate.
10180  *
10181  *      This is a wrapper around register_netdevice that takes the rtnl semaphore
10182  *      and expands the device name if you passed a format string to
10183  *      alloc_netdev.
10184  */
10185 int register_netdev(struct net_device *dev)
10186 {
10187         int err;
10188
10189         if (rtnl_lock_killable())
10190                 return -EINTR;
10191         err = register_netdevice(dev);
10192         rtnl_unlock();
10193         return err;
10194 }
10195 EXPORT_SYMBOL(register_netdev);
10196
10197 int netdev_refcnt_read(const struct net_device *dev)
10198 {
10199 #ifdef CONFIG_PCPU_DEV_REFCNT
10200         int i, refcnt = 0;
10201
10202         for_each_possible_cpu(i)
10203                 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10204         return refcnt;
10205 #else
10206         return refcount_read(&dev->dev_refcnt);
10207 #endif
10208 }
10209 EXPORT_SYMBOL(netdev_refcnt_read);
10210
10211 int netdev_unregister_timeout_secs __read_mostly = 10;
10212
10213 #define WAIT_REFS_MIN_MSECS 1
10214 #define WAIT_REFS_MAX_MSECS 250
10215 /**
10216  * netdev_wait_allrefs_any - wait until all references are gone.
10217  * @list: list of net_devices to wait on
10218  *
10219  * This is called when unregistering network devices.
10220  *
10221  * Any protocol or device that holds a reference should register
10222  * for netdevice notification, and clean up and put back the
10223  * reference if they receive an UNREGISTER event.
10224  * We can get stuck here if buggy protocols don't correctly
10225  * call dev_put.
10226  */
10227 static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
10228 {
10229         unsigned long rebroadcast_time, warning_time;
10230         struct net_device *dev;
10231         int wait = 0;
10232
10233         rebroadcast_time = warning_time = jiffies;
10234
10235         list_for_each_entry(dev, list, todo_list)
10236                 if (netdev_refcnt_read(dev) == 1)
10237                         return dev;
10238
10239         while (true) {
10240                 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
10241                         rtnl_lock();
10242
10243                         /* Rebroadcast unregister notification */
10244                         list_for_each_entry(dev, list, todo_list)
10245                                 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10246
10247                         __rtnl_unlock();
10248                         rcu_barrier();
10249                         rtnl_lock();
10250
10251                         list_for_each_entry(dev, list, todo_list)
10252                                 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
10253                                              &dev->state)) {
10254                                         /* We must not have linkwatch events
10255                                          * pending on unregister. If this
10256                                          * happens, we simply run the queue
10257                                          * unscheduled, resulting in a noop
10258                                          * for this device.
10259                                          */
10260                                         linkwatch_run_queue();
10261                                         break;
10262                                 }
10263
10264                         __rtnl_unlock();
10265
10266                         rebroadcast_time = jiffies;
10267                 }
10268
10269                 if (!wait) {
10270                         rcu_barrier();
10271                         wait = WAIT_REFS_MIN_MSECS;
10272                 } else {
10273                         msleep(wait);
10274                         wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
10275                 }
10276
10277                 list_for_each_entry(dev, list, todo_list)
10278                         if (netdev_refcnt_read(dev) == 1)
10279                                 return dev;
10280
10281                 if (time_after(jiffies, warning_time +
10282                                netdev_unregister_timeout_secs * HZ)) {
10283                         list_for_each_entry(dev, list, todo_list) {
10284                                 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10285                                          dev->name, netdev_refcnt_read(dev));
10286                                 ref_tracker_dir_print(&dev->refcnt_tracker, 10);
10287                         }
10288
10289                         warning_time = jiffies;
10290                 }
10291         }
10292 }
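/*
 * Timing sketch (illustrative): the polling interval doubles from 1 ms
 * up to the 250 ms ceiling, NETDEV_UNREGISTER is rebroadcast at most
 * once per second, and once netdev_unregister_timeout_secs (10 s by
 * default) elapses the "waiting for %s to become free" message starts
 * firing together with a ref_tracker dump of the remaining holders.
 */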
10293
10294 /* The sequence is:
10295  *
10296  *      rtnl_lock();
10297  *      ...
10298  *      register_netdevice(x1);
10299  *      register_netdevice(x2);
10300  *      ...
10301  *      unregister_netdevice(y1);
10302  *      unregister_netdevice(y2);
10303  *      ...
10304  *      rtnl_unlock();
10305  *      free_netdev(y1);
10306  *      free_netdev(y2);
10307  *
10308  * We are invoked by rtnl_unlock().
10309  * This allows us to deal with problems:
10310  * 1) We can delete sysfs objects which invoke hotplug
10311  *    without deadlocking with linkwatch via keventd.
10312  * 2) Since we run with the RTNL semaphore not held, we can sleep
10313  *    safely in order to wait for the netdev refcnt to drop to zero.
10314  *
10315  * We must not return until all unregister events added during
10316  * the interval the lock was held have been completed.
10317  */
10318 void netdev_run_todo(void)
10319 {
10320         struct net_device *dev, *tmp;
10321         struct list_head list;
10322 #ifdef CONFIG_LOCKDEP
10323         struct list_head unlink_list;
10324
10325         list_replace_init(&net_unlink_list, &unlink_list);
10326
10327         while (!list_empty(&unlink_list)) {
10328                 struct net_device *dev = list_first_entry(&unlink_list,
10329                                                           struct net_device,
10330                                                           unlink_list);
10331                 list_del_init(&dev->unlink_list);
10332                 dev->nested_level = dev->lower_level - 1;
10333         }
10334 #endif
10335
10336         /* Snapshot list, allow later requests */
10337         list_replace_init(&net_todo_list, &list);
10338
10339         __rtnl_unlock();
10340
10341         /* Wait for rcu callbacks to finish before next phase */
10342         if (!list_empty(&list))
10343                 rcu_barrier();
10344
10345         list_for_each_entry_safe(dev, tmp, &list, todo_list) {
10346                 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10347                         netdev_WARN(dev, "run_todo but not unregistering\n");
10348                         list_del(&dev->todo_list);
10349                         continue;
10350                 }
10351
10352                 write_lock(&dev_base_lock);
10353                 dev->reg_state = NETREG_UNREGISTERED;
10354                 write_unlock(&dev_base_lock);
10355                 linkwatch_forget_dev(dev);
10356         }
10357
10358         while (!list_empty(&list)) {
10359                 dev = netdev_wait_allrefs_any(&list);
10360                 list_del(&dev->todo_list);
10361
10362                 /* paranoia */
10363                 BUG_ON(netdev_refcnt_read(dev) != 1);
10364                 BUG_ON(!list_empty(&dev->ptype_all));
10365                 BUG_ON(!list_empty(&dev->ptype_specific));
10366                 WARN_ON(rcu_access_pointer(dev->ip_ptr));
10367                 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10368 #if IS_ENABLED(CONFIG_DECNET)
10369                 WARN_ON(dev->dn_ptr);
10370 #endif
10371                 if (dev->priv_destructor)
10372                         dev->priv_destructor(dev);
10373                 if (dev->needs_free_netdev)
10374                         free_netdev(dev);
10375
10376                 if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
10377                         wake_up(&netdev_unregistering_wq);
10378
10379                 /* Free network device */
10380                 kobject_put(&dev->dev.kobj);
10381         }
10382 }
10383
10384 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
10385  * all the same fields in the same order as net_device_stats, with only
10386  * the type differing, but rtnl_link_stats64 may have additional fields
10387  * at the end for newer counters.
10388  */
10389 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
10390                              const struct net_device_stats *netdev_stats)
10391 {
10392 #if BITS_PER_LONG == 64
10393         BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
10394         memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
10395         /* zero out counters that only exist in rtnl_link_stats64 */
10396         memset((char *)stats64 + sizeof(*netdev_stats), 0,
10397                sizeof(*stats64) - sizeof(*netdev_stats));
10398 #else
10399         size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
10400         const unsigned long *src = (const unsigned long *)netdev_stats;
10401         u64 *dst = (u64 *)stats64;
10402
10403         BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
10404         for (i = 0; i < n; i++)
10405                 dst[i] = src[i];
10406         /* zero out counters that only exist in rtnl_link_stats64 */
10407         memset((char *)stats64 + n * sizeof(u64), 0,
10408                sizeof(*stats64) - n * sizeof(u64));
10409 #endif
10410 }
10411 EXPORT_SYMBOL(netdev_stats_to_stats64);
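/*
 * Example (illustrative): on a 32-bit kernel each unsigned long counter
 * is widened to u64 one at a time, so the common prefix of the two
 * structs must stay matched field for field; the BUILD_BUG_ON()s above
 * turn a size mismatch into a compile-time error instead of silently
 * corrupted statistics.
 */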
10412
10413 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
10414 {
10415         struct net_device_core_stats __percpu *p;
10416
10417         p = alloc_percpu_gfp(struct net_device_core_stats,
10418                              GFP_ATOMIC | __GFP_NOWARN);
10419
10420         if (p && cmpxchg(&dev->core_stats, NULL, p))
10421                 free_percpu(p);
10422
10423         /* This READ_ONCE() pairs with the cmpxchg() above */
10424         return READ_ONCE(dev->core_stats);
10425 }
10426 EXPORT_SYMBOL(netdev_core_stats_alloc);
10427
10428 /**
10429  *      dev_get_stats   - get network device statistics
10430  *      @dev: device to get statistics from
10431  *      @storage: place to store stats
10432  *
10433  *      Get network statistics from device. Return @storage.
10434  *      The device driver may provide its own method by setting
10435  *      dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
10436  *      otherwise the internal statistics structure is used.
10437  */
10438 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10439                                         struct rtnl_link_stats64 *storage)
10440 {
10441         const struct net_device_ops *ops = dev->netdev_ops;
10442         const struct net_device_core_stats __percpu *p;
10443
10444         if (ops->ndo_get_stats64) {
10445                 memset(storage, 0, sizeof(*storage));
10446                 ops->ndo_get_stats64(dev, storage);
10447         } else if (ops->ndo_get_stats) {
10448                 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10449         } else {
10450                 netdev_stats_to_stats64(storage, &dev->stats);
10451         }
10452
10453         /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
10454         p = READ_ONCE(dev->core_stats);
10455         if (p) {
10456                 const struct net_device_core_stats *core_stats;
10457                 int i;
10458
10459                 for_each_possible_cpu(i) {
10460                         core_stats = per_cpu_ptr(p, i);
10461                         storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
10462                         storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
10463                         storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
10464                         storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
10465                 }
10466         }
10467         return storage;
10468 }
10469 EXPORT_SYMBOL(dev_get_stats);
10470
10471 /**
10472  *      dev_fetch_sw_netstats - get per-cpu network device statistics
10473  *      @s: place to store stats
10474  *      @netstats: per-cpu network stats to read from
10475  *
10476  *      Read per-cpu network statistics and populate the related fields in @s.
10477  */
10478 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10479                            const struct pcpu_sw_netstats __percpu *netstats)
10480 {
10481         int cpu;
10482
10483         for_each_possible_cpu(cpu) {
10484                 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
10485                 const struct pcpu_sw_netstats *stats;
10486                 unsigned int start;
10487
10488                 stats = per_cpu_ptr(netstats, cpu);
10489                 do {
10490                         start = u64_stats_fetch_begin_irq(&stats->syncp);
10491                         rx_packets = u64_stats_read(&stats->rx_packets);
10492                         rx_bytes   = u64_stats_read(&stats->rx_bytes);
10493                         tx_packets = u64_stats_read(&stats->tx_packets);
10494                         tx_bytes   = u64_stats_read(&stats->tx_bytes);
10495                 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
10496
10497                 s->rx_packets += rx_packets;
10498                 s->rx_bytes   += rx_bytes;
10499                 s->tx_packets += tx_packets;
10500                 s->tx_bytes   += tx_bytes;
10501         }
10502 }
10503 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
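/* Editorial sketch: a driver that keeps extra software counters can
 * still reuse dev_fetch_sw_netstats() for the common four fields and
 * fill in the rest itself. All example_* names are hypothetical.
 */
struct example_stats_priv {
        atomic_long_t rx_errors;
};

static void example_get_stats64(struct net_device *dev,
                                struct rtnl_link_stats64 *s)
{
        struct example_stats_priv *priv = netdev_priv(dev);

        dev_fetch_sw_netstats(s, dev->tstats);
        s->rx_errors = atomic_long_read(&priv->rx_errors);
}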
10504
10505 /**
10506  *      dev_get_tstats64 - ndo_get_stats64 implementation
10507  *      @dev: device to get statistics from
10508  *      @s: place to store stats
10509  *
10510  *      Populate @s from dev->stats and dev->tstats. Can be used as the
10511  *      ndo_get_stats64() callback.
10512  */
10513 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10514 {
10515         netdev_stats_to_stats64(s, &dev->stats);
10516         dev_fetch_sw_netstats(s, dev->tstats);
10517 }
10518 EXPORT_SYMBOL_GPL(dev_get_tstats64);
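/* Editorial sketch: the typical wiring for the helper above. For
 * dev_get_tstats64() to be usable as the callback, dev->tstats must
 * point at per-cpu pcpu_sw_netstats, commonly allocated as below from
 * ndo_init(). Names other than the kernel APIs are illustrative.
 */
static int example_ndo_init(struct net_device *dev)
{
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        return dev->tstats ? 0 : -ENOMEM;
}

static const struct net_device_ops example_netdev_ops = {
        .ndo_init        = example_ndo_init,
        .ndo_get_stats64 = dev_get_tstats64,
};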
10519
10520 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10521 {
10522         struct netdev_queue *queue = dev_ingress_queue(dev);
10523
10524 #ifdef CONFIG_NET_CLS_ACT
10525         if (queue)
10526                 return queue;
10527         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10528         if (!queue)
10529                 return NULL;
10530         netdev_init_one_queue(dev, queue, NULL);
10531         RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10532         queue->qdisc_sleeping = &noop_qdisc;
10533         rcu_assign_pointer(dev->ingress_queue, queue);
10534 #endif
10535         return queue;
10536 }
10537
10538 static const struct ethtool_ops default_ethtool_ops;
10539
10540 void netdev_set_default_ethtool_ops(struct net_device *dev,
10541                                     const struct ethtool_ops *ops)
10542 {
10543         if (dev->ethtool_ops == &default_ethtool_ops)
10544                 dev->ethtool_ops = ops;
10545 }
10546 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
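/* Editorial sketch: a mid-layer (e.g. a bus or MAC library) can install
 * fallback ethtool ops without clobbering a driver that already set its
 * own, since only the default pointer is replaced. ethtool_op_get_link()
 * is a real generic helper; the surrounding names are illustrative.
 */
static const struct ethtool_ops example_fallback_ethtool_ops = {
        .get_link = ethtool_op_get_link,
};

static void example_attach(struct net_device *dev)
{
        netdev_set_default_ethtool_ops(dev, &example_fallback_ethtool_ops);
}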
10547
10548 void netdev_freemem(struct net_device *dev)
10549 {
10550         char *addr = (char *)dev - dev->padded;
10551
10552         kvfree(addr);
10553 }
10554
10555 /**
10556  * alloc_netdev_mqs - allocate network device
10557  * @sizeof_priv: size of private data to allocate space for
10558  * @name: device name format string
10559  * @name_assign_type: origin of device name
10560  * @setup: callback to initialize device
10561  * @txqs: the number of TX subqueues to allocate
10562  * @rxqs: the number of RX subqueues to allocate
10563  *
10564  * Allocates a struct net_device with private data area for driver use
10565  * and performs basic initialization.  Also allocates subqueue structs
10566  * for each queue on the device.
10567  */
10568 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
10569                 unsigned char name_assign_type,
10570                 void (*setup)(struct net_device *),
10571                 unsigned int txqs, unsigned int rxqs)
10572 {
10573         struct net_device *dev;
10574         unsigned int alloc_size;
10575         struct net_device *p;
10576
10577         BUG_ON(strlen(name) >= sizeof(dev->name));
10578
10579         if (txqs < 1) {
10580                 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
10581                 return NULL;
10582         }
10583
10584         if (rxqs < 1) {
10585                 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
10586                 return NULL;
10587         }
10588
10589         alloc_size = sizeof(struct net_device);
10590         if (sizeof_priv) {
10591                 /* ensure 32-byte alignment of private area */
10592                 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
10593                 alloc_size += sizeof_priv;
10594         }
10595         /* ensure 32-byte alignment of whole construct */
10596         alloc_size += NETDEV_ALIGN - 1;
10597
10598         p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10599         if (!p)
10600                 return NULL;
10601
10602         dev = PTR_ALIGN(p, NETDEV_ALIGN);
10603         dev->padded = (char *)dev - (char *)p;
10604
10605         ref_tracker_dir_init(&dev->refcnt_tracker, 128);
10606 #ifdef CONFIG_PCPU_DEV_REFCNT
10607         dev->pcpu_refcnt = alloc_percpu(int);
10608         if (!dev->pcpu_refcnt)
10609                 goto free_dev;
10610         __dev_hold(dev);
10611 #else
10612         refcount_set(&dev->dev_refcnt, 1);
10613 #endif
10614
10615         if (dev_addr_init(dev))
10616                 goto free_pcpu;
10617
10618         dev_mc_init(dev);
10619         dev_uc_init(dev);
10620
10621         dev_net_set(dev, &init_net);
10622
10623         dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
10624         dev->gso_max_segs = GSO_MAX_SEGS;
10625         dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
10626         dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
10627         dev->tso_max_segs = TSO_MAX_SEGS;
10628         dev->upper_level = 1;
10629         dev->lower_level = 1;
10630 #ifdef CONFIG_LOCKDEP
10631         dev->nested_level = 0;
10632         INIT_LIST_HEAD(&dev->unlink_list);
10633 #endif
10634
10635         INIT_LIST_HEAD(&dev->napi_list);
10636         INIT_LIST_HEAD(&dev->unreg_list);
10637         INIT_LIST_HEAD(&dev->close_list);
10638         INIT_LIST_HEAD(&dev->link_watch_list);
10639         INIT_LIST_HEAD(&dev->adj_list.upper);
10640         INIT_LIST_HEAD(&dev->adj_list.lower);
10641         INIT_LIST_HEAD(&dev->ptype_all);
10642         INIT_LIST_HEAD(&dev->ptype_specific);
10643         INIT_LIST_HEAD(&dev->net_notifier_list);
10644 #ifdef CONFIG_NET_SCHED
10645         hash_init(dev->qdisc_hash);
10646 #endif
10647         dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10648         setup(dev);
10649
10650         if (!dev->tx_queue_len) {
10651                 dev->priv_flags |= IFF_NO_QUEUE;
10652                 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10653         }
10654
10655         dev->num_tx_queues = txqs;
10656         dev->real_num_tx_queues = txqs;
10657         if (netif_alloc_netdev_queues(dev))
10658                 goto free_all;
10659
10660         dev->num_rx_queues = rxqs;
10661         dev->real_num_rx_queues = rxqs;
10662         if (netif_alloc_rx_queues(dev))
10663                 goto free_all;
10664
10665         strcpy(dev->name, name);
10666         dev->name_assign_type = name_assign_type;
10667         dev->group = INIT_NETDEV_GROUP;
10668         if (!dev->ethtool_ops)
10669                 dev->ethtool_ops = &default_ethtool_ops;
10670
10671         nf_hook_netdev_init(dev);
10672
10673         return dev;
10674
10675 free_all:
10676         free_netdev(dev);
10677         return NULL;
10678
10679 free_pcpu:
10680 #ifdef CONFIG_PCPU_DEV_REFCNT
10681         free_percpu(dev->pcpu_refcnt);
10682 free_dev:
10683 #endif
10684         netdev_freemem(dev);
10685         return NULL;
10686 }
10687 EXPORT_SYMBOL(alloc_netdev_mqs);
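/* Editorial sketch: the usual allocation pattern for the function above.
 * ether_setup() and NET_NAME_ENUM are real kernel symbols; struct
 * example_priv and the queue counts are illustrative. netdev_priv()
 * returns the NETDEV_ALIGN-aligned private area sized by @sizeof_priv.
 */
struct example_priv {
        int id;
};

static struct net_device *example_alloc(void)
{
        struct net_device *dev;

        dev = alloc_netdev_mqs(sizeof(struct example_priv), "ex%d",
                               NET_NAME_ENUM, ether_setup, 4, 4);
        if (!dev)
                return NULL;

        ((struct example_priv *)netdev_priv(dev))->id = 0;
        return dev;
}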
10688
10689 /**
10690  * free_netdev - free network device
10691  * @dev: device
10692  *
10693  * This function does the last stage of destroying an allocated device
10694  * interface. The reference to the device object is released. If this
10695  * is the last reference then it will be freed. Must be called in process
10696  * context.
10697  */
10698 void free_netdev(struct net_device *dev)
10699 {
10700         struct napi_struct *p, *n;
10701
10702         might_sleep();
10703
10704         /* When called immediately after register_netdevice() failed, the unwind
10705          * handling may still be dismantling the device. Handle that case by
10706          * deferring the free.
10707          */
10708         if (dev->reg_state == NETREG_UNREGISTERING) {
10709                 ASSERT_RTNL();
10710                 dev->needs_free_netdev = true;
10711                 return;
10712         }
10713
10714         netif_free_tx_queues(dev);
10715         netif_free_rx_queues(dev);
10716
10717         kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10718
10719         /* Flush device addresses */
10720         dev_addr_flush(dev);
10721
10722         list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10723                 netif_napi_del(p);
10724
10725         ref_tracker_dir_exit(&dev->refcnt_tracker);
10726 #ifdef CONFIG_PCPU_DEV_REFCNT
10727         free_percpu(dev->pcpu_refcnt);
10728         dev->pcpu_refcnt = NULL;
10729 #endif
10730         free_percpu(dev->core_stats);
10731         dev->core_stats = NULL;
10732         free_percpu(dev->xdp_bulkq);
10733         dev->xdp_bulkq = NULL;
10734
10735         /*  Compatibility with error handling in drivers */
10736         if (dev->reg_state == NETREG_UNINITIALIZED) {
10737                 netdev_freemem(dev);
10738                 return;
10739         }
10740
10741         BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10742         dev->reg_state = NETREG_RELEASED;
10743
10744         /* will free via device release */
10745         put_device(&dev->dev);
10746 }
10747 EXPORT_SYMBOL(free_netdev);
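/* Editorial sketch: the allocation/registration unwind free_netdev() is
 * written for. Note the NETREG_UNREGISTERING branch above: calling it
 * under RTNL right after a failed register_netdevice() is safe because
 * the free is merely deferred. Names here are illustrative.
 */
static int example_probe_netdev(void)
{
        struct net_device *dev = alloc_etherdev(0);
        int err;

        if (!dev)
                return -ENOMEM;

        rtnl_lock();
        err = register_netdevice(dev);
        if (err)
                free_netdev(dev);       /* may just defer the free, see above */
        rtnl_unlock();
        return err;
}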
10748
10749 /**
10750  *      synchronize_net -  Synchronize with packet receive processing
10751  *
10752  *      Wait for packets currently being received to be done.
10753  *      Does not block later packets from starting.
10754  */
10755 void synchronize_net(void)
10756 {
10757         might_sleep();
10758         if (rtnl_is_locked())
10759                 synchronize_rcu_expedited();
10760         else
10761                 synchronize_rcu();
10762 }
10763 EXPORT_SYMBOL(synchronize_net);
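/* Editorial sketch of the retract-then-wait pattern synchronize_net()
 * supports: clear an RCU-published pointer, wait out in-flight receive
 * processing, then free. struct example_state and the pointer are
 * hypothetical.
 */
struct example_state {
        int data;
};

static struct example_state __rcu *example_state_ptr;

static void example_retract(void)
{
        struct example_state *old;

        old = rcu_replace_pointer(example_state_ptr, NULL, true);
        synchronize_net();      /* no receive path can still see @old */
        kfree(old);
}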
10764
10765 /**
10766  *      unregister_netdevice_queue - remove device from the kernel
10767  *      @dev: device
10768  *      @head: list
10769  *
10770  *      This function shuts down a device interface and removes it
10771  *      from the kernel tables.
10772  *      If @head is not NULL, the device is queued to be unregistered later.
10773  *
10774  *      Callers must hold the rtnl semaphore.  You may want
10775  *      unregister_netdev() instead of this.
10776  */
10777
10778 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10779 {
10780         ASSERT_RTNL();
10781
10782         if (head) {
10783                 list_move_tail(&dev->unreg_list, head);
10784         } else {
10785                 LIST_HEAD(single);
10786
10787                 list_add(&dev->unreg_list, &single);
10788                 unregister_netdevice_many(&single);
10789         }
10790 }
10791 EXPORT_SYMBOL(unregister_netdevice_queue);
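/* Editorial sketch: the batching use of @head, mirroring what simple
 * rtnl_link_ops->dellink() implementations do. Devices queued on the
 * caller's list are later torn down by one unregister_netdevice_many()
 * call, amortizing the synchronize_net() round trips.
 */
static void example_dellink(struct net_device *dev, struct list_head *head)
{
        unregister_netdevice_queue(dev, head);
}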
10792
10793 /**
10794  *      unregister_netdevice_many - unregister many devices
10795  *      @head: list of devices
10796  *
10797  *  Note: As most callers use a stack-allocated list_head,
10798  *  we force a list_del() to make sure the stack won't be corrupted later.
10799  */
10800 void unregister_netdevice_many(struct list_head *head)
10801 {
10802         struct net_device *dev, *tmp;
10803         LIST_HEAD(close_head);
10804
10805         BUG_ON(dev_boot_phase);
10806         ASSERT_RTNL();
10807
10808         if (list_empty(head))
10809                 return;
10810
10811         list_for_each_entry_safe(dev, tmp, head, unreg_list) {
10812                 /* Some devices get here without having been
10813                  * registered, as part of initialization unwind.
10814                  * Remove those devices and proceed with the rest.
10815                  */
10816                 if (dev->reg_state == NETREG_UNINITIALIZED) {
10817                         pr_debug("unregister_netdevice: device %s/%p never was registered\n",
10818                                  dev->name, dev);
10819
10820                         WARN_ON(1);
10821                         list_del(&dev->unreg_list);
10822                         continue;
10823                 }
10824                 dev->dismantle = true;
10825                 BUG_ON(dev->reg_state != NETREG_REGISTERED);
10826         }
10827
10828         /* If device is running, close it first. */
10829         list_for_each_entry(dev, head, unreg_list)
10830                 list_add_tail(&dev->close_list, &close_head);
10831         dev_close_many(&close_head, true);
10832
10833         list_for_each_entry(dev, head, unreg_list) {
10834                 /* And unlink it from device chain. */
10835                 write_lock(&dev_base_lock);
10836                 unlist_netdevice(dev, false);
10837                 dev->reg_state = NETREG_UNREGISTERING;
10838                 write_unlock(&dev_base_lock);
10839         }
10840         flush_all_backlogs();
10841
10842         synchronize_net();
10843
10844         list_for_each_entry(dev, head, unreg_list) {
10845                 struct sk_buff *skb = NULL;
10846
10847                 /* Shutdown queueing discipline. */
10848                 dev_shutdown(dev);
10849
10850                 dev_xdp_uninstall(dev);
10851
10852                 netdev_offload_xstats_disable_all(dev);
10853
10854                 /* Notify protocols that we are about to destroy
10855                  * this device; they should clean up all their state.
10856                  */
10857                 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10858
10859                 if (!dev->rtnl_link_ops ||
10860                     dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10861                         skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
10862                                                      GFP_KERNEL, NULL, 0);
10863
10864                 /*
10865                  *      Flush the unicast and multicast chains
10866                  */
10867                 dev_uc_flush(dev);
10868                 dev_mc_flush(dev);
10869
10870                 netdev_name_node_alt_flush(dev);
10871                 netdev_name_node_free(dev->name_node);
10872
10873                 if (dev->netdev_ops->ndo_uninit)
10874                         dev->netdev_ops->ndo_uninit(dev);
10875
10876                 if (skb)
10877                         rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
10878
10879                 /* Notifier chain MUST detach us from all upper devices. */
10880                 WARN_ON(netdev_has_any_upper_dev(dev));
10881                 WARN_ON(netdev_has_any_lower_dev(dev));
10882
10883                 /* Remove entries from kobject tree */
10884                 netdev_unregister_kobject(dev);
10885 #ifdef CONFIG_XPS
10886                 /* Remove XPS queueing entries */
10887                 netif_reset_xps_queues_gt(dev, 0);
10888 #endif
10889         }
10890
10891         synchronize_net();
10892
10893         list_for_each_entry(dev, head, unreg_list) {
10894                 netdev_put(dev, &dev->dev_registered_tracker);
10895                 net_set_todo(dev);
10896         }
10897
10898         list_del(head);
10899 }
10900 EXPORT_SYMBOL(unregister_netdevice_many);
10901
10902 /**
10903  *      unregister_netdev - remove device from the kernel
10904  *      @dev: device
10905  *
10906  *      This function shuts down a device interface and removes it
10907  *      from the kernel tables.
10908  *
10909  *      This is just a wrapper for unregister_netdevice that takes
10910  *      the rtnl semaphore.  In general you want to use this and not
10911  *      unregister_netdevice.
10912  */
10913 void unregister_netdev(struct net_device *dev)
10914 {
10915         rtnl_lock();
10916         unregister_netdevice(dev);
10917         rtnl_unlock();
10918 }
10919 EXPORT_SYMBOL(unregister_netdev);
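/* Editorial sketch: typical driver removal. unregister_netdev() takes
 * RTNL itself, so the caller must not hold it; free_netdev() then drops
 * the final reference. The function name is illustrative.
 */
static void example_remove_netdev(struct net_device *dev)
{
        unregister_netdev(dev);
        free_netdev(dev);
}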
10920
10921 /**
10922  *      __dev_change_net_namespace - move device to a different network namespace
10923  *      @dev: device
10924  *      @net: network namespace
10925  *      @pat: If not NULL, name pattern to try if the current device name
10926  *            is already taken in the destination network namespace.
10927  *      @new_ifindex: If not zero, specifies device index in the target
10928  *                    namespace.
10929  *
10930  *      This function shuts down a device interface and moves it
10931  *      to a new network namespace. On success 0 is returned, on
10932  *      a failure a negative errno code is returned.
10933  *
10934  *      Callers must hold the rtnl semaphore.
10935  */
10936
10937 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
10938                                const char *pat, int new_ifindex)
10939 {
10940         struct net *net_old = dev_net(dev);
10941         int err, new_nsid;
10942
10943         ASSERT_RTNL();
10944
10945         /* Don't allow namespace local devices to be moved. */
10946         err = -EINVAL;
10947         if (dev->features & NETIF_F_NETNS_LOCAL)
10948                 goto out;
10949
10950         /* Ensure the device has been registered */
10951         if (dev->reg_state != NETREG_REGISTERED)
10952                 goto out;
10953
10954         /* Get out if there is nothing to do */
10955         err = 0;
10956         if (net_eq(net_old, net))
10957                 goto out;
10958
10959         /* Pick the destination device name, and ensure
10960          * we can use it in the destination network namespace.
10961          */
10962         err = -EEXIST;
10963         if (netdev_name_in_use(net, dev->name)) {
10964                 /* We get here if we can't use the current device name */
10965                 if (!pat)
10966                         goto out;
10967                 err = dev_get_valid_name(net, dev, pat);
10968                 if (err < 0)
10969                         goto out;
10970         }
10971
10972         /* Check that new_ifindex isn't used yet. */
10973         err = -EBUSY;
10974         if (new_ifindex && __dev_get_by_index(net, new_ifindex))
10975                 goto out;
10976
10977         /*
10978          * And now a mini version of register_netdevice and unregister_netdevice.
10979          */
10980
10981         /* If device is running close it first. */
10982         dev_close(dev);
10983
10984         /* And unlink it from device chain */
10985         unlist_netdevice(dev, true);
10986
10987         synchronize_net();
10988
10989         /* Shutdown queueing discipline. */
10990         dev_shutdown(dev);
10991
10992         /* Notify protocols that we are about to destroy
10993          * this device; they should clean up all their state.
10994          *
10995          * Note that dev->reg_state stays at NETREG_REGISTERED.
10996          * This is wanted because this way 8021q and macvlan know
10997          * the device is just moving and can keep their slaves up.
10998          */
10999         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11000         rcu_barrier();
11001
11002         new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
11003         /* If there is an ifindex conflict, assign a new one */
11004         if (!new_ifindex) {
11005                 if (__dev_get_by_index(net, dev->ifindex))
11006                         new_ifindex = dev_new_index(net);
11007                 else
11008                         new_ifindex = dev->ifindex;
11009         }
11010
11011         rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
11012                             new_ifindex);
11013
11014         /*
11015          *      Flush the unicast and multicast chains
11016          */
11017         dev_uc_flush(dev);
11018         dev_mc_flush(dev);
11019
11020         /* Send a netdev-removed uevent to the old namespace */
11021         kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11022         netdev_adjacent_del_links(dev);
11023
11024         /* Move per-net netdevice notifiers that are following the netdevice */
11025         move_netdevice_notifiers_dev_net(dev, net);
11026
11027         /* Actually switch the network namespace */
11028         dev_net_set(dev, net);
11029         dev->ifindex = new_ifindex;
11030
11031         /* Send a netdev-add uevent to the new namespace */
11032         kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11033         netdev_adjacent_add_links(dev);
11034
11035         /* Fixup kobjects */
11036         err = device_rename(&dev->dev, dev->name);
11037         WARN_ON(err);
11038
11039         /* Adapt owner in case owning user namespace of target network
11040          * namespace is different from the original one.
11041          */
11042         err = netdev_change_owner(dev, net_old, net);
11043         WARN_ON(err);
11044
11045         /* Add the device back in the hashes */
11046         list_netdevice(dev);
11047
11048         /* Notify protocols, that a new device appeared. */
11049         call_netdevice_notifiers(NETDEV_REGISTER, dev);
11050
11051         /*
11052          *      Prevent userspace races by waiting until the network
11053          *      device is fully set up before sending notifications.
11054          */
11055         rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
11056
11057         synchronize_net();
11058         err = 0;
11059 out:
11060         return err;
11061 }
11062 EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
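/* Editorial sketch: moving a device via the dev_change_net_namespace()
 * wrapper (which passes new_ifindex == 0, letting the kernel keep or
 * pick an ifindex). "eth%d" is a fallback pattern applied only if the
 * current name is taken in @net; the function name is illustrative.
 */
static int example_move_to_ns(struct net_device *dev, struct net *net)
{
        int err;

        rtnl_lock();
        err = dev_change_net_namespace(dev, net, "eth%d");
        rtnl_unlock();
        return err;
}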
11063
11064 static int dev_cpu_dead(unsigned int oldcpu)
11065 {
11066         struct sk_buff **list_skb;
11067         struct sk_buff *skb;
11068         unsigned int cpu;
11069         struct softnet_data *sd, *oldsd, *remsd = NULL;
11070
11071         local_irq_disable();
11072         cpu = smp_processor_id();
11073         sd = &per_cpu(softnet_data, cpu);
11074         oldsd = &per_cpu(softnet_data, oldcpu);
11075
11076         /* Find end of our completion_queue. */
11077         list_skb = &sd->completion_queue;
11078         while (*list_skb)
11079                 list_skb = &(*list_skb)->next;
11080         /* Append completion queue from offline CPU. */
11081         *list_skb = oldsd->completion_queue;
11082         oldsd->completion_queue = NULL;
11083
11084         /* Append output queue from offline CPU. */
11085         if (oldsd->output_queue) {
11086                 *sd->output_queue_tailp = oldsd->output_queue;
11087                 sd->output_queue_tailp = oldsd->output_queue_tailp;
11088                 oldsd->output_queue = NULL;
11089                 oldsd->output_queue_tailp = &oldsd->output_queue;
11090         }
11091         /* Append NAPI poll list from offline CPU, with one exception:
11092          * process_backlog() must be called by the CPU owning the percpu
11093          * backlog. We properly handle process_queue & input_pkt_queue later.
11094          */
11095         while (!list_empty(&oldsd->poll_list)) {
11096                 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11097                                                             struct napi_struct,
11098                                                             poll_list);
11099
11100                 list_del_init(&napi->poll_list);
11101                 if (napi->poll == process_backlog)
11102                         napi->state = 0;
11103                 else
11104                         ____napi_schedule(sd, napi);
11105         }
11106
11107         raise_softirq_irqoff(NET_TX_SOFTIRQ);
11108         local_irq_enable();
11109
11110 #ifdef CONFIG_RPS
11111         remsd = oldsd->rps_ipi_list;
11112         oldsd->rps_ipi_list = NULL;
11113 #endif
11114         /* Send out pending IPIs on the offline CPU */
11115         net_rps_send_ipi(remsd);
11116
11117         /* Process offline CPU's input_pkt_queue */
11118         while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11119                 netif_rx(skb);
11120                 input_queue_head_incr(oldsd);
11121         }
11122         while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11123                 netif_rx(skb);
11124                 input_queue_head_incr(oldsd);
11125         }
11126
11127         return 0;
11128 }
11129
11130 /**
11131  *      netdev_increment_features - increment feature set by one
11132  *      @all: current feature set
11133  *      @one: new feature set
11134  *      @mask: mask feature set
11135  *
11136  *      Computes a new feature set after adding a device with feature set
11137  *      @one to the master device with current feature set @all.  Will not
11138  *      enable anything that is off in @mask. Returns the new feature set.
11139  */
11140 netdev_features_t netdev_increment_features(netdev_features_t all,
11141         netdev_features_t one, netdev_features_t mask)
11142 {
11143         if (mask & NETIF_F_HW_CSUM)
11144                 mask |= NETIF_F_CSUM_MASK;
11145         mask |= NETIF_F_VLAN_CHALLENGED;
11146
11147         all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
11148         all &= one | ~NETIF_F_ALL_FOR_ALL;
11149
11150         /* If one device supports hw checksumming, set for all. */
11151         if (all & NETIF_F_HW_CSUM)
11152                 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
11153
11154         return all;
11155 }
11156 EXPORT_SYMBOL(netdev_increment_features);
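/* Editorial sketch of the fold this helper is designed for (bonding and
 * team do the equivalent): recompute a master's feature set by folding
 * in each lower device. netdev_for_each_lower_dev() is a real iterator
 * (caller must hold RTNL); the function name and the starting set are
 * illustrative.
 */
static netdev_features_t example_master_features(struct net_device *master)
{
        netdev_features_t all = NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK;
        struct net_device *lower;
        struct list_head *iter;

        netdev_for_each_lower_dev(master, lower, iter)
                all = netdev_increment_features(all, lower->features,
                                                master->features);
        return all;
}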
11157
11158 static struct hlist_head * __net_init netdev_create_hash(void)
11159 {
11160         int i;
11161         struct hlist_head *hash;
11162
11163         hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
11164         if (hash != NULL)
11165                 for (i = 0; i < NETDEV_HASHENTRIES; i++)
11166                         INIT_HLIST_HEAD(&hash[i]);
11167
11168         return hash;
11169 }
11170
11171 /* Initialize per network namespace state */
11172 static int __net_init netdev_init(struct net *net)
11173 {
11174         BUILD_BUG_ON(GRO_HASH_BUCKETS >
11175                      8 * sizeof_field(struct napi_struct, gro_bitmask));
11176
11177         INIT_LIST_HEAD(&net->dev_base_head);
11178
11179         net->dev_name_head = netdev_create_hash();
11180         if (net->dev_name_head == NULL)
11181                 goto err_name;
11182
11183         net->dev_index_head = netdev_create_hash();
11184         if (net->dev_index_head == NULL)
11185                 goto err_idx;
11186
11187         RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
11188
11189         return 0;
11190
11191 err_idx:
11192         kfree(net->dev_name_head);
11193 err_name:
11194         return -ENOMEM;
11195 }
11196
11197 /**
11198  *      netdev_drivername - network driver for the device
11199  *      @dev: network device
11200  *
11201  *      Determine network driver for device.
11202  */
11203 const char *netdev_drivername(const struct net_device *dev)
11204 {
11205         const struct device_driver *driver;
11206         const struct device *parent;
11207         const char *empty = "";
11208
11209         parent = dev->dev.parent;
11210         if (!parent)
11211                 return empty;
11212
11213         driver = parent->driver;
11214         if (driver && driver->name)
11215                 return driver->name;
11216         return empty;
11217 }
11218
11219 static void __netdev_printk(const char *level, const struct net_device *dev,
11220                             struct va_format *vaf)
11221 {
11222         if (dev && dev->dev.parent) {
11223                 dev_printk_emit(level[1] - '0',
11224                                 dev->dev.parent,
11225                                 "%s %s %s%s: %pV",
11226                                 dev_driver_string(dev->dev.parent),
11227                                 dev_name(dev->dev.parent),
11228                                 netdev_name(dev), netdev_reg_state(dev),
11229                                 vaf);
11230         } else if (dev) {
11231                 printk("%s%s%s: %pV",
11232                        level, netdev_name(dev), netdev_reg_state(dev), vaf);
11233         } else {
11234                 printk("%s(NULL net_device): %pV", level, vaf);
11235         }
11236 }
11237
11238 void netdev_printk(const char *level, const struct net_device *dev,
11239                    const char *format, ...)
11240 {
11241         struct va_format vaf;
11242         va_list args;
11243
11244         va_start(args, format);
11245
11246         vaf.fmt = format;
11247         vaf.va = &args;
11248
11249         __netdev_printk(level, dev, &vaf);
11250
11251         va_end(args);
11252 }
11253 EXPORT_SYMBOL(netdev_printk);
11254
11255 #define define_netdev_printk_level(func, level)                 \
11256 void func(const struct net_device *dev, const char *fmt, ...)   \
11257 {                                                               \
11258         struct va_format vaf;                                   \
11259         va_list args;                                           \
11260                                                                 \
11261         va_start(args, fmt);                                    \
11262                                                                 \
11263         vaf.fmt = fmt;                                          \
11264         vaf.va = &args;                                         \
11265                                                                 \
11266         __netdev_printk(level, dev, &vaf);                      \
11267                                                                 \
11268         va_end(args);                                           \
11269 }                                                               \
11270 EXPORT_SYMBOL(func);
11271
11272 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
11273 define_netdev_printk_level(netdev_alert, KERN_ALERT);
11274 define_netdev_printk_level(netdev_crit, KERN_CRIT);
11275 define_netdev_printk_level(netdev_err, KERN_ERR);
11276 define_netdev_printk_level(netdev_warn, KERN_WARNING);
11277 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
11278 define_netdev_printk_level(netdev_info, KERN_INFO);
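/* Editorial sketch: typical call sites for the per-level wrappers
 * generated above; __netdev_printk() prefixes messages with driver,
 * bus and netdev names. The message text is illustrative.
 */
static void example_report(struct net_device *dev, int err)
{
        if (err)
                netdev_err(dev, "reset failed: %d\n", err);
        else
                netdev_info(dev, "link reset complete\n");
}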
11279
11280 static void __net_exit netdev_exit(struct net *net)
11281 {
11282         kfree(net->dev_name_head);
11283         kfree(net->dev_index_head);
11284         if (net != &init_net)
11285                 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
11286 }
11287
11288 static struct pernet_operations __net_initdata netdev_net_ops = {
11289         .init = netdev_init,
11290         .exit = netdev_exit,
11291 };
11292
11293 static void __net_exit default_device_exit_net(struct net *net)
11294 {
11295         struct net_device *dev, *aux;
11296         /*
11297          * Push all migratable network devices back to the
11298          * initial network namespace
11299          */
11300         ASSERT_RTNL();
11301         for_each_netdev_safe(net, dev, aux) {
11302                 int err;
11303                 char fb_name[IFNAMSIZ];
11304
11305                 /* Ignore unmovable devices (e.g. loopback) */
11306                 if (dev->features & NETIF_F_NETNS_LOCAL)
11307                         continue;
11308
11309                 /* Leave virtual devices for the generic cleanup */
11310                 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
11311                         continue;
11312
11313                 /* Push remaining network devices to init_net */
11314                 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
11315                 if (netdev_name_in_use(&init_net, fb_name))
11316                         snprintf(fb_name, IFNAMSIZ, "dev%%d");
11317                 err = dev_change_net_namespace(dev, &init_net, fb_name);
11318                 if (err) {
11319                         pr_emerg("%s: failed to move %s to init_net: %d\n",
11320                                  __func__, dev->name, err);
11321                         BUG();
11322                 }
11323         }
11324 }
11325
11326 static void __net_exit default_device_exit_batch(struct list_head *net_list)
11327 {
11328         /* At exit all network devices must be removed from a network
11329          * namespace.  Do this in the reverse order of registration.
11330          * Do this across as many network namespaces as possible to
11331          * improve batching efficiency.
11332          */
11333         struct net_device *dev;
11334         struct net *net;
11335         LIST_HEAD(dev_kill_list);
11336
11337         rtnl_lock();
11338         list_for_each_entry(net, net_list, exit_list) {
11339                 default_device_exit_net(net);
11340                 cond_resched();
11341         }
11342
11343         list_for_each_entry(net, net_list, exit_list) {
11344                 for_each_netdev_reverse(net, dev) {
11345                         if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11346                                 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11347                         else
11348                                 unregister_netdevice_queue(dev, &dev_kill_list);
11349                 }
11350         }
11351         unregister_netdevice_many(&dev_kill_list);
11352         rtnl_unlock();
11353 }
11354
11355 static struct pernet_operations __net_initdata default_device_ops = {
11356         .exit_batch = default_device_exit_batch,
11357 };
11358
11359 /*
11360  *      Initialize the DEV module. At boot time this walks the device list and
11361  *      unhooks any devices that fail to initialise (normally hardware not
11362  *      present) and leaves us with a valid list of present and active devices.
11363  *
11364  */
11365
11366 /*
11367  *       This is called single-threaded during boot, so no need
11368  *       to take the rtnl semaphore.
11369  */
11370 static int __init net_dev_init(void)
11371 {
11372         int i, rc = -ENOMEM;
11373
11374         BUG_ON(!dev_boot_phase);
11375
11376         if (dev_proc_init())
11377                 goto out;
11378
11379         if (netdev_kobject_init())
11380                 goto out;
11381
11382         INIT_LIST_HEAD(&ptype_all);
11383         for (i = 0; i < PTYPE_HASH_SIZE; i++)
11384                 INIT_LIST_HEAD(&ptype_base[i]);
11385
11386         if (register_pernet_subsys(&netdev_net_ops))
11387                 goto out;
11388
11389         /*
11390          *      Initialise the packet receive queues.
11391          */
11392
11393         for_each_possible_cpu(i) {
11394                 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11395                 struct softnet_data *sd = &per_cpu(softnet_data, i);
11396
11397                 INIT_WORK(flush, flush_backlog);
11398
11399                 skb_queue_head_init(&sd->input_pkt_queue);
11400                 skb_queue_head_init(&sd->process_queue);
11401 #ifdef CONFIG_XFRM_OFFLOAD
11402                 skb_queue_head_init(&sd->xfrm_backlog);
11403 #endif
11404                 INIT_LIST_HEAD(&sd->poll_list);
11405                 sd->output_queue_tailp = &sd->output_queue;
11406 #ifdef CONFIG_RPS
11407                 INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11408                 sd->cpu = i;
11409 #endif
11410                 INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
11411                 spin_lock_init(&sd->defer_lock);
11412
11413                 init_gro_hash(&sd->backlog);
11414                 sd->backlog.poll = process_backlog;
11415                 sd->backlog.weight = weight_p;
11416         }
11417
11418         dev_boot_phase = 0;
11419
11420         /* The loopback device is special: if any other network device
11421          * is present in a network namespace, the loopback device must
11422          * be present too. Since we now dynamically allocate and free
11423          * the loopback device, ensure this invariant is maintained by
11424          * keeping the loopback device as the first device on the
11425          * list of network devices.  That way the loopback device
11426          * is the first device that appears and the last network device
11427          * that disappears.
11428          */
11429         if (register_pernet_device(&loopback_net_ops))
11430                 goto out;
11431
11432         if (register_pernet_device(&default_device_ops))
11433                 goto out;
11434
11435         open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11436         open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11437
11438         rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11439                                        NULL, dev_cpu_dead);
11440         WARN_ON(rc < 0);
11441         rc = 0;
11442 out:
11443         return rc;
11444 }
11445
11446 subsys_initcall(net_dev_init);