/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call per packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/stat.h>
102 #include <net/pkt_sched.h>
103 #include <net/checksum.h>
104 #include <net/xfrm.h>
105 #include <linux/highmem.h>
106 #include <linux/init.h>
107 #include <linux/module.h>
108 #include <linux/netpoll.h>
109 #include <linux/rcupdate.h>
110 #include <linux/delay.h>
111 #include <net/iw_handler.h>
112 #include <asm/current.h>
113 #include <linux/audit.h>
114 #include <linux/dmaengine.h>
115 #include <linux/err.h>
116 #include <linux/ctype.h>
117 #include <linux/if_arp.h>
118 #include <linux/if_vlan.h>
119 #include <linux/ip.h>
121 #include <net/mpls.h>
122 #include <linux/ipv6.h>
123 #include <linux/in.h>
124 #include <linux/jhash.h>
125 #include <linux/random.h>
126 #include <trace/events/napi.h>
127 #include <trace/events/net.h>
128 #include <trace/events/skb.h>
129 #include <linux/pci.h>
130 #include <linux/inetdevice.h>
131 #include <linux/cpu_rmap.h>
132 #include <linux/static_key.h>
133 #include <linux/hashtable.h>
134 #include <linux/vmalloc.h>
135 #include <linux/if_macvlan.h>
136 #include <linux/errqueue.h>
137 #include <linux/hrtimer.h>
139 #include "net-sysfs.h"
141 /* Instead of increasing this, you should create a hash table. */
142 #define MAX_GRO_SKBS 8
144 /* This should be increased if a protocol with a bigger head is added. */
145 #define GRO_MAX_HEAD (MAX_HEADER + 128)
147 static DEFINE_SPINLOCK(ptype_lock);
148 static DEFINE_SPINLOCK(offload_lock);
149 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
150 struct list_head ptype_all __read_mostly; /* Taps */
151 static struct list_head offload_base __read_mostly;
153 static int netif_rx_internal(struct sk_buff *skb);
154 static int call_netdevice_notifiers_info(unsigned long val,
155 struct net_device *dev,
156 struct netdev_notifier_info *info);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
177 DEFINE_RWLOCK(dev_base_lock);
178 EXPORT_SYMBOL(dev_base_lock);
180 /* protects napi_hash addition/deletion and napi_gen_id */
181 static DEFINE_SPINLOCK(napi_hash_lock);
183 static unsigned int napi_gen_id;
184 static DEFINE_HASHTABLE(napi_hash, 8);
186 static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
219 /* Device list insertion */
220 static void list_netdevice(struct net_device *dev)
222 struct net *net = dev_net(dev);
226 write_lock_bh(&dev_base_lock);
227 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
228 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
229 hlist_add_head_rcu(&dev->index_hlist,
230 dev_index_hash(net, dev->ifindex));
231 write_unlock_bh(&dev_base_lock);
233 dev_base_seq_inc(net);
236 /* Device list removal
237 * caller must respect a RCU grace period before freeing/reusing dev
239 static void unlist_netdevice(struct net_device *dev)
243 /* Unlink dev from the device chain */
244 write_lock_bh(&dev_base_lock);
245 list_del_rcu(&dev->dev_list);
246 hlist_del_rcu(&dev->name_hlist);
247 hlist_del_rcu(&dev->index_hlist);
248 write_unlock_bh(&dev_base_lock);
250 dev_base_seq_inc(dev_net(dev));
257 static RAW_NOTIFIER_HEAD(netdev_chain);
260 * Device drivers call our routines to queue packets here. We empty the
261 * queue in the local softnet handler.
264 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
265 EXPORT_PER_CPU_SYMBOL(softnet_data);
267 #ifdef CONFIG_LOCKDEP
269 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
270 * according to dev->type
272 static const unsigned short netdev_lock_type[] =
273 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
274 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
275 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
276 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
277 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
278 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
279 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
280 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
281 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
282 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
283 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
284 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
285 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
286 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
287 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
289 static const char *const netdev_lock_name[] =
290 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
291 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
292 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
293 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
294 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
295 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
296 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
297 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
298 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
299 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
300 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
301 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
302 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
303 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
304 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
306 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
307 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}
320 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
321 unsigned short dev_type)
325 i = netdev_lock_pos(dev_type);
326 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
327 netdev_lock_name[i]);
330 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
334 i = netdev_lock_pos(dev->type);
335 lockdep_set_class_and_name(&dev->addr_list_lock,
336 &netdev_addr_lock_key[i],
337 netdev_lock_name[i]);
340 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
341 unsigned short dev_type)
344 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
349 /*******************************************************************************
351 Protocol management and registration routines
353 *******************************************************************************/
356 * Add a protocol ID to the list. Now that the input handler is
357 * smarter we can dispense with all the messy stuff that used to be
360 * BEWARE!!! Protocol handlers, mangling input packets,
361 * MUST BE last in hash buckets and checking protocol handlers
362 * MUST start from promiscuous ptype_all chain in net_bh.
363 * It is true now, do not change it.
364 * Explanation follows: if protocol handler, mangling packet, will
365 * be the first on list, it is not able to sense, that packet
366 * is cloned and should be copied-on-write, so that it will
367 * change it and subsequent readers will get broken packet.
371 static inline struct list_head *ptype_head(const struct packet_type *pt)
373 if (pt->type == htons(ETH_P_ALL))
374 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
376 return pt->dev ? &pt->dev->ptype_specific :
377 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
381 * dev_add_pack - add packet handler
382 * @pt: packet type declaration
384 * Add a protocol handler to the networking stack. The passed &packet_type
385 * is linked into kernel lists and may not be freed until it has been
386 * removed from the kernel lists.
388 * This call does not sleep therefore it can not
389 * guarantee all CPU's that are in middle of receiving packets
390 * will see the new packet type (until the next received packet).
393 void dev_add_pack(struct packet_type *pt)
395 struct list_head *head = ptype_head(pt);
397 spin_lock(&ptype_lock);
398 list_add_rcu(&pt->list, head);
399 spin_unlock(&ptype_lock);
401 EXPORT_SYMBOL(dev_add_pack);
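
/* Illustrative usage sketch (not part of the original file): how a protocol
 * module typically pairs dev_add_pack() with dev_remove_pack().  The names
 * "example_rcv" and "example_packet_type" are hypothetical; the block is
 * kept inside "#if 0" because it is documentation only.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* A real handler would parse the frame; here we just consume it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_packet_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_IP),  /* htons(ETH_P_ALL) would make it a tap */
	.func	= example_rcv,
};

static int __init example_pt_init(void)
{
	dev_add_pack(&example_packet_type);
	return 0;
}

static void __exit example_pt_exit(void)
{
	/* Sleeps until no CPU can still be walking the handler list. */
	dev_remove_pack(&example_packet_type);
}
#endif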
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
434 EXPORT_SYMBOL(__dev_remove_pack);
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
454 EXPORT_SYMBOL(dev_remove_pack);
458 * dev_add_offload - register offload handlers
459 * @po: protocol offload declaration
461 * Add protocol offload handlers to the networking stack. The passed
462 * &proto_offload is linked into kernel lists and may not be freed until
463 * it has been removed from the kernel lists.
465 * This call does not sleep therefore it can not
466 * guarantee all CPU's that are in middle of receiving packets
467 * will see the new offload handlers (until the next received packet).
469 void dev_add_offload(struct packet_offload *po)
471 struct list_head *head = &offload_base;
473 spin_lock(&offload_lock);
474 list_add_rcu(&po->list, head);
475 spin_unlock(&offload_lock);
477 EXPORT_SYMBOL(dev_add_offload);
/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	offload handler after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
529 EXPORT_SYMBOL(dev_remove_offload);
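
/* Illustrative sketch (not part of the original file), modeled loosely on
 * how af_inet.c registers its GSO/GRO callbacks.  "example_gso_segment" and
 * "example_offload" are hypothetical names; only the gso_segment callback
 * is filled in to keep the example short.
 */
#if 0
static struct sk_buff *example_gso_segment(struct sk_buff *skb,
					   netdev_features_t features)
{
	/* A real implementation would split skb into MTU-sized segments. */
	return ERR_PTR(-EPROTONOSUPPORT);
}

static struct packet_offload example_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.callbacks = {
		.gso_segment = example_gso_segment,
	},
};

static int __init example_offload_init(void)
{
	dev_add_offload(&example_offload);
	return 0;
}
#endif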
531 /******************************************************************************
533 Device Boot-time Settings Routines
535 *******************************************************************************/
537 /* Boot time configuration table */
538 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
541 * netdev_boot_setup_add - add new setup entry
542 * @name: name of the device
543 * @map: configured settings for the device
545 * Adds new setup entry to the dev_boot_setup list. The function
546 * returns 0 on error and 1 on success. This is a generic routine to
549 static int netdev_boot_setup_add(char *name, struct ifmap *map)
551 struct netdev_boot_setup *s;
555 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
556 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
557 memset(s[i].name, 0, sizeof(s[i].name));
558 strlcpy(s[i].name, name, IFNAMSIZ);
559 memcpy(&s[i].map, map, sizeof(s[i].map));
564 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
568 * netdev_boot_setup_check - check boot time settings
569 * @dev: the netdevice
571 * Check boot time settings for the device.
572 * The found settings are set for the device to be used
573 * later in the device probing.
574 * Returns 0 if no settings found, 1 if they are.
576 int netdev_boot_setup_check(struct net_device *dev)
578 struct netdev_boot_setup *s = dev_boot_setup;
581 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
582 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
583 !strcmp(dev->name, s[i].name)) {
584 dev->irq = s[i].map.irq;
585 dev->base_addr = s[i].map.base_addr;
586 dev->mem_start = s[i].map.mem_start;
587 dev->mem_end = s[i].map.mem_end;
593 EXPORT_SYMBOL(netdev_boot_setup_check);
597 * netdev_boot_base - get address from boot time settings
598 * @prefix: prefix for network device
599 * @unit: id for network device
601 * Check boot time settings for the base address of device.
602 * The found settings are set for the device to be used
603 * later in the device probing.
604 * Returns 0 if no settings found.
606 unsigned long netdev_boot_base(const char *prefix, int unit)
608 const struct netdev_boot_setup *s = dev_boot_setup;
612 sprintf(name, "%s%d", prefix, unit);
615 * If device already registered then return base of 1
616 * to indicate not to probe for this interface
618 if (__dev_get_by_name(&init_net, name))
621 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
622 if (!strcmp(name, s[i].name))
623 return s[i].map.base_addr;
628 * Saves at boot time configured settings for any netdevice.
630 int __init netdev_boot_setup(char *str)
635 str = get_options(str, ARRAY_SIZE(ints), ints);
640 memset(&map, 0, sizeof(map));
644 map.base_addr = ints[2];
646 map.mem_start = ints[3];
648 map.mem_end = ints[4];
650 /* Add new entry to the list */
651 return netdev_boot_setup_add(str, &map);
654 __setup("netdev=", netdev_boot_setup);
656 /*******************************************************************************
658 Device Interface Subroutines
660 *******************************************************************************/
/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */
int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	/* If dev->rtnl_link_ops is set, it's a virtual interface. */
	if (dev->rtnl_link_ops)
		return 0;

	return dev->ifindex;
}
681 EXPORT_SYMBOL(dev_get_iflink);
684 * __dev_get_by_name - find a device by its name
685 * @net: the applicable net namespace
686 * @name: name to find
688 * Find an interface by name. Must be called under RTNL semaphore
689 * or @dev_base_lock. If the name is found a pointer to the device
690 * is returned. If the name is not found then %NULL is returned. The
691 * reference counters are not incremented so the caller must be
692 * careful with locks.
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
706 EXPORT_SYMBOL(__dev_get_by_name);
709 * dev_get_by_name_rcu - find a device by its name
710 * @net: the applicable net namespace
711 * @name: name to find
713 * Find an interface by name.
714 * If the name is found a pointer to the device is returned.
715 * If the name is not found then %NULL is returned.
716 * The reference counters are not incremented so the caller must be
717 * careful with locks. The caller must hold RCU lock.
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
731 EXPORT_SYMBOL(dev_get_by_name_rcu);
734 * dev_get_by_name - find a device by its name
735 * @net: the applicable net namespace
736 * @name: name to find
738 * Find an interface by name. This can be called from any
739 * context and does its own locking. The returned handle has
740 * the usage count incremented and the caller must use dev_put() to
741 * release it when it is no longer needed. %NULL is returned if no
742 * matching device is found.
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
756 EXPORT_SYMBOL(dev_get_by_name);
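
/* Illustrative sketch (not part of the original file) contrasting the
 * reference-taking and RCU lookup variants above.  "example_lookup" and the
 * "eth0" name are hypothetical.
 */
#if 0
static void example_lookup(void)
{
	struct net_device *dev;

	/* Takes a reference; must be released with dev_put() when done. */
	dev = dev_get_by_name(&init_net, "eth0");
	if (dev) {
		netdev_info(dev, "ifindex %d\n", dev->ifindex);
		dev_put(dev);
	}

	/* No reference taken; the pointer is only valid inside the
	 * RCU read-side critical section.
	 */
	rcu_read_lock();
	dev = dev_get_by_name_rcu(&init_net, "eth0");
	if (dev)
		netdev_info(dev, "still registered\n");
	rcu_read_unlock();
}
#endif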
759 * __dev_get_by_index - find a device by its ifindex
760 * @net: the applicable net namespace
761 * @ifindex: index of device
763 * Search for an interface by index. Returns %NULL if the device
764 * is not found or a pointer to the device. The device has not
765 * had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
781 EXPORT_SYMBOL(__dev_get_by_index);
784 * dev_get_by_index_rcu - find a device by its ifindex
785 * @net: the applicable net namespace
786 * @ifindex: index of device
788 * Search for an interface by index. Returns %NULL if the device
789 * is not found or a pointer to the device. The device has not
790 * had its reference counter increased so the caller must be careful
791 * about locking. The caller must hold RCU lock.
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
805 EXPORT_SYMBOL(dev_get_by_index_rcu);
809 * dev_get_by_index - find a device by its ifindex
810 * @net: the applicable net namespace
811 * @ifindex: index of device
813 * Search for an interface by index. Returns NULL if the device
814 * is not found or a pointer to the device. The device returned has
815 * had a reference added and the pointer is safe until the user calls
816 * dev_put to indicate they have finished with it.
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
830 EXPORT_SYMBOL(dev_get_by_index);
833 * netdev_get_name - get a netdevice name, knowing its ifindex.
834 * @net: network namespace
835 * @name: a pointer to the buffer where the name will be stored.
836 * @ifindex: the ifindex of the interface to get the name from.
838 * The use of raw_seqcount_begin() and cond_resched() before
839 * retrying is required as we want to give the writers a chance
840 * to complete when CONFIG_PREEMPT is not set.
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}
867 * dev_getbyhwaddr_rcu - find a device by its hardware address
868 * @net: the applicable net namespace
869 * @type: media type of device
870 * @ha: hardware address
872 * Search for an interface by MAC address. Returns NULL if the device
873 * is not found or a pointer to the device.
874 * The caller must hold RCU or RTNL.
875 * The returned device has not had its ref count increased
876 * and the caller must therefore be careful about locking
880 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
883 struct net_device *dev;
885 for_each_netdev_rcu(net, dev)
886 if (dev->type == type &&
887 !memcmp(dev->dev_addr, ha, dev->addr_len))
892 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
894 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
896 struct net_device *dev;
899 for_each_netdev(net, dev)
900 if (dev->type == type)
905 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
907 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
909 struct net_device *dev, *ret = NULL;
912 for_each_netdev_rcu(net, dev)
913 if (dev->type == type) {
921 EXPORT_SYMBOL(dev_getfirstbyhwtype);
924 * __dev_get_by_flags - find any device with given flags
925 * @net: the applicable net namespace
926 * @if_flags: IFF_* values
927 * @mask: bitmask of bits in if_flags to check
929 * Search for any interface with the given flags. Returns NULL if a device
930 * is not found or a pointer to the device. Must be called inside
931 * rtnl_lock(), and result refcount is unchanged.
934 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
937 struct net_device *dev, *ret;
942 for_each_netdev(net, dev) {
943 if (((dev->flags ^ if_flags) & mask) == 0) {
950 EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
976 EXPORT_SYMBOL(dev_valid_name);
979 * __dev_alloc_name - allocate a name for a device
980 * @net: network namespace to allocate the device name in
981 * @name: name format string
982 * @buf: scratch buffer and result name string
984 * Passed a format string - eg "lt%d" it will try and find a suitable
985 * id. It scans list of devices to build up a free map, then chooses
986 * the first empty slot. The caller must hold the dev_base or rtnl lock
987 * while allocating the name and adding the device in order to avoid
989 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
990 * Returns the number of the unit assigned or a negative errno code.
993 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
997 const int max_netdevices = 8*PAGE_SIZE;
998 unsigned long *inuse;
999 struct net_device *d;
1001 p = strnchr(name, IFNAMSIZ-1, '%');
1004 * Verify the string as this thing may have come from
1005 * the user. There must be either one "%d" and no other "%"
1008 if (p[1] != 'd' || strchr(p + 2, '%'))
1011 /* Use one page as a bit array of possible slots */
1012 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1016 for_each_netdev(net, d) {
1017 if (!sscanf(d->name, name, &i))
1019 if (i < 0 || i >= max_netdevices)
1022 /* avoid cases where sscanf is not exact inverse of printf */
1023 snprintf(buf, IFNAMSIZ, name, i);
1024 if (!strncmp(buf, d->name, IFNAMSIZ))
1028 i = find_first_zero_bit(inuse, max_netdevices);
1029 free_page((unsigned long) inuse);
1033 snprintf(buf, IFNAMSIZ, name, i);
1034 if (!__dev_get_by_name(net, buf))
1037 /* It is possible to run out of possible slots
1038 * when the name is long and there isn't enough space left
1039 * for the digits, or if all bits are used.
1045 * dev_alloc_name - allocate a name for a device
1047 * @name: name format string
1049 * Passed a format string - eg "lt%d" it will try and find a suitable
1050 * id. It scans list of devices to build up a free map, then chooses
1051 * the first empty slot. The caller must hold the dev_base or rtnl lock
1052 * while allocating the name and adding the device in order to avoid
1054 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1055 * Returns the number of the unit assigned or a negative errno code.
1058 int dev_alloc_name(struct net_device *dev, const char *name)
1064 BUG_ON(!dev_net(dev));
1066 ret = __dev_alloc_name(net, name, buf);
1068 strlcpy(dev->name, buf, IFNAMSIZ);
1071 EXPORT_SYMBOL(dev_alloc_name);
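
/* Illustrative sketch (not part of the original file): a driver that wants
 * kernel-assigned unit numbers simply passes a "%d" template.  The helper
 * name "example_assign_name" and the "veth%d" template are hypothetical.
 */
#if 0
static int example_assign_name(struct net_device *dev)
{
	/* Writes e.g. "veth0", "veth1", ... into dev->name and returns the
	 * unit number chosen, or a negative errno on failure.
	 */
	return dev_alloc_name(dev, "veth%d");
}
#endif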
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
1106 * dev_change_name - change name of a device
1108 * @newname: name (or format string) must be at least IFNAMSIZ
1110 * Change name of a device, can pass format strings "eth%d".
1113 int dev_change_name(struct net_device *dev, const char *newname)
1115 unsigned char old_assign_type;
1116 char oldname[IFNAMSIZ];
1122 BUG_ON(!dev_net(dev));
1125 if (dev->flags & IFF_UP)
1128 write_seqcount_begin(&devnet_rename_seq);
1130 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1131 write_seqcount_end(&devnet_rename_seq);
1135 memcpy(oldname, dev->name, IFNAMSIZ);
1137 err = dev_get_valid_name(net, dev, newname);
1139 write_seqcount_end(&devnet_rename_seq);
1143 if (oldname[0] && !strchr(oldname, '%'))
1144 netdev_info(dev, "renamed from %s\n", oldname);
1146 old_assign_type = dev->name_assign_type;
1147 dev->name_assign_type = NET_NAME_RENAMED;
1150 ret = device_rename(&dev->dev, dev->name);
1152 memcpy(dev->name, oldname, IFNAMSIZ);
1153 dev->name_assign_type = old_assign_type;
1154 write_seqcount_end(&devnet_rename_seq);
1158 write_seqcount_end(&devnet_rename_seq);
1160 netdev_adjacent_rename_links(dev, oldname);
1162 write_lock_bh(&dev_base_lock);
1163 hlist_del_rcu(&dev->name_hlist);
1164 write_unlock_bh(&dev_base_lock);
1168 write_lock_bh(&dev_base_lock);
1169 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1170 write_unlock_bh(&dev_base_lock);
1172 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1173 ret = notifier_to_errno(ret);
1176 /* err >= 0 after dev_alloc_name() or stores the first errno */
1179 write_seqcount_begin(&devnet_rename_seq);
1180 memcpy(dev->name, oldname, IFNAMSIZ);
1181 memcpy(oldname, newname, IFNAMSIZ);
1182 dev->name_assign_type = old_assign_type;
1183 old_assign_type = NET_NAME_RENAMED;
1186 pr_err("%s: name change rollback failed: %d\n",
1195 * dev_set_alias - change ifalias of a device
1197 * @alias: name up to IFALIASZ
1198 * @len: limit of bytes to copy from info
1200 * Set ifalias for a device,
1202 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1208 if (len >= IFALIASZ)
1212 kfree(dev->ifalias);
1213 dev->ifalias = NULL;
1217 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1220 dev->ifalias = new_ifalias;
1222 strlcpy(dev->ifalias, alias, len+1);
1228 * netdev_features_change - device changes features
1229 * @dev: device to cause notification
1231 * Called to indicate a device has changed features.
1233 void netdev_features_change(struct net_device *dev)
1235 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1237 EXPORT_SYMBOL(netdev_features_change);
1240 * netdev_state_change - device changes state
1241 * @dev: device to cause notification
1243 * Called to indicate a device has changed state. This function calls
1244 * the notifier chains for netdev_chain and sends a NEWLINK message
1245 * to the routing socket.
1247 void netdev_state_change(struct net_device *dev)
1249 if (dev->flags & IFF_UP) {
1250 struct netdev_notifier_change_info change_info;
1252 change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
1255 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1258 EXPORT_SYMBOL(netdev_state_change);
1261 * netdev_notify_peers - notify network peers about existence of @dev
1262 * @dev: network device
1264 * Generate traffic such that interested network peers are aware of
1265 * @dev, such as by generating a gratuitous ARP. This may be used when
1266 * a device wants to inform the rest of the network about some sort of
1267 * reconfiguration such as a failover event or virtual machine
1270 void netdev_notify_peers(struct net_device *dev)
1273 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1276 EXPORT_SYMBOL(netdev_notify_peers);
1278 static int __dev_open(struct net_device *dev)
1280 const struct net_device_ops *ops = dev->netdev_ops;
1285 if (!netif_device_present(dev))
1288 /* Block netpoll from trying to do any rx path servicing.
1289 * If we don't do this there is a chance ndo_poll_controller
1290 * or ndo_poll may be running while we open the device
1292 netpoll_poll_disable(dev);
1294 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1295 ret = notifier_to_errno(ret);
1299 set_bit(__LINK_STATE_START, &dev->state);
1301 if (ops->ndo_validate_addr)
1302 ret = ops->ndo_validate_addr(dev);
1304 if (!ret && ops->ndo_open)
1305 ret = ops->ndo_open(dev);
1307 netpoll_poll_enable(dev);
1310 clear_bit(__LINK_STATE_START, &dev->state);
1312 dev->flags |= IFF_UP;
1313 dev_set_rx_mode(dev);
1315 add_device_randomness(dev->dev_addr, dev->addr_len);
1322 * dev_open - prepare an interface for use.
1323 * @dev: device to open
1325 * Takes a device from down to up state. The device's private open
1326 * function is invoked and then the multicast lists are loaded. Finally
1327 * the device is moved into the up state and a %NETDEV_UP message is
1328 * sent to the netdev notifier chain.
1330 * Calling this function on an active interface is a nop. On a failure
1331 * a negative errno code is returned.
1333 int dev_open(struct net_device *dev)
1337 if (dev->flags & IFF_UP)
1340 ret = __dev_open(dev);
1344 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1345 call_netdevice_notifiers(NETDEV_UP, dev);
1349 EXPORT_SYMBOL(dev_open);
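
/* Illustrative sketch (not part of the original file): bringing an interface
 * up from kernel code.  dev_open() must be called with the RTNL lock held;
 * "example_bring_up" is a hypothetical helper.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);	/* no-op if the device is already IFF_UP */
	rtnl_unlock();

	return err;
}
#endif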
1351 static int __dev_close_many(struct list_head *head)
1353 struct net_device *dev;
1358 list_for_each_entry(dev, head, close_list) {
1359 /* Temporarily disable netpoll until the interface is down */
1360 netpoll_poll_disable(dev);
1362 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1364 clear_bit(__LINK_STATE_START, &dev->state);
1366 /* Synchronize to scheduled poll. We cannot touch poll list, it
1367 * can be even on different cpu. So just clear netif_running().
1369 * dev->stop() will invoke napi_disable() on all of it's
1370 * napi_struct instances on this device.
1372 smp_mb__after_atomic(); /* Commit netif_running(). */
1375 dev_deactivate_many(head);
1377 list_for_each_entry(dev, head, close_list) {
1378 const struct net_device_ops *ops = dev->netdev_ops;
1381 * Call the device specific close. This cannot fail.
1382 * Only if device is UP
1384 * We allow it to be called even after a DETACH hot-plug
1390 dev->flags &= ~IFF_UP;
1391 netpoll_poll_enable(dev);
1397 static int __dev_close(struct net_device *dev)
1402 list_add(&dev->close_list, &single);
1403 retval = __dev_close_many(&single);
1409 int dev_close_many(struct list_head *head, bool unlink)
1411 struct net_device *dev, *tmp;
1413 /* Remove the devices that don't need to be closed */
1414 list_for_each_entry_safe(dev, tmp, head, close_list)
1415 if (!(dev->flags & IFF_UP))
1416 list_del_init(&dev->close_list);
1418 __dev_close_many(head);
1420 list_for_each_entry_safe(dev, tmp, head, close_list) {
1421 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1422 call_netdevice_notifiers(NETDEV_DOWN, dev);
1424 list_del_init(&dev->close_list);
1429 EXPORT_SYMBOL(dev_close_many);
1432 * dev_close - shutdown an interface.
1433 * @dev: device to shutdown
1435 * This function moves an active device into down state. A
1436 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1437 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1440 int dev_close(struct net_device *dev)
1442 if (dev->flags & IFF_UP) {
1445 list_add(&dev->close_list, &single);
1446 dev_close_many(&single, true);
1451 EXPORT_SYMBOL(dev_close);
1455 * dev_disable_lro - disable Large Receive Offload on a device
1458 * Disable Large Receive Offload (LRO) on a net device. Must be
1459 * called under RTNL. This is needed if received packets may be
1460 * forwarded to another interface.
1462 void dev_disable_lro(struct net_device *dev)
1464 struct net_device *lower_dev;
1465 struct list_head *iter;
1467 dev->wanted_features &= ~NETIF_F_LRO;
1468 netdev_update_features(dev);
1470 if (unlikely(dev->features & NETIF_F_LRO))
1471 netdev_WARN(dev, "failed to disable LRO!\n");
1473 netdev_for_each_lower_dev(dev, lower_dev, iter)
1474 dev_disable_lro(lower_dev);
1476 EXPORT_SYMBOL(dev_disable_lro);
1478 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1479 struct net_device *dev)
1481 struct netdev_notifier_info info;
1483 netdev_notifier_info_init(&info, dev);
1484 return nb->notifier_call(nb, val, &info);
1487 static int dev_boot_phase = 1;
1490 * register_netdevice_notifier - register a network notifier block
1493 * Register a notifier to be called when network device events occur.
1494 * The notifier passed is linked into the kernel structures and must
1495 * not be reused until it has been unregistered. A negative errno code
1496 * is returned on a failure.
1498 * When registered all registration and up events are replayed
1499 * to the new notifier to allow device to have a race free
1500 * view of the network device list.
1503 int register_netdevice_notifier(struct notifier_block *nb)
1505 struct net_device *dev;
1506 struct net_device *last;
1511 err = raw_notifier_chain_register(&netdev_chain, nb);
1517 for_each_netdev(net, dev) {
1518 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1519 err = notifier_to_errno(err);
1523 if (!(dev->flags & IFF_UP))
1526 call_netdevice_notifier(nb, NETDEV_UP, dev);
1537 for_each_netdev(net, dev) {
1541 if (dev->flags & IFF_UP) {
1542 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1544 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1546 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1551 raw_notifier_chain_unregister(&netdev_chain, nb);
1554 EXPORT_SYMBOL(register_netdevice_notifier);
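
/* Illustrative sketch (not part of the original file): a typical notifier
 * block registered with register_netdevice_notifier().  The names
 * "example_netdev_event" and "example_netdev_nb" are hypothetical.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		netdev_info(dev, "interface is up\n");
		break;
	case NETDEV_GOING_DOWN:
		netdev_info(dev, "interface is going down\n");
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb) on module init replays
 * NETDEV_REGISTER/NETDEV_UP for already-present devices, as described above;
 * unregister_netdevice_notifier(&example_netdev_nb) undoes it on exit.
 */
#endif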
1557 * unregister_netdevice_notifier - unregister a network notifier block
1560 * Unregister a notifier previously registered by
1561 * register_netdevice_notifier(). The notifier is unlinked into the
1562 * kernel structures and may then be reused. A negative errno code
1563 * is returned on a failure.
1565 * After unregistering unregister and down device events are synthesized
1566 * for all devices on the device list to the removed notifier to remove
1567 * the need for special case cleanup code.
1570 int unregister_netdevice_notifier(struct notifier_block *nb)
1572 struct net_device *dev;
1577 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1582 for_each_netdev(net, dev) {
1583 if (dev->flags & IFF_UP) {
1584 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1586 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1588 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1595 EXPORT_SYMBOL(unregister_netdevice_notifier);
1598 * call_netdevice_notifiers_info - call all network notifier blocks
1599 * @val: value passed unmodified to notifier function
1600 * @dev: net_device pointer passed unmodified to notifier function
1601 * @info: notifier information data
1603 * Call all network notifier blocks. Parameters and return value
1604 * are as for raw_notifier_call_chain().
1607 static int call_netdevice_notifiers_info(unsigned long val,
1608 struct net_device *dev,
1609 struct netdev_notifier_info *info)
1612 netdev_notifier_info_init(info, dev);
1613 return raw_notifier_call_chain(&netdev_chain, val, info);
1617 * call_netdevice_notifiers - call all network notifier blocks
1618 * @val: value passed unmodified to notifier function
1619 * @dev: net_device pointer passed unmodified to notifier function
1621 * Call all network notifier blocks. Parameters and return value
1622 * are as for raw_notifier_call_chain().
1625 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1627 struct netdev_notifier_info info;
1629 return call_netdevice_notifiers_info(val, dev, &info);
1631 EXPORT_SYMBOL(call_netdevice_notifiers);
1633 #ifdef CONFIG_NET_CLS_ACT
1634 static struct static_key ingress_needed __read_mostly;
1636 void net_inc_ingress_queue(void)
1638 static_key_slow_inc(&ingress_needed);
1640 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1642 void net_dec_ingress_queue(void)
1644 static_key_slow_dec(&ingress_needed);
1646 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1649 static struct static_key netstamp_needed __read_mostly;
1650 #ifdef HAVE_JUMP_LABEL
1651 /* We are not allowed to call static_key_slow_dec() from irq context
1652 * If net_disable_timestamp() is called from irq context, defer the
1653 * static_key_slow_dec() calls.
1655 static atomic_t netstamp_needed_deferred;
1658 void net_enable_timestamp(void)
1660 #ifdef HAVE_JUMP_LABEL
1661 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1665 static_key_slow_dec(&netstamp_needed);
1669 static_key_slow_inc(&netstamp_needed);
1671 EXPORT_SYMBOL(net_enable_timestamp);
1673 void net_disable_timestamp(void)
1675 #ifdef HAVE_JUMP_LABEL
1676 if (in_interrupt()) {
1677 atomic_inc(&netstamp_needed_deferred);
1681 static_key_slow_dec(&netstamp_needed);
1683 EXPORT_SYMBOL(net_disable_timestamp);
1685 static inline void net_timestamp_set(struct sk_buff *skb)
1687 skb->tstamp.tv64 = 0;
1688 if (static_key_false(&netstamp_needed))
1689 __net_timestamp(skb);
1692 #define net_timestamp_check(COND, SKB) \
1693 if (static_key_false(&netstamp_needed)) { \
1694 if ((COND) && !(SKB)->tstamp.tv64) \
1695 __net_timestamp(SKB); \
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
1717 EXPORT_SYMBOL_GPL(is_skb_forwardable);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->priority = 0;
	skb->protocol = eth_type_trans(skb, dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	return 0;
}
1742 EXPORT_SYMBOL_GPL(__dev_forward_skb);
1745 * dev_forward_skb - loopback an skb to another netif
1747 * @dev: destination network device
1748 * @skb: buffer to forward
1751 * NET_RX_SUCCESS (no congestion)
1752 * NET_RX_DROP (packet was dropped, but freed)
1754 * dev_forward_skb can be used for injecting an skb from the
1755 * start_xmit function of one device into the receive queue
1756 * of another device.
1758 * The receiving device may be in another namespace, so
1759 * we have to clear all information in the skb that could
1760 * impact namespace isolation.
1762 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1764 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1766 EXPORT_SYMBOL_GPL(dev_forward_skb);
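
/* Illustrative sketch (not part of the original file), modeled loosely on
 * veth-style pair devices: the transmit path of one device injects the skb
 * into its peer's receive path with dev_forward_skb().  "example_priv" and
 * its "peer" member are hypothetical.
 */
#if 0
struct example_priv {
	struct net_device __rcu *peer;
};

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (!peer) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
	} else if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS) {
		/* dev_forward_skb() already freed the skb on failure. */
		dev->stats.tx_dropped++;
	}
	rcu_read_unlock();

	return NETDEV_TX_OK;
}
#endif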
1768 static inline int deliver_skb(struct sk_buff *skb,
1769 struct packet_type *pt_prev,
1770 struct net_device *orig_dev)
1772 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1774 atomic_inc(&skb->users);
1775 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1778 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1779 struct packet_type **pt,
1780 struct net_device *orig_dev,
1782 struct list_head *ptype_list)
1784 struct packet_type *ptype, *pt_prev = *pt;
1786 list_for_each_entry_rcu(ptype, ptype_list, list) {
1787 if (ptype->type != type)
1790 deliver_skb(skb, pt_prev, orig_dev);
1796 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1798 if (!ptype->af_packet_priv || !skb->sk)
1801 if (ptype->id_match)
1802 return ptype->id_match(ptype, skb->sk);
1803 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1810 * Support routine. Sends outgoing frames to any network
1811 * taps currently in use.
1814 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1816 struct packet_type *ptype;
1817 struct sk_buff *skb2 = NULL;
1818 struct packet_type *pt_prev = NULL;
1819 struct list_head *ptype_list = &ptype_all;
1823 list_for_each_entry_rcu(ptype, ptype_list, list) {
1824 /* Never send packets back to the socket
1827 if (skb_loop_sk(ptype, skb))
1831 deliver_skb(skb2, pt_prev, skb->dev);
1836 /* need to clone skb, done only once */
1837 skb2 = skb_clone(skb, GFP_ATOMIC);
1841 net_timestamp_set(skb2);
1843 /* skb->nh should be correctly
1844 * set by sender, so that the second statement is
1845 * just protection against buggy protocols.
1847 skb_reset_mac_header(skb2);
1849 if (skb_network_header(skb2) < skb2->data ||
1850 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1851 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1852 ntohs(skb2->protocol),
1854 skb_reset_network_header(skb2);
1857 skb2->transport_header = skb2->network_header;
1858 skb2->pkt_type = PACKET_OUTGOING;
1862 if (ptype_list == &ptype_all) {
1863 ptype_list = &dev->ptype_all;
1868 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1873 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1874 * @dev: Network device
1875 * @txq: number of queues available
1877 * If real_num_tx_queues is changed the tc mappings may no longer be
1878 * valid. To resolve this verify the tc mapping remains valid and if
1879 * not NULL the mapping. With no priorities mapping to this
1880 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. It is
1882 * expected that drivers will fix this mapping if they can before
1883 * calling netif_set_real_num_tx_queues.
1885 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1888 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1890 /* If TC0 is invalidated disable TC mapping */
1891 if (tc->offset + tc->count > txq) {
1892 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1897 /* Invalidated prio to tc mappings set to TC0 */
1898 for (i = 1; i < TC_BITMASK + 1; i++) {
1899 int q = netdev_get_prio_tc_map(dev, i);
1901 tc = &dev->tc_to_txq[q];
1902 if (tc->offset + tc->count > txq) {
1903 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1905 netdev_set_prio_tc_map(dev, i, 0);
1911 static DEFINE_MUTEX(xps_map_mutex);
1912 #define xmap_dereference(P) \
1913 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1915 static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1918 struct xps_map *map = NULL;
1922 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1924 for (pos = 0; map && pos < map->len; pos++) {
1925 if (map->queues[pos] == index) {
1927 map->queues[pos] = map->queues[--map->len];
1929 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1930 kfree_rcu(map, rcu);
1940 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1942 struct xps_dev_maps *dev_maps;
1944 bool active = false;
1946 mutex_lock(&xps_map_mutex);
1947 dev_maps = xmap_dereference(dev->xps_maps);
1952 for_each_possible_cpu(cpu) {
1953 for (i = index; i < dev->num_tx_queues; i++) {
1954 if (!remove_xps_queue(dev_maps, cpu, i))
1957 if (i == dev->num_tx_queues)
1962 RCU_INIT_POINTER(dev->xps_maps, NULL);
1963 kfree_rcu(dev_maps, rcu);
1966 for (i = index; i < dev->num_tx_queues; i++)
1967 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1971 mutex_unlock(&xps_map_mutex);
1974 static struct xps_map *expand_xps_map(struct xps_map *map,
1977 struct xps_map *new_map;
1978 int alloc_len = XPS_MIN_MAP_ALLOC;
1981 for (pos = 0; map && pos < map->len; pos++) {
1982 if (map->queues[pos] != index)
1987 /* Need to add queue to this CPU's existing map */
1989 if (pos < map->alloc_len)
1992 alloc_len = map->alloc_len * 2;
1995 /* Need to allocate new map to store queue on this CPU's map */
1996 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2001 for (i = 0; i < pos; i++)
2002 new_map->queues[i] = map->queues[i];
2003 new_map->alloc_len = alloc_len;
2009 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2012 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2013 struct xps_map *map, *new_map;
2014 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
2015 int cpu, numa_node_id = -2;
2016 bool active = false;
2018 mutex_lock(&xps_map_mutex);
2020 dev_maps = xmap_dereference(dev->xps_maps);
2022 /* allocate memory for queue storage */
2023 for_each_online_cpu(cpu) {
2024 if (!cpumask_test_cpu(cpu, mask))
2028 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2029 if (!new_dev_maps) {
2030 mutex_unlock(&xps_map_mutex);
2034 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2037 map = expand_xps_map(map, cpu, index);
2041 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2045 goto out_no_new_maps;
2047 for_each_possible_cpu(cpu) {
2048 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2049 /* add queue to CPU maps */
2052 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2053 while ((pos < map->len) && (map->queues[pos] != index))
2056 if (pos == map->len)
2057 map->queues[map->len++] = index;
2059 if (numa_node_id == -2)
2060 numa_node_id = cpu_to_node(cpu);
2061 else if (numa_node_id != cpu_to_node(cpu))
2064 } else if (dev_maps) {
2065 /* fill in the new device map from the old device map */
2066 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2067 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2072 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2074 /* Cleanup old maps */
2076 for_each_possible_cpu(cpu) {
2077 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2078 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2079 if (map && map != new_map)
2080 kfree_rcu(map, rcu);
2083 kfree_rcu(dev_maps, rcu);
2086 dev_maps = new_dev_maps;
2090 /* update Tx queue numa node */
2091 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2092 (numa_node_id >= 0) ? numa_node_id :
2098 /* removes queue from unused CPUs */
2099 for_each_possible_cpu(cpu) {
2100 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2103 if (remove_xps_queue(dev_maps, cpu, index))
2107 /* free map if not active */
2109 RCU_INIT_POINTER(dev->xps_maps, NULL);
2110 kfree_rcu(dev_maps, rcu);
2114 mutex_unlock(&xps_map_mutex);
2118 /* remove any maps that we added */
2119 for_each_possible_cpu(cpu) {
2120 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2121 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2123 if (new_map && new_map != map)
2127 mutex_unlock(&xps_map_mutex);
2129 kfree(new_dev_maps);
2132 EXPORT_SYMBOL(netif_set_xps_queue);
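
/* Illustrative sketch (not part of the original file): a driver pinning each
 * of its TX queues to one CPU via netif_set_xps_queue().  The helper name
 * "example_set_xps" and the simple modulo policy are hypothetical.
 */
#if 0
static void example_set_xps(struct net_device *dev)
{
	cpumask_var_t mask;
	int i;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	for (i = 0; i < dev->real_num_tx_queues; i++) {
		cpumask_clear(mask);
		cpumask_set_cpu(i % num_online_cpus(), mask);
		netif_set_xps_queue(dev, mask, i);
	}

	free_cpumask_var(mask);
}
#endif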
2136 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2137 * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
2139 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2143 if (txq < 1 || txq > dev->num_tx_queues)
2146 if (dev->reg_state == NETREG_REGISTERED ||
2147 dev->reg_state == NETREG_UNREGISTERING) {
2150 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2156 netif_setup_tc(dev, txq);
2158 if (txq < dev->real_num_tx_queues) {
2159 qdisc_reset_all_tx_gt(dev, txq);
2161 netif_reset_xps_queues_gt(dev, txq);
2166 dev->real_num_tx_queues = txq;
2169 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2173 * netif_set_real_num_rx_queues - set actual number of RX queues used
2174 * @dev: Network device
2175 * @rxq: Actual number of RX queues
2177 * This must be called either with the rtnl_lock held or before
2178 * registration of the net device. Returns 0 on success, or a
2179 * negative error code. If called before registration, it always
2182 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2186 if (rxq < 1 || rxq > dev->num_rx_queues)
2189 if (dev->reg_state == NETREG_REGISTERED) {
2192 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2198 dev->real_num_rx_queues = rxq;
2201 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
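
/* Illustrative sketch (not part of the original file): a driver shrinking or
 * growing its active channel count within the num_tx_queues/num_rx_queues
 * limits chosen at alloc_etherdev_mqs() time.  "example_set_channels" is a
 * hypothetical helper; real drivers typically do this from ethtool's
 * set_channels operation.
 */
#if 0
static int example_set_channels(struct net_device *dev, unsigned int count)
{
	int err;

	err = netif_set_real_num_tx_queues(dev, count);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(dev, count);
}
#endif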
2205 * netif_get_num_default_rss_queues - default number of RSS queues
2207 * This routine should set an upper limit on the number of RSS queues
2208 * used by default by multiqueue devices.
2210 int netif_get_num_default_rss_queues(void)
2212 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2214 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2216 static inline void __netif_reschedule(struct Qdisc *q)
2218 struct softnet_data *sd;
2219 unsigned long flags;
2221 local_irq_save(flags);
2222 sd = this_cpu_ptr(&softnet_data);
2223 q->next_sched = NULL;
2224 *sd->output_queue_tailp = q;
2225 sd->output_queue_tailp = &q->next_sched;
2226 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2227 local_irq_restore(flags);
2230 void __netif_schedule(struct Qdisc *q)
2232 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2233 __netif_reschedule(q);
2235 EXPORT_SYMBOL(__netif_schedule);
2237 struct dev_kfree_skb_cb {
2238 enum skb_free_reason reason;
2241 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2243 return (struct dev_kfree_skb_cb *)skb->cb;
2246 void netif_schedule_queue(struct netdev_queue *txq)
2249 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2250 struct Qdisc *q = rcu_dereference(txq->qdisc);
2252 __netif_schedule(q);
2256 EXPORT_SYMBOL(netif_schedule_queue);
2259 * netif_wake_subqueue - allow sending packets on subqueue
2260 * @dev: network device
2261 * @queue_index: sub queue index
2263 * Resume individual transmit queue of a device with multiple transmit queues.
2265 void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2267 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2269 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2273 q = rcu_dereference(txq->qdisc);
2274 __netif_schedule(q);
2278 EXPORT_SYMBOL(netif_wake_subqueue);
2280 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2282 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2286 q = rcu_dereference(dev_queue->qdisc);
2287 __netif_schedule(q);
2291 EXPORT_SYMBOL(netif_tx_wake_queue);
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (likely(atomic_read(&skb->users) == 1)) {
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
2304 local_irq_save(flags);
2305 skb->next = __this_cpu_read(softnet_data.completion_queue);
2306 __this_cpu_write(softnet_data.completion_queue, skb);
2307 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2308 local_irq_restore(flags);
2310 EXPORT_SYMBOL(__dev_kfree_skb_irq);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
2319 EXPORT_SYMBOL(__dev_kfree_skb_any);
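
/* Illustrative sketch (not part of the original file): a TX completion
 * handler that may run in hard-irq context, so it must use the *_any
 * wrappers built on __dev_kfree_skb_any() above.  "example_tx_complete"
 * is a hypothetical helper.
 */
#if 0
static void example_tx_complete(struct sk_buff *skb, bool xmit_failed)
{
	if (xmit_failed)
		dev_kfree_skb_any(skb);		/* accounted as a drop */
	else
		dev_consume_skb_any(skb);	/* successful transmit */
}
#endif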
2323 * netif_device_detach - mark device as removed
2324 * @dev: network device
2326 * Mark device as removed from system and therefore no longer available.
2328 void netif_device_detach(struct net_device *dev)
2330 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2331 netif_running(dev)) {
2332 netif_tx_stop_all_queues(dev);
2335 EXPORT_SYMBOL(netif_device_detach);
2338 * netif_device_attach - mark device as attached
2339 * @dev: network device
2341 * Mark device as attached from system and restart if needed.
2343 void netif_device_attach(struct net_device *dev)
2345 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2346 netif_running(dev)) {
2347 netif_tx_wake_all_queues(dev);
2348 __netdev_watchdog_up(dev);
2351 EXPORT_SYMBOL(netif_device_attach);
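
/* Illustrative sketch (not part of the original file): the usual pairing of
 * netif_device_detach()/netif_device_attach() around driver suspend and
 * resume.  "example_suspend" and "example_resume" are hypothetical PM
 * callbacks that previously stored the net_device with dev_set_drvdata().
 */
#if 0
static int example_suspend(struct device *d)
{
	struct net_device *ndev = dev_get_drvdata(d);

	netif_device_detach(ndev);
	/* ... put the hardware into a low-power state ... */
	return 0;
}

static int example_resume(struct device *d)
{
	struct net_device *ndev = dev_get_drvdata(d);

	/* ... reinitialise the hardware ... */
	netif_device_attach(ndev);
	return 0;
}
#endif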
2353 static void skb_warn_bad_offload(const struct sk_buff *skb)
2355 static const netdev_features_t null_features = 0;
2356 struct net_device *dev = skb->dev;
2357 const char *driver = "";
2359 if (!net_ratelimit())
2362 if (dev && dev->dev.parent)
2363 driver = dev_driver_string(dev->dev.parent);
2365 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2366 "gso_type=%d ip_summed=%d\n",
2367 driver, dev ? &dev->features : &null_features,
2368 skb->sk ? &skb->sk->sk_route_caps : &null_features,
2369 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2370 skb_shinfo(skb)->gso_type, skb->ip_summed);
2374 * Invalidate hardware checksum when packet is to be mangled, and
2375 * complete checksum manually on outgoing path.
2377 int skb_checksum_help(struct sk_buff *skb)
2380 int ret = 0, offset;
2382 if (skb->ip_summed == CHECKSUM_COMPLETE)
2383 goto out_set_summed;
2385 if (unlikely(skb_shinfo(skb)->gso_size)) {
2386 skb_warn_bad_offload(skb);
2390 /* Before computing a checksum, we should make sure no frag could
2391 * be modified by an external entity; otherwise the checksum could be wrong.
2393 if (skb_has_shared_frag(skb)) {
2394 ret = __skb_linearize(skb);
2399 offset = skb_checksum_start_offset(skb);
2400 BUG_ON(offset >= skb_headlen(skb));
2401 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2403 offset += skb->csum_offset;
2404 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2406 if (skb_cloned(skb) &&
2407 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2408 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2413 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2415 skb->ip_summed = CHECKSUM_NONE;
2419 EXPORT_SYMBOL(skb_checksum_help);
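/* Example usage (illustrative sketch): a hypothetical driver whose hardware
 * cannot checksum a particular packet can fall back to this helper in its
 * ndo_start_xmit() before handing the frame to the hardware:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !foo_hw_can_csum(skb) && skb_checksum_help(skb))
 *		goto drop;
 *
 * foo_hw_can_csum() is a placeholder for the driver's own capability test;
 * the core performs the equivalent fallback in validate_xmit_skb() below.
 */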
2421 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2423 __be16 type = skb->protocol;
2425 /* Tunnel gso handlers can set protocol to ethernet. */
2426 if (type == htons(ETH_P_TEB)) {
2429 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2432 eth = (struct ethhdr *)skb_mac_header(skb);
2433 type = eth->h_proto;
2436 return __vlan_get_protocol(skb, type, depth);
2440 * skb_mac_gso_segment - mac layer segmentation handler.
2441 * @skb: buffer to segment
2442 * @features: features for the output path (see dev->features)
2444 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2445 netdev_features_t features)
2447 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2448 struct packet_offload *ptype;
2449 int vlan_depth = skb->mac_len;
2450 __be16 type = skb_network_protocol(skb, &vlan_depth);
2452 if (unlikely(!type))
2453 return ERR_PTR(-EINVAL);
2455 __skb_pull(skb, vlan_depth);
2458 list_for_each_entry_rcu(ptype, &offload_base, list) {
2459 if (ptype->type == type && ptype->callbacks.gso_segment) {
2460 segs = ptype->callbacks.gso_segment(skb, features);
2466 __skb_push(skb, skb->data - skb_mac_header(skb));
2470 EXPORT_SYMBOL(skb_mac_gso_segment);
2473 /* openvswitch calls this on rx path, so we need a different check.
2475 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2478 return skb->ip_summed != CHECKSUM_PARTIAL;
2480 return skb->ip_summed == CHECKSUM_NONE;
2484 * __skb_gso_segment - Perform segmentation on skb.
2485 * @skb: buffer to segment
2486 * @features: features for the output path (see dev->features)
2487 * @tx_path: whether it is called in TX path
2489 * This function segments the given skb and returns a list of segments.
2491 * It may return NULL if the skb requires no segmentation. This is
2492 * only possible when GSO is used for verifying header integrity.
2494 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2495 netdev_features_t features, bool tx_path)
2497 if (unlikely(skb_needs_check(skb, tx_path))) {
2500 skb_warn_bad_offload(skb);
2502 err = skb_cow_head(skb, 0);
2504 return ERR_PTR(err);
2507 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2508 SKB_GSO_CB(skb)->encap_level = 0;
2510 skb_reset_mac_header(skb);
2511 skb_reset_mac_len(skb);
2513 return skb_mac_gso_segment(skb, features);
2515 EXPORT_SYMBOL(__skb_gso_segment);
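/* Example usage (illustrative sketch): a caller falling back to software GSO
 * segments the skb and walks the resulting list, much like validate_xmit_skb()
 * below; foo_xmit_one() is a placeholder for the per-segment transmit:
 *
 *	struct sk_buff *segs, *nskb;
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (!segs) {
 *		foo_xmit_one(skb);		// no segmentation was needed
 *	} else {
 *		consume_skb(skb);
 *		do {
 *			nskb = segs;
 *			segs = segs->next;
 *			nskb->next = NULL;
 *			foo_xmit_one(nskb);
 *		} while (segs);
 *	}
 */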
2517 /* Take action when hardware reception checksum errors are detected. */
2519 void netdev_rx_csum_fault(struct net_device *dev)
2521 if (net_ratelimit()) {
2522 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2526 EXPORT_SYMBOL(netdev_rx_csum_fault);
2529 /* Actually, we should eliminate this check as soon as we know that:
2530 * 1. An IOMMU is present and allows us to map all of the memory.
2531 * 2. No high memory really exists on this machine.
2534 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2536 #ifdef CONFIG_HIGHMEM
2538 if (!(dev->features & NETIF_F_HIGHDMA)) {
2539 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2540 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2541 if (PageHighMem(skb_frag_page(frag)))
2546 if (PCI_DMA_BUS_IS_PHYS) {
2547 struct device *pdev = dev->dev.parent;
2551 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2552 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2553 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2554 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2562 /* If MPLS offload request, verify we are testing hardware MPLS features
2563 * instead of standard features for the netdev.
2565 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2566 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2567 netdev_features_t features,
2570 if (eth_p_mpls(type))
2571 features &= skb->dev->mpls_features;
2576 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2577 netdev_features_t features,
2584 static netdev_features_t harmonize_features(struct sk_buff *skb,
2585 netdev_features_t features)
2590 type = skb_network_protocol(skb, &tmp);
2591 features = net_mpls_features(skb, features, type);
2593 if (skb->ip_summed != CHECKSUM_NONE &&
2594 !can_checksum_protocol(features, type)) {
2595 features &= ~NETIF_F_ALL_CSUM;
2596 } else if (illegal_highdma(skb->dev, skb)) {
2597 features &= ~NETIF_F_SG;
2603 netdev_features_t passthru_features_check(struct sk_buff *skb,
2604 struct net_device *dev,
2605 netdev_features_t features)
2609 EXPORT_SYMBOL(passthru_features_check);
2611 static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2612 struct net_device *dev,
2613 netdev_features_t features)
2615 return vlan_features_check(skb, features);
2618 netdev_features_t netif_skb_features(struct sk_buff *skb)
2620 struct net_device *dev = skb->dev;
2621 netdev_features_t features = dev->features;
2622 u16 gso_segs = skb_shinfo(skb)->gso_segs;
2624 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2625 features &= ~NETIF_F_GSO_MASK;
2627 /* If encapsulation offload request, verify we are testing
2628 * hardware encapsulation features instead of standard
2629 * features for the netdev
2631 if (skb->encapsulation)
2632 features &= dev->hw_enc_features;
2634 if (skb_vlan_tagged(skb))
2635 features = netdev_intersect_features(features,
2636 dev->vlan_features |
2637 NETIF_F_HW_VLAN_CTAG_TX |
2638 NETIF_F_HW_VLAN_STAG_TX);
2640 if (dev->netdev_ops->ndo_features_check)
2641 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2644 features &= dflt_features_check(skb, dev, features);
2646 return harmonize_features(skb, features);
2648 EXPORT_SYMBOL(netif_skb_features);
2650 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2651 struct netdev_queue *txq, bool more)
2656 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2657 dev_queue_xmit_nit(skb, dev);
2660 trace_net_dev_start_xmit(skb, dev);
2661 rc = netdev_start_xmit(skb, dev, txq, more);
2662 trace_net_dev_xmit(skb, rc, dev, len);
2667 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2668 struct netdev_queue *txq, int *ret)
2670 struct sk_buff *skb = first;
2671 int rc = NETDEV_TX_OK;
2674 struct sk_buff *next = skb->next;
2677 rc = xmit_one(skb, dev, txq, next != NULL);
2678 if (unlikely(!dev_xmit_complete(rc))) {
2684 if (netif_xmit_stopped(txq) && skb) {
2685 rc = NETDEV_TX_BUSY;
2695 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2696 netdev_features_t features)
2698 if (skb_vlan_tag_present(skb) &&
2699 !vlan_hw_offload_capable(features, skb->vlan_proto))
2700 skb = __vlan_hwaccel_push_inside(skb);
2704 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2706 netdev_features_t features;
2711 features = netif_skb_features(skb);
2712 skb = validate_xmit_vlan(skb, features);
2716 if (netif_needs_gso(skb, features)) {
2717 struct sk_buff *segs;
2719 segs = skb_gso_segment(skb, features);
2727 if (skb_needs_linearize(skb, features) &&
2728 __skb_linearize(skb))
2731 /* If packet is not checksummed and device does not
2732 * support checksumming for this protocol, complete
2733 * checksumming here.
2735 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2736 if (skb->encapsulation)
2737 skb_set_inner_transport_header(skb,
2738 skb_checksum_start_offset(skb));
2740 skb_set_transport_header(skb,
2741 skb_checksum_start_offset(skb));
2742 if (!(features & NETIF_F_ALL_CSUM) &&
2743 skb_checksum_help(skb))
2756 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2758 struct sk_buff *next, *head = NULL, *tail;
2760 for (; skb != NULL; skb = next) {
2764 /* in case the skb won't be segmented, point it to itself
2767 skb = validate_xmit_skb(skb, dev);
2775 /* If skb was segmented, skb->prev points to
2776 * the last segment. If not, it still contains skb.
2783 static void qdisc_pkt_len_init(struct sk_buff *skb)
2785 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2787 qdisc_skb_cb(skb)->pkt_len = skb->len;
2789 /* To get more precise estimation of bytes sent on wire,
2790 * we add to pkt_len the headers size of all segments
2792 if (shinfo->gso_size) {
2793 unsigned int hdr_len;
2794 u16 gso_segs = shinfo->gso_segs;
2796 /* mac layer + network layer */
2797 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2799 /* + transport layer */
2800 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2801 hdr_len += tcp_hdrlen(skb);
2803 hdr_len += sizeof(struct udphdr);
2805 if (shinfo->gso_type & SKB_GSO_DODGY)
2806 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2809 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
2813 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2814 struct net_device *dev,
2815 struct netdev_queue *txq)
2817 spinlock_t *root_lock = qdisc_lock(q);
2821 qdisc_pkt_len_init(skb);
2822 qdisc_calculate_pkt_len(skb, q);
2824 * Heuristic to force contended enqueues to serialize on a
2825 * separate lock before trying to get qdisc main lock.
2826 * This permits __QDISC___STATE_RUNNING owner to get the lock more
2827 * often and dequeue packets faster.
2829 contended = qdisc_is_running(q);
2830 if (unlikely(contended))
2831 spin_lock(&q->busylock);
2833 spin_lock(root_lock);
2834 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2837 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2838 qdisc_run_begin(q)) {
2840 * This is a work-conserving queue; there are no old skbs
2841 * waiting to be sent out; and the qdisc is not running -
2842 * xmit the skb directly.
2845 qdisc_bstats_update(q, skb);
2847 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
2848 if (unlikely(contended)) {
2849 spin_unlock(&q->busylock);
2856 rc = NET_XMIT_SUCCESS;
2858 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2859 if (qdisc_run_begin(q)) {
2860 if (unlikely(contended)) {
2861 spin_unlock(&q->busylock);
2867 spin_unlock(root_lock);
2868 if (unlikely(contended))
2869 spin_unlock(&q->busylock);
2873 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2874 static void skb_update_prio(struct sk_buff *skb)
2876 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2878 if (!skb->priority && skb->sk && map) {
2879 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2881 if (prioidx < map->priomap_len)
2882 skb->priority = map->priomap[prioidx];
2886 #define skb_update_prio(skb)
2889 DEFINE_PER_CPU(int, xmit_recursion);
2890 EXPORT_SYMBOL(xmit_recursion);
2892 #define RECURSION_LIMIT 10
2895 * dev_loopback_xmit - loop back @skb
2896 * @skb: buffer to transmit
2898 int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
2900 skb_reset_mac_header(skb);
2901 __skb_pull(skb, skb_network_offset(skb));
2902 skb->pkt_type = PACKET_LOOPBACK;
2903 skb->ip_summed = CHECKSUM_UNNECESSARY;
2904 WARN_ON(!skb_dst(skb));
2909 EXPORT_SYMBOL(dev_loopback_xmit);
2912 * __dev_queue_xmit - transmit a buffer
2913 * @skb: buffer to transmit
2914 * @accel_priv: private data used for L2 forwarding offload
2916 * Queue a buffer for transmission to a network device. The caller must
2917 * have set the device and priority and built the buffer before calling
2918 * this function. The function can be called from an interrupt.
2920 * A negative errno code is returned on a failure. A success does not
2921 * guarantee the frame will be transmitted as it may be dropped due
2922 * to congestion or traffic shaping.
2924 * -----------------------------------------------------------------------------------
2925 * I notice this method can also return errors from the queue disciplines,
2926 * including NET_XMIT_DROP, which is a positive value, so errors can also be positive.
2929 * Regardless of the return value, the skb is consumed, so it is currently
2930 * difficult to retry a send to this method. (You can bump the ref count
2931 * before sending to hold a reference for retry if you are careful.)
2933 * When calling this method, interrupts MUST be enabled. This is because
2934 * the BH enable code must have IRQs enabled so that it will not deadlock.
2937 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2939 struct net_device *dev = skb->dev;
2940 struct netdev_queue *txq;
2944 skb_reset_mac_header(skb);
2946 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
2947 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
2949 /* Disable soft irqs for various locks below. Also
2950 * stops preemption for RCU.
2954 skb_update_prio(skb);
2956 /* If device/qdisc don't need skb->dst, release it right now while
2957 * it's hot in this CPU's cache.
2959 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2964 txq = netdev_pick_tx(dev, skb, accel_priv);
2965 q = rcu_dereference_bh(txq->qdisc);
2967 #ifdef CONFIG_NET_CLS_ACT
2968 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2970 trace_net_dev_queue(skb);
2972 rc = __dev_xmit_skb(skb, q, dev, txq);
2976 /* The device has no queue. Common case for software devices:
2977 loopback, all sorts of tunnels...
2979 Really, it is unlikely that netif_tx_lock protection is necessary
2980 here. (e.g. loopback and IP tunnels are clean, ignoring statistics counters.)
2982 However, it is possible that they rely on protection made by us here.
2985 Check this and shoot the lock. It is not prone to deadlocks.
2986 Or shoot the noqueue qdisc instead; it is even simpler 8)
2988 if (dev->flags & IFF_UP) {
2989 int cpu = smp_processor_id(); /* ok because BHs are off */
2991 if (txq->xmit_lock_owner != cpu) {
2993 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2994 goto recursion_alert;
2996 skb = validate_xmit_skb(skb, dev);
3000 HARD_TX_LOCK(dev, txq, cpu);
3002 if (!netif_xmit_stopped(txq)) {
3003 __this_cpu_inc(xmit_recursion);
3004 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3005 __this_cpu_dec(xmit_recursion);
3006 if (dev_xmit_complete(rc)) {
3007 HARD_TX_UNLOCK(dev, txq);
3011 HARD_TX_UNLOCK(dev, txq);
3012 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3015 /* Recursion is detected! It is possible, unfortunately.
3019 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3026 rcu_read_unlock_bh();
3028 atomic_long_inc(&dev->tx_dropped);
3029 kfree_skb_list(skb);
3032 rcu_read_unlock_bh();
3036 int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
3038 return __dev_queue_xmit(skb, NULL);
3040 EXPORT_SYMBOL(dev_queue_xmit_sk);
3042 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3044 return __dev_queue_xmit(skb, accel_priv);
3046 EXPORT_SYMBOL(dev_queue_xmit_accel);
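/* Example usage (illustrative sketch): a protocol or tunnel that has built a
 * complete frame hands it to the qdisc/driver layer through the dev_queue_xmit()
 * wrapper around __dev_queue_xmit(), with skb->dev already set:
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_FOO);	// placeholder ethertype
 *	err = dev_queue_xmit(skb);
 *	if (net_xmit_eval(err))
 *		pr_debug("transmit failed: %d\n", err);
 *
 * net_xmit_eval() maps the positive NET_XMIT_* congestion codes mentioned in
 * the comment above onto 0 or an error value.
 */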
3049 /*=======================================================================
3050 			Receiver routines
3051 =======================================================================*/
3053 int netdev_max_backlog __read_mostly = 1000;
3054 EXPORT_SYMBOL(netdev_max_backlog);
3056 int netdev_tstamp_prequeue __read_mostly = 1;
3057 int netdev_budget __read_mostly = 300;
3058 int weight_p __read_mostly = 64; /* old backlog weight */
3060 /* Called with irq disabled */
3061 static inline void ____napi_schedule(struct softnet_data *sd,
3062 struct napi_struct *napi)
3064 list_add_tail(&napi->poll_list, &sd->poll_list);
3065 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3070 /* One global table that all flow-based protocols share. */
3071 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3072 EXPORT_SYMBOL(rps_sock_flow_table);
3073 u32 rps_cpu_mask __read_mostly;
3074 EXPORT_SYMBOL(rps_cpu_mask);
3076 struct static_key rps_needed __read_mostly;
3078 static struct rps_dev_flow *
3079 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3080 struct rps_dev_flow *rflow, u16 next_cpu)
3082 if (next_cpu < nr_cpu_ids) {
3083 #ifdef CONFIG_RFS_ACCEL
3084 struct netdev_rx_queue *rxqueue;
3085 struct rps_dev_flow_table *flow_table;
3086 struct rps_dev_flow *old_rflow;
3091 /* Should we steer this flow to a different hardware queue? */
3092 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3093 !(dev->features & NETIF_F_NTUPLE))
3095 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3096 if (rxq_index == skb_get_rx_queue(skb))
3099 rxqueue = dev->_rx + rxq_index;
3100 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3103 flow_id = skb_get_hash(skb) & flow_table->mask;
3104 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3105 rxq_index, flow_id);
3109 rflow = &flow_table->flows[flow_id];
3111 if (old_rflow->filter == rflow->filter)
3112 old_rflow->filter = RPS_NO_FILTER;
3116 per_cpu(softnet_data, next_cpu).input_queue_head;
3119 rflow->cpu = next_cpu;
3124 * get_rps_cpu is called from netif_receive_skb and returns the target
3125 * CPU from the RPS map of the receiving queue for a given skb.
3126 * rcu_read_lock must be held on entry.
3128 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3129 struct rps_dev_flow **rflowp)
3131 const struct rps_sock_flow_table *sock_flow_table;
3132 struct netdev_rx_queue *rxqueue = dev->_rx;
3133 struct rps_dev_flow_table *flow_table;
3134 struct rps_map *map;
3139 if (skb_rx_queue_recorded(skb)) {
3140 u16 index = skb_get_rx_queue(skb);
3142 if (unlikely(index >= dev->real_num_rx_queues)) {
3143 WARN_ONCE(dev->real_num_rx_queues > 1,
3144 "%s received packet on queue %u, but number "
3145 "of RX queues is %u\n",
3146 dev->name, index, dev->real_num_rx_queues);
3152 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3154 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3155 map = rcu_dereference(rxqueue->rps_map);
3156 if (!flow_table && !map)
3159 skb_reset_network_header(skb);
3160 hash = skb_get_hash(skb);
3164 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3165 if (flow_table && sock_flow_table) {
3166 struct rps_dev_flow *rflow;
3170 /* First check into global flow table if there is a match */
3171 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3172 if ((ident ^ hash) & ~rps_cpu_mask)
3175 next_cpu = ident & rps_cpu_mask;
3177 /* OK, now we know there is a match,
3178 * we can look at the local (per receive queue) flow table
3180 rflow = &flow_table->flows[hash & flow_table->mask];
3184 * If the desired CPU (where last recvmsg was done) is
3185 * different from current CPU (one in the rx-queue flow
3186 * table entry), switch if one of the following holds:
3187 * - Current CPU is unset (>= nr_cpu_ids).
3188 * - Current CPU is offline.
3189 * - The current CPU's queue tail has advanced beyond the
3190 * last packet that was enqueued using this table entry.
3191 * This guarantees that all previous packets for the flow
3192 * have been dequeued, thus preserving in order delivery.
3194 if (unlikely(tcpu != next_cpu) &&
3195 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
3196 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3197 rflow->last_qtail)) >= 0)) {
3199 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3202 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
3212 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3213 if (cpu_online(tcpu)) {
3223 #ifdef CONFIG_RFS_ACCEL
3226 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3227 * @dev: Device on which the filter was set
3228 * @rxq_index: RX queue index
3229 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3230 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3232 * Drivers that implement ndo_rx_flow_steer() should periodically call
3233 * this function for each installed filter and remove the filters for
3234 * which it returns %true.
3236 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3237 u32 flow_id, u16 filter_id)
3239 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3240 struct rps_dev_flow_table *flow_table;
3241 struct rps_dev_flow *rflow;
3246 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3247 if (flow_table && flow_id <= flow_table->mask) {
3248 rflow = &flow_table->flows[flow_id];
3249 cpu = ACCESS_ONCE(rflow->cpu);
3250 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
3251 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3252 rflow->last_qtail) <
3253 (int)(10 * flow_table->mask)))
3259 EXPORT_SYMBOL(rps_may_expire_flow);
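/* Example usage (illustrative sketch): as the comment above describes, a
 * hypothetical driver implementing ndo_rx_flow_steer() would periodically scan
 * its filter table and remove entries the stack no longer needs:
 *
 *	for (i = 0; i < FOO_MAX_FILTERS; i++) {
 *		struct foo_filter *f = &priv->filters[i];
 *
 *		if (f->in_use &&
 *		    rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, i))
 *			foo_remove_hw_filter(priv, f);
 *	}
 *
 * foo_filter, priv and foo_remove_hw_filter() stand in for the driver's own
 * bookkeeping.
 */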
3261 #endif /* CONFIG_RFS_ACCEL */
3263 /* Called from hardirq (IPI) context */
3264 static void rps_trigger_softirq(void *data)
3266 struct softnet_data *sd = data;
3268 ____napi_schedule(sd, &sd->backlog);
3272 #endif /* CONFIG_RPS */
3275 * Check if this softnet_data structure is another CPU's.
3276 * If yes, queue it to our IPI list and return 1.
3279 static int rps_ipi_queued(struct softnet_data *sd)
3282 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3285 sd->rps_ipi_next = mysd->rps_ipi_list;
3286 mysd->rps_ipi_list = sd;
3288 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3291 #endif /* CONFIG_RPS */
3295 #ifdef CONFIG_NET_FLOW_LIMIT
3296 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3299 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3301 #ifdef CONFIG_NET_FLOW_LIMIT
3302 struct sd_flow_limit *fl;
3303 struct softnet_data *sd;
3304 unsigned int old_flow, new_flow;
3306 if (qlen < (netdev_max_backlog >> 1))
3309 sd = this_cpu_ptr(&softnet_data);
3312 fl = rcu_dereference(sd->flow_limit);
3314 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3315 old_flow = fl->history[fl->history_head];
3316 fl->history[fl->history_head] = new_flow;
3319 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3321 if (likely(fl->buckets[old_flow]))
3322 fl->buckets[old_flow]--;
3324 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3336 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3337 * queue (may be a remote CPU queue).
3339 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3340 unsigned int *qtail)
3342 struct softnet_data *sd;
3343 unsigned long flags;
3346 sd = &per_cpu(softnet_data, cpu);
3348 local_irq_save(flags);
3351 qlen = skb_queue_len(&sd->input_pkt_queue);
3352 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3355 __skb_queue_tail(&sd->input_pkt_queue, skb);
3356 input_queue_tail_incr_save(sd, qtail);
3358 local_irq_restore(flags);
3359 return NET_RX_SUCCESS;
3362 /* Schedule NAPI for backlog device
3363 * We can use a non-atomic operation since we own the queue lock
3365 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3366 if (!rps_ipi_queued(sd))
3367 ____napi_schedule(sd, &sd->backlog);
3375 local_irq_restore(flags);
3377 atomic_long_inc(&skb->dev->rx_dropped);
3382 static int netif_rx_internal(struct sk_buff *skb)
3386 net_timestamp_check(netdev_tstamp_prequeue, skb);
3388 trace_netif_rx(skb);
3390 if (static_key_false(&rps_needed)) {
3391 struct rps_dev_flow voidflow, *rflow = &voidflow;
3397 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3399 cpu = smp_processor_id();
3401 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3409 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3416 * netif_rx - post buffer to the network code
3417 * @skb: buffer to post
3419 * This function receives a packet from a device driver and queues it for
3420 * the upper (protocol) levels to process. It always succeeds. The buffer
3421 * may be dropped during processing for congestion control or by the protocol layers.
3425 * NET_RX_SUCCESS (no congestion)
3426 * NET_RX_DROP (packet was dropped)
3430 int netif_rx(struct sk_buff *skb)
3432 trace_netif_rx_entry(skb);
3434 return netif_rx_internal(skb);
3436 EXPORT_SYMBOL(netif_rx);
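/* Example usage (illustrative sketch): a simple non-NAPI driver calls
 * netif_rx() from its receive interrupt once the skb has been filled in;
 * rx_buf and pkt_len stand in for however the hardware hands over the data:
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (!skb) {
 *		dev->stats.rx_dropped++;
 *		return;
 *	}
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * NAPI drivers would instead hand packets to napi_gro_receive() or
 * netif_receive_skb() from their poll routine.
 */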
3438 int netif_rx_ni(struct sk_buff *skb)
3442 trace_netif_rx_ni_entry(skb);
3445 err = netif_rx_internal(skb);
3446 if (local_softirq_pending())
3452 EXPORT_SYMBOL(netif_rx_ni);
3454 static void net_tx_action(struct softirq_action *h)
3456 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3458 if (sd->completion_queue) {
3459 struct sk_buff *clist;
3461 local_irq_disable();
3462 clist = sd->completion_queue;
3463 sd->completion_queue = NULL;
3467 struct sk_buff *skb = clist;
3468 clist = clist->next;
3470 WARN_ON(atomic_read(&skb->users));
3471 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3472 trace_consume_skb(skb);
3474 trace_kfree_skb(skb, net_tx_action);
3479 if (sd->output_queue) {
3482 local_irq_disable();
3483 head = sd->output_queue;
3484 sd->output_queue = NULL;
3485 sd->output_queue_tailp = &sd->output_queue;
3489 struct Qdisc *q = head;
3490 spinlock_t *root_lock;
3492 head = head->next_sched;
3494 root_lock = qdisc_lock(q);
3495 if (spin_trylock(root_lock)) {
3496 smp_mb__before_atomic();
3497 clear_bit(__QDISC_STATE_SCHED,
3500 spin_unlock(root_lock);
3502 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3504 __netif_reschedule(q);
3506 smp_mb__before_atomic();
3507 clear_bit(__QDISC_STATE_SCHED,
3515 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3516 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3517 /* This hook is defined here for ATM LANE */
3518 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3519 unsigned char *addr) __read_mostly;
3520 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3523 #ifdef CONFIG_NET_CLS_ACT
3524 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3525 struct packet_type **pt_prev,
3526 int *ret, struct net_device *orig_dev)
3528 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3531 /* If there's at least one ingress present somewhere (so
3532 * we get here via enabled static key), remaining devices
3533 * that are not configured with an ingress qdisc will bail
3534 * out w/o the rcu_dereference().
3536 if (!rxq || (q = rcu_dereference(rxq->qdisc)) == &noop_qdisc)
3540 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3544 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3546 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3547 switch (qdisc_enqueue_root(skb, q)) {
3560 * netdev_rx_handler_register - register receive handler
3561 * @dev: device to register a handler for
3562 * @rx_handler: receive handler to register
3563 * @rx_handler_data: data pointer that is used by rx handler
3565 * Register a receive handler for a device. This handler will then be
3566 * called from __netif_receive_skb. A negative errno code is returned on a failure.
3569 * The caller must hold the rtnl_mutex.
3571 * For a general description of rx_handler, see enum rx_handler_result.
3573 int netdev_rx_handler_register(struct net_device *dev,
3574 rx_handler_func_t *rx_handler,
3575 void *rx_handler_data)
3579 if (dev->rx_handler)
3582 /* Note: rx_handler_data must be set before rx_handler */
3583 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3584 rcu_assign_pointer(dev->rx_handler, rx_handler);
3588 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3591 * netdev_rx_handler_unregister - unregister receive handler
3592 * @dev: device to unregister a handler from
3594 * Unregister a receive handler from a device.
3596 * The caller must hold the rtnl_mutex.
3598 void netdev_rx_handler_unregister(struct net_device *dev)
3602 RCU_INIT_POINTER(dev->rx_handler, NULL);
3603 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
3604 * section is guaranteed to see a non-NULL rx_handler_data as well.
3608 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3610 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
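/* Example usage (illustrative sketch): virtual devices such as bridges or
 * bonding attach to a lower device's receive path with the two helpers above,
 * under RTNL; foo_port, foo_wants_packet() and foo_enqueue() are placeholders:
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct foo_port *port = rcu_dereference(skb->dev->rx_handler_data);
 *
 *		if (!foo_wants_packet(port, skb))
 *			return RX_HANDLER_PASS;
 *		foo_enqueue(port, skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(lower_dev, foo_handle_frame, port);
 *	rtnl_unlock();
 */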
3613 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3614 * the special handling of PFMEMALLOC skbs.
3616 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3618 switch (skb->protocol) {
3619 case htons(ETH_P_ARP):
3620 case htons(ETH_P_IP):
3621 case htons(ETH_P_IPV6):
3622 case htons(ETH_P_8021Q):
3623 case htons(ETH_P_8021AD):
3630 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3632 struct packet_type *ptype, *pt_prev;
3633 rx_handler_func_t *rx_handler;
3634 struct net_device *orig_dev;
3635 bool deliver_exact = false;
3636 int ret = NET_RX_DROP;
3639 net_timestamp_check(!netdev_tstamp_prequeue, skb);
3641 trace_netif_receive_skb(skb);
3643 orig_dev = skb->dev;
3645 skb_reset_network_header(skb);
3646 if (!skb_transport_header_was_set(skb))
3647 skb_reset_transport_header(skb);
3648 skb_reset_mac_len(skb);
3655 skb->skb_iif = skb->dev->ifindex;
3657 __this_cpu_inc(softnet_data.processed);
3659 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3660 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3661 skb = skb_vlan_untag(skb);
3666 #ifdef CONFIG_NET_CLS_ACT
3667 if (skb->tc_verd & TC_NCLS) {
3668 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3676 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3678 ret = deliver_skb(skb, pt_prev, orig_dev);
3682 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
3684 ret = deliver_skb(skb, pt_prev, orig_dev);
3689 #ifdef CONFIG_NET_CLS_ACT
3690 if (static_key_false(&ingress_needed)) {
3691 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3699 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3702 if (skb_vlan_tag_present(skb)) {
3704 ret = deliver_skb(skb, pt_prev, orig_dev);
3707 if (vlan_do_receive(&skb))
3709 else if (unlikely(!skb))
3713 rx_handler = rcu_dereference(skb->dev->rx_handler);
3716 ret = deliver_skb(skb, pt_prev, orig_dev);
3719 switch (rx_handler(&skb)) {
3720 case RX_HANDLER_CONSUMED:
3721 ret = NET_RX_SUCCESS;
3723 case RX_HANDLER_ANOTHER:
3725 case RX_HANDLER_EXACT:
3726 deliver_exact = true;
3727 case RX_HANDLER_PASS:
3734 if (unlikely(skb_vlan_tag_present(skb))) {
3735 if (skb_vlan_tag_get_id(skb))
3736 skb->pkt_type = PACKET_OTHERHOST;
3737 /* Note: we might in the future use prio bits
3738 * and set skb->priority like in vlan_do_receive()
3739 * For the time being, just ignore Priority Code Point
3744 type = skb->protocol;
3746 /* deliver only exact match when indicated */
3747 if (likely(!deliver_exact)) {
3748 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3749 &ptype_base[ntohs(type) &
3753 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3754 &orig_dev->ptype_specific);
3756 if (unlikely(skb->dev != orig_dev)) {
3757 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3758 &skb->dev->ptype_specific);
3762 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3765 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3768 atomic_long_inc(&skb->dev->rx_dropped);
3770 /* Jamal, now you will not be able to escape explaining
3771 * to me how you were going to use this. :-)
3781 static int __netif_receive_skb(struct sk_buff *skb)
3785 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3786 unsigned long pflags = current->flags;
3789 * PFMEMALLOC skbs are special, they should
3790 * - be delivered to SOCK_MEMALLOC sockets only
3791 * - stay away from userspace
3792 * - have bounded memory usage
3794 * Use PF_MEMALLOC as this saves us from propagating the allocation
3795 * context down to all allocation sites.
3797 current->flags |= PF_MEMALLOC;
3798 ret = __netif_receive_skb_core(skb, true);
3799 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3801 ret = __netif_receive_skb_core(skb, false);
3806 static int netif_receive_skb_internal(struct sk_buff *skb)
3808 net_timestamp_check(netdev_tstamp_prequeue, skb);
3810 if (skb_defer_rx_timestamp(skb))
3811 return NET_RX_SUCCESS;
3814 if (static_key_false(&rps_needed)) {
3815 struct rps_dev_flow voidflow, *rflow = &voidflow;
3820 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3823 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3830 return __netif_receive_skb(skb);
3834 * netif_receive_skb - process receive buffer from network
3835 * @skb: buffer to process
3837 * netif_receive_skb() is the main receive data processing function.
3838 * It always succeeds. The buffer may be dropped during processing
3839 * for congestion control or by the protocol layers.
3841 * This function may only be called from softirq context and interrupts
3842 * should be enabled.
3844 * Return values (usually ignored):
3845 * NET_RX_SUCCESS: no congestion
3846 * NET_RX_DROP: packet was dropped
3848 int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
3850 trace_netif_receive_skb_entry(skb);
3852 return netif_receive_skb_internal(skb);
3854 EXPORT_SYMBOL(netif_receive_skb_sk);
3856 /* Network device is going away, flush any packets still pending
3857 * Called with irqs disabled.
3859 static void flush_backlog(void *arg)
3861 struct net_device *dev = arg;
3862 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3863 struct sk_buff *skb, *tmp;
3866 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3867 if (skb->dev == dev) {
3868 __skb_unlink(skb, &sd->input_pkt_queue);
3870 input_queue_head_incr(sd);
3875 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3876 if (skb->dev == dev) {
3877 __skb_unlink(skb, &sd->process_queue);
3879 input_queue_head_incr(sd);
3884 static int napi_gro_complete(struct sk_buff *skb)
3886 struct packet_offload *ptype;
3887 __be16 type = skb->protocol;
3888 struct list_head *head = &offload_base;
3891 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3893 if (NAPI_GRO_CB(skb)->count == 1) {
3894 skb_shinfo(skb)->gso_size = 0;
3899 list_for_each_entry_rcu(ptype, head, list) {
3900 if (ptype->type != type || !ptype->callbacks.gro_complete)
3903 err = ptype->callbacks.gro_complete(skb, 0);
3909 WARN_ON(&ptype->list == head);
3911 return NET_RX_SUCCESS;
3915 return netif_receive_skb_internal(skb);
3918 /* napi->gro_list contains packets ordered by age.
3919 * The youngest packets are at its head.
3920 * Complete skbs in reverse order to reduce latencies.
3922 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3924 struct sk_buff *skb, *prev = NULL;
3926 /* scan list and build reverse chain */
3927 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3932 for (skb = prev; skb; skb = prev) {
3935 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3939 napi_gro_complete(skb);
3943 napi->gro_list = NULL;
3945 EXPORT_SYMBOL(napi_gro_flush);
3947 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3950 unsigned int maclen = skb->dev->hard_header_len;
3951 u32 hash = skb_get_hash_raw(skb);
3953 for (p = napi->gro_list; p; p = p->next) {
3954 unsigned long diffs;
3956 NAPI_GRO_CB(p)->flush = 0;
3958 if (hash != skb_get_hash_raw(p)) {
3959 NAPI_GRO_CB(p)->same_flow = 0;
3963 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3964 diffs |= p->vlan_tci ^ skb->vlan_tci;
3965 if (maclen == ETH_HLEN)
3966 diffs |= compare_ether_header(skb_mac_header(p),
3967 skb_mac_header(skb));
3969 diffs = memcmp(skb_mac_header(p),
3970 skb_mac_header(skb),
3972 NAPI_GRO_CB(p)->same_flow = !diffs;
3976 static void skb_gro_reset_offset(struct sk_buff *skb)
3978 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3979 const skb_frag_t *frag0 = &pinfo->frags[0];
3981 NAPI_GRO_CB(skb)->data_offset = 0;
3982 NAPI_GRO_CB(skb)->frag0 = NULL;
3983 NAPI_GRO_CB(skb)->frag0_len = 0;
3985 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
3987 !PageHighMem(skb_frag_page(frag0))) {
3988 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3989 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3993 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
3995 struct skb_shared_info *pinfo = skb_shinfo(skb);
3997 BUG_ON(skb->end - skb->tail < grow);
3999 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4001 skb->data_len -= grow;
4004 pinfo->frags[0].page_offset += grow;
4005 skb_frag_size_sub(&pinfo->frags[0], grow);
4007 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4008 skb_frag_unref(skb, 0);
4009 memmove(pinfo->frags, pinfo->frags + 1,
4010 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4014 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4016 struct sk_buff **pp = NULL;
4017 struct packet_offload *ptype;
4018 __be16 type = skb->protocol;
4019 struct list_head *head = &offload_base;
4021 enum gro_result ret;
4024 if (!(skb->dev->features & NETIF_F_GRO))
4027 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
4030 gro_list_prepare(napi, skb);
4033 list_for_each_entry_rcu(ptype, head, list) {
4034 if (ptype->type != type || !ptype->callbacks.gro_receive)
4037 skb_set_network_header(skb, skb_gro_offset(skb));
4038 skb_reset_mac_len(skb);
4039 NAPI_GRO_CB(skb)->same_flow = 0;
4040 NAPI_GRO_CB(skb)->flush = 0;
4041 NAPI_GRO_CB(skb)->free = 0;
4042 NAPI_GRO_CB(skb)->udp_mark = 0;
4043 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
4045 /* Setup for GRO checksum validation */
4046 switch (skb->ip_summed) {
4047 case CHECKSUM_COMPLETE:
4048 NAPI_GRO_CB(skb)->csum = skb->csum;
4049 NAPI_GRO_CB(skb)->csum_valid = 1;
4050 NAPI_GRO_CB(skb)->csum_cnt = 0;
4052 case CHECKSUM_UNNECESSARY:
4053 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4054 NAPI_GRO_CB(skb)->csum_valid = 0;
4057 NAPI_GRO_CB(skb)->csum_cnt = 0;
4058 NAPI_GRO_CB(skb)->csum_valid = 0;
4061 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4066 if (&ptype->list == head)
4069 same_flow = NAPI_GRO_CB(skb)->same_flow;
4070 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4073 struct sk_buff *nskb = *pp;
4077 napi_gro_complete(nskb);
4084 if (NAPI_GRO_CB(skb)->flush)
4087 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4088 struct sk_buff *nskb = napi->gro_list;
4090 /* locate the end of the list to select the 'oldest' flow */
4091 while (nskb->next) {
4097 napi_gro_complete(nskb);
4101 NAPI_GRO_CB(skb)->count = 1;
4102 NAPI_GRO_CB(skb)->age = jiffies;
4103 NAPI_GRO_CB(skb)->last = skb;
4104 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4105 skb->next = napi->gro_list;
4106 napi->gro_list = skb;
4110 grow = skb_gro_offset(skb) - skb_headlen(skb);
4112 gro_pull_from_frag0(skb, grow);
4121 struct packet_offload *gro_find_receive_by_type(__be16 type)
4123 struct list_head *offload_head = &offload_base;
4124 struct packet_offload *ptype;
4126 list_for_each_entry_rcu(ptype, offload_head, list) {
4127 if (ptype->type != type || !ptype->callbacks.gro_receive)
4133 EXPORT_SYMBOL(gro_find_receive_by_type);
4135 struct packet_offload *gro_find_complete_by_type(__be16 type)
4137 struct list_head *offload_head = &offload_base;
4138 struct packet_offload *ptype;
4140 list_for_each_entry_rcu(ptype, offload_head, list) {
4141 if (ptype->type != type || !ptype->callbacks.gro_complete)
4147 EXPORT_SYMBOL(gro_find_complete_by_type);
4149 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4153 if (netif_receive_skb_internal(skb))
4161 case GRO_MERGED_FREE:
4162 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4163 kmem_cache_free(skbuff_head_cache, skb);
4176 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4178 trace_napi_gro_receive_entry(skb);
4180 skb_gro_reset_offset(skb);
4182 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4184 EXPORT_SYMBOL(napi_gro_receive);
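/* Example usage (illustrative sketch): a NAPI driver feeds received skbs into
 * GRO from its poll routine rather than calling netif_receive_skb() directly;
 * foo_next_rx_skb() and ring are placeholders for descriptor handling:
 *
 *	while (work_done < budget &&
 *	       (skb = foo_next_rx_skb(ring)) != NULL) {
 *		skb->protocol = eth_type_trans(skb, netdev);
 *		napi_gro_receive(&ring->napi, skb);
 *		work_done++;
 *	}
 */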
4186 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4188 if (unlikely(skb->pfmemalloc)) {
4192 __skb_pull(skb, skb_headlen(skb));
4193 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4194 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4196 skb->dev = napi->dev;
4198 skb->encapsulation = 0;
4199 skb_shinfo(skb)->gso_type = 0;
4200 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4205 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4207 struct sk_buff *skb = napi->skb;
4210 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
4215 EXPORT_SYMBOL(napi_get_frags);
4217 static gro_result_t napi_frags_finish(struct napi_struct *napi,
4218 struct sk_buff *skb,
4224 __skb_push(skb, ETH_HLEN);
4225 skb->protocol = eth_type_trans(skb, skb->dev);
4226 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4231 case GRO_MERGED_FREE:
4232 napi_reuse_skb(napi, skb);
4242 /* Upper GRO stack assumes network header starts at gro_offset=0
4243 * Drivers could call both napi_gro_frags() and napi_gro_receive()
4244 * We copy the Ethernet header into skb->data to have a common layout.
4246 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4248 struct sk_buff *skb = napi->skb;
4249 const struct ethhdr *eth;
4250 unsigned int hlen = sizeof(*eth);
4254 skb_reset_mac_header(skb);
4255 skb_gro_reset_offset(skb);
4257 eth = skb_gro_header_fast(skb, 0);
4258 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4259 eth = skb_gro_header_slow(skb, hlen, 0);
4260 if (unlikely(!eth)) {
4261 napi_reuse_skb(napi, skb);
4265 gro_pull_from_frag0(skb, hlen);
4266 NAPI_GRO_CB(skb)->frag0 += hlen;
4267 NAPI_GRO_CB(skb)->frag0_len -= hlen;
4269 __skb_pull(skb, hlen);
4272 * This works because the only protocols we care about don't require
4274 * We'll fix it up properly in napi_frags_finish()
4276 skb->protocol = eth->h_proto;
4281 gro_result_t napi_gro_frags(struct napi_struct *napi)
4283 struct sk_buff *skb = napi_frags_skb(napi);
4288 trace_napi_gro_frags_entry(skb);
4290 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4292 EXPORT_SYMBOL(napi_gro_frags);
4294 /* Compute the checksum from gro_offset and return the folded value
4295 * after adding in any pseudo checksum.
4297 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4302 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4304 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4305 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4307 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4308 !skb->csum_complete_sw)
4309 netdev_rx_csum_fault(skb->dev);
4312 NAPI_GRO_CB(skb)->csum = wsum;
4313 NAPI_GRO_CB(skb)->csum_valid = 1;
4317 EXPORT_SYMBOL(__skb_gro_checksum_complete);
4320 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
4321 * Note: called with local irq disabled, but exits with local irq enabled.
4323 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4326 struct softnet_data *remsd = sd->rps_ipi_list;
4329 sd->rps_ipi_list = NULL;
4333 /* Send pending IPI's to kick RPS processing on remote cpus. */
4335 struct softnet_data *next = remsd->rps_ipi_next;
4337 if (cpu_online(remsd->cpu))
4338 smp_call_function_single_async(remsd->cpu,
4347 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4350 return sd->rps_ipi_list != NULL;
4356 static int process_backlog(struct napi_struct *napi, int quota)
4359 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4361 /* Check if we have pending IPIs; it's better to send them now
4362 * than to wait for the end of net_rx_action().
4364 if (sd_has_rps_ipi_waiting(sd)) {
4365 local_irq_disable();
4366 net_rps_action_and_irq_enable(sd);
4369 napi->weight = weight_p;
4370 local_irq_disable();
4372 struct sk_buff *skb;
4374 while ((skb = __skb_dequeue(&sd->process_queue))) {
4376 __netif_receive_skb(skb);
4377 local_irq_disable();
4378 input_queue_head_incr(sd);
4379 if (++work >= quota) {
4386 if (skb_queue_empty(&sd->input_pkt_queue)) {
4388 * Inline a custom version of __napi_complete().
4389 * Only the current CPU owns and manipulates this napi,
4390 * and NAPI_STATE_SCHED is the only possible flag set
4392 * We can use a plain write instead of clear_bit(),
4393 * and we don't need an smp_mb() memory barrier.
4401 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4402 &sd->process_queue);
4411 * __napi_schedule - schedule for receive
4412 * @n: entry to schedule
4414 * The entry's receive function will be scheduled to run.
4415 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
4417 void __napi_schedule(struct napi_struct *n)
4419 unsigned long flags;
4421 local_irq_save(flags);
4422 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4423 local_irq_restore(flags);
4425 EXPORT_SYMBOL(__napi_schedule);
4428 * __napi_schedule_irqoff - schedule for receive
4429 * @n: entry to schedule
4431 * Variant of __napi_schedule() assuming hard irqs are masked
4433 void __napi_schedule_irqoff(struct napi_struct *n)
4435 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4437 EXPORT_SYMBOL(__napi_schedule_irqoff);
4439 void __napi_complete(struct napi_struct *n)
4441 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4443 list_del_init(&n->poll_list);
4444 smp_mb__before_atomic();
4445 clear_bit(NAPI_STATE_SCHED, &n->state);
4447 EXPORT_SYMBOL(__napi_complete);
4449 void napi_complete_done(struct napi_struct *n, int work_done)
4451 unsigned long flags;
4454 * don't let napi dequeue from the cpu poll list
4455 * just in case it's running on a different CPU.
4457 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4461 unsigned long timeout = 0;
4464 timeout = n->dev->gro_flush_timeout;
4467 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4468 HRTIMER_MODE_REL_PINNED);
4470 napi_gro_flush(n, false);
4472 if (likely(list_empty(&n->poll_list))) {
4473 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4475 /* If n->poll_list is not empty, we need to mask irqs */
4476 local_irq_save(flags);
4478 local_irq_restore(flags);
4481 EXPORT_SYMBOL(napi_complete_done);
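/* Example usage (illustrative sketch): a hypothetical poll routine reports how
 * much work it did and, when it finishes early, completes NAPI and re-enables
 * its interrupt; foo_clean_rx() and foo_enable_irq() are placeholders:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
 *		int work_done = foo_clean_rx(ring, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete_done(napi, work_done);
 *			foo_enable_irq(ring);
 *		}
 *		return work_done;
 *	}
 */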
4483 /* must be called under rcu_read_lock(), as we dont take a reference */
4484 struct napi_struct *napi_by_id(unsigned int napi_id)
4486 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4487 struct napi_struct *napi;
4489 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4490 if (napi->napi_id == napi_id)
4495 EXPORT_SYMBOL_GPL(napi_by_id);
4497 void napi_hash_add(struct napi_struct *napi)
4499 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4501 spin_lock(&napi_hash_lock);
4503 /* 0 is not a valid id; we also skip an id that is already taken.
4504 * We expect both events to be extremely rare.
4507 while (!napi->napi_id) {
4508 napi->napi_id = ++napi_gen_id;
4509 if (napi_by_id(napi->napi_id))
4513 hlist_add_head_rcu(&napi->napi_hash_node,
4514 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4516 spin_unlock(&napi_hash_lock);
4519 EXPORT_SYMBOL_GPL(napi_hash_add);
4521 /* Warning: the caller is responsible for making sure an RCU grace period
4522 * has elapsed before freeing the memory containing @napi.
4524 void napi_hash_del(struct napi_struct *napi)
4526 spin_lock(&napi_hash_lock);
4528 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4529 hlist_del_rcu(&napi->napi_hash_node);
4531 spin_unlock(&napi_hash_lock);
4533 EXPORT_SYMBOL_GPL(napi_hash_del);
4535 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4537 struct napi_struct *napi;
4539 napi = container_of(timer, struct napi_struct, timer);
4541 napi_schedule(napi);
4543 return HRTIMER_NORESTART;
4546 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4547 int (*poll)(struct napi_struct *, int), int weight)
4549 INIT_LIST_HEAD(&napi->poll_list);
4550 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4551 napi->timer.function = napi_watchdog;
4552 napi->gro_count = 0;
4553 napi->gro_list = NULL;
4556 if (weight > NAPI_POLL_WEIGHT)
4557 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4559 napi->weight = weight;
4560 list_add(&napi->dev_list, &dev->napi_list);
4562 #ifdef CONFIG_NETPOLL
4563 spin_lock_init(&napi->poll_lock);
4564 napi->poll_owner = -1;
4566 set_bit(NAPI_STATE_SCHED, &napi->state);
4568 EXPORT_SYMBOL(netif_napi_add);
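/* Example usage (illustrative sketch): a hypothetical driver registers its poll
 * routine at probe time and schedules NAPI from its interrupt handler;
 * foo_disable_irq() is a placeholder for masking the device interrupt:
 *
 *	netif_napi_add(netdev, &ring->napi, foo_poll, NAPI_POLL_WEIGHT);
 *	napi_enable(&ring->napi);
 *
 *	static irqreturn_t foo_intr(int irq, void *data)
 *	{
 *		struct foo_ring *ring = data;
 *
 *		foo_disable_irq(ring);
 *		napi_schedule(&ring->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 * napi_enable() pairs with napi_disable() below; foo_poll() would follow the
 * napi_complete_done() pattern shown earlier.
 */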
4570 void napi_disable(struct napi_struct *n)
4573 set_bit(NAPI_STATE_DISABLE, &n->state);
4575 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4578 hrtimer_cancel(&n->timer);
4580 clear_bit(NAPI_STATE_DISABLE, &n->state);
4582 EXPORT_SYMBOL(napi_disable);
4584 void netif_napi_del(struct napi_struct *napi)
4586 list_del_init(&napi->dev_list);
4587 napi_free_frags(napi);
4589 kfree_skb_list(napi->gro_list);
4590 napi->gro_list = NULL;
4591 napi->gro_count = 0;
4593 EXPORT_SYMBOL(netif_napi_del);
4595 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4600 list_del_init(&n->poll_list);
4602 have = netpoll_poll_lock(n);
4606 /* This NAPI_STATE_SCHED test is for avoiding a race
4607 * with netpoll's poll_napi(). Only the entity which
4608 * obtains the lock and sees NAPI_STATE_SCHED set will
4609 * actually make the ->poll() call. Therefore we avoid
4610 * accidentally calling ->poll() when NAPI is not scheduled.
4613 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4614 work = n->poll(n, weight);
4618 WARN_ON_ONCE(work > weight);
4620 if (likely(work < weight))
4623 /* Drivers must not modify the NAPI state if they
4624 * consume the entire weight. In such cases this code
4625 * still "owns" the NAPI instance and therefore can
4626 * move the instance around on the list at-will.
4628 if (unlikely(napi_disable_pending(n))) {
4634 /* Flush packets that are too old.
4635 * If HZ < 1000, flush all packets.
4637 napi_gro_flush(n, HZ >= 1000);
4640 /* Some drivers may have called napi_schedule
4641 * prior to exhausting their budget.
4643 if (unlikely(!list_empty(&n->poll_list))) {
4644 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4645 n->dev ? n->dev->name : "backlog");
4649 list_add_tail(&n->poll_list, repoll);
4652 netpoll_poll_unlock(have);
4657 static void net_rx_action(struct softirq_action *h)
4659 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4660 unsigned long time_limit = jiffies + 2;
4661 int budget = netdev_budget;
4665 local_irq_disable();
4666 list_splice_init(&sd->poll_list, &list);
4670 struct napi_struct *n;
4672 if (list_empty(&list)) {
4673 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4678 n = list_first_entry(&list, struct napi_struct, poll_list);
4679 budget -= napi_poll(n, &repoll);
4681 /* If softirq window is exhausted then punt.
4682 * Allow this to run for 2 jiffies, which allows
4683 * an average latency of 1.5/HZ.
4685 if (unlikely(budget <= 0 ||
4686 time_after_eq(jiffies, time_limit))) {
4692 local_irq_disable();
4694 list_splice_tail_init(&sd->poll_list, &list);
4695 list_splice_tail(&repoll, &list);
4696 list_splice(&list, &sd->poll_list);
4697 if (!list_empty(&sd->poll_list))
4698 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4700 net_rps_action_and_irq_enable(sd);
4703 struct netdev_adjacent {
4704 struct net_device *dev;
4706 /* upper master flag, there can only be one master device per list */
4709 /* counter for the number of times this device was added to us */
4712 /* private field for the users */
4715 struct list_head list;
4716 struct rcu_head rcu;
4719 static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4720 struct net_device *adj_dev,
4721 struct list_head *adj_list)
4723 struct netdev_adjacent *adj;
4725 list_for_each_entry(adj, adj_list, list) {
4726 if (adj->dev == adj_dev)
4733 * netdev_has_upper_dev - Check if device is linked to an upper device
4735 * @upper_dev: upper device to check
4737 * Find out if a device is linked to the specified upper device and return true
4738 * in case it is. Note that this checks only immediate upper device,
4739 * not through a complete stack of devices. The caller must hold the RTNL lock.
4741 bool netdev_has_upper_dev(struct net_device *dev,
4742 struct net_device *upper_dev)
4746 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
4748 EXPORT_SYMBOL(netdev_has_upper_dev);
4751 * netdev_has_any_upper_dev - Check if device is linked to some device
4754 * Find out if a device is linked to an upper device and return true in case
4755 * it is. The caller must hold the RTNL lock.
4757 static bool netdev_has_any_upper_dev(struct net_device *dev)
4761 return !list_empty(&dev->all_adj_list.upper);
4765 * netdev_master_upper_dev_get - Get master upper device
4768 * Find a master upper device and return pointer to it or NULL in case
4769 * it's not there. The caller must hold the RTNL lock.
4771 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4773 struct netdev_adjacent *upper;
4777 if (list_empty(&dev->adj_list.upper))
4780 upper = list_first_entry(&dev->adj_list.upper,
4781 struct netdev_adjacent, list);
4782 if (likely(upper->master))
4786 EXPORT_SYMBOL(netdev_master_upper_dev_get);
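/* Example usage (illustrative sketch): code holding RTNL can ask whether a
 * device is enslaved and who its master is:
 *
 *	ASSERT_RTNL();
 *	master = netdev_master_upper_dev_get(dev);
 *	if (master)
 *		netdev_info(dev, "enslaved to %s\n", master->name);
 *
 * The _rcu variant further below serves the same purpose from RCU read-side
 * sections such as the receive path.
 */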
4788 void *netdev_adjacent_get_private(struct list_head *adj_list)
4790 struct netdev_adjacent *adj;
4792 adj = list_entry(adj_list, struct netdev_adjacent, list);
4794 return adj->private;
4796 EXPORT_SYMBOL(netdev_adjacent_get_private);
4799 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4801 * @iter: list_head ** of the current position
4803 * Gets the next device from the dev's upper list, starting from iter
4804 * position. The caller must hold RCU read lock.
4806 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4807 struct list_head **iter)
4809 struct netdev_adjacent *upper;
4811 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4813 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4815 if (&upper->list == &dev->adj_list.upper)
4818 *iter = &upper->list;
4822 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
4825 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4827 * @iter: list_head ** of the current position
4829 * Gets the next device from the dev's upper list, starting from iter
4830 * position. The caller must hold RCU read lock.
4832 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4833 struct list_head **iter)
4835 struct netdev_adjacent *upper;
4837 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4839 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4841 if (&upper->list == &dev->all_adj_list.upper)
4844 *iter = &upper->list;
4848 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
4851 * netdev_lower_get_next_private - Get the next ->private from the
4852 * lower neighbour list
4854 * @iter: list_head ** of the current position
4856 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4857 * list, starting from iter position. The caller must hold either the
4858 * RTNL lock or its own locking that guarantees that the neighbour lower
4859 * list will remain unchanged.
4861 void *netdev_lower_get_next_private(struct net_device *dev,
4862 struct list_head **iter)
4864 struct netdev_adjacent *lower;
4866 lower = list_entry(*iter, struct netdev_adjacent, list);
4868 if (&lower->list == &dev->adj_list.lower)
4871 *iter = lower->list.next;
4873 return lower->private;
4875 EXPORT_SYMBOL(netdev_lower_get_next_private);
4878 * netdev_lower_get_next_private_rcu - Get the next ->private from the
4879 * lower neighbour list, RCU
4882 * @iter: list_head ** of the current position
4884 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4885 * list, starting from iter position. The caller must hold RCU read lock.
4887 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4888 struct list_head **iter)
4890 struct netdev_adjacent *lower;
4892 WARN_ON_ONCE(!rcu_read_lock_held());
4894 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4896 if (&lower->list == &dev->adj_list.lower)
4899 *iter = &lower->list;
4901 return lower->private;
4903 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4906 * netdev_lower_get_next - Get the next device from the lower neighbour
4909 * @iter: list_head ** of the current position
4911 * Gets the next netdev_adjacent from the dev's lower neighbour
4912 * list, starting from iter position. The caller must hold RTNL lock or
4913 * its own locking that guarantees that the neighbour lower
4914 * list will remain unchanged.
4916 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
4918 struct netdev_adjacent *lower;
4920 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
4922 if (&lower->list == &dev->adj_list.lower)
4925 *iter = &lower->list;
4929 EXPORT_SYMBOL(netdev_lower_get_next);
4932 * netdev_lower_get_first_private_rcu - Get the first ->private from the
4933 * lower neighbour list, RCU
4937 * Gets the first netdev_adjacent->private from the dev's lower neighbour
4938 * list. The caller must hold RCU read lock.
4940 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4942 struct netdev_adjacent *lower;
4944 lower = list_first_or_null_rcu(&dev->adj_list.lower,
4945 struct netdev_adjacent, list);
4947 return lower->private;
4950 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4953 * netdev_master_upper_dev_get_rcu - Get master upper device
4956 * Find a master upper device and return a pointer to it, or NULL if
4957 * there is none. The caller must hold the RCU read lock.
4959 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4961 struct netdev_adjacent *upper;
4963 upper = list_first_or_null_rcu(&dev->adj_list.upper,
4964 struct netdev_adjacent, list);
4965 if (upper && likely(upper->master))
4969 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
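/* Usage note (a hedged sketch): a fast-path lookup of the master device,
 * e.g. from a receive handler, would typically look like this; the
 * returned pointer is only guaranteed valid inside the RCU section.
 *
 *	rcu_read_lock();
 *	master = netdev_master_upper_dev_get_rcu(slave_dev);
 *	if (master)
 *		...use master without sleeping...
 *	rcu_read_unlock();
 */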
4971 static int netdev_adjacent_sysfs_add(struct net_device *dev,
4972 struct net_device *adj_dev,
4973 struct list_head *dev_list)
4975 char linkname[IFNAMSIZ+7];
4976 sprintf(linkname, dev_list == &dev->adj_list.upper ?
4977 "upper_%s" : "lower_%s", adj_dev->name);
4978 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4981 static void netdev_adjacent_sysfs_del(struct net_device *dev,
4983 struct list_head *dev_list)
4985 char linkname[IFNAMSIZ+7];
4986 sprintf(linkname, dev_list == &dev->adj_list.upper ?
4987 "upper_%s" : "lower_%s", name);
4988 sysfs_remove_link(&(dev->dev.kobj), linkname);
4991 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
4992 struct net_device *adj_dev,
4993 struct list_head *dev_list)
4995 return (dev_list == &dev->adj_list.upper ||
4996 dev_list == &dev->adj_list.lower) &&
4997 net_eq(dev_net(dev), dev_net(adj_dev));
5000 static int __netdev_adjacent_dev_insert(struct net_device *dev,
5001 struct net_device *adj_dev,
5002 struct list_head *dev_list,
5003 void *private, bool master)
5005 struct netdev_adjacent *adj;
5008 adj = __netdev_find_adj(dev, adj_dev, dev_list);
5015 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5020 adj->master = master;
5022 adj->private = private;
5025 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5026 adj_dev->name, dev->name, adj_dev->name);
5028 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
5029 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5034 /* Ensure that master link is always the first item in list. */
5036 ret = sysfs_create_link(&(dev->dev.kobj),
5037 &(adj_dev->dev.kobj), "master");
5039 goto remove_symlinks;
5041 list_add_rcu(&adj->list, dev_list);
5043 list_add_tail_rcu(&adj->list, dev_list);
5049 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5050 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5058 static void __netdev_adjacent_dev_remove(struct net_device *dev,
5059 struct net_device *adj_dev,
5060 struct list_head *dev_list)
5062 struct netdev_adjacent *adj;
5064 adj = __netdev_find_adj(dev, adj_dev, dev_list);
5067 pr_err("tried to remove device %s from %s\n",
5068 dev->name, adj_dev->name);
5072 if (adj->ref_nr > 1) {
5073 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5080 sysfs_remove_link(&(dev->dev.kobj), "master");
5082 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5083 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5085 list_del_rcu(&adj->list);
5086 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5087 adj_dev->name, dev->name, adj_dev->name);
5089 kfree_rcu(adj, rcu);
5092 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5093 struct net_device *upper_dev,
5094 struct list_head *up_list,
5095 struct list_head *down_list,
5096 void *private, bool master)
5100 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5105 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5108 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5115 static int __netdev_adjacent_dev_link(struct net_device *dev,
5116 struct net_device *upper_dev)
5118 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5119 &dev->all_adj_list.upper,
5120 &upper_dev->all_adj_list.lower,
5124 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5125 struct net_device *upper_dev,
5126 struct list_head *up_list,
5127 struct list_head *down_list)
5129 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5130 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5133 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5134 struct net_device *upper_dev)
5136 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5137 &dev->all_adj_list.upper,
5138 &upper_dev->all_adj_list.lower);
5141 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5142 struct net_device *upper_dev,
5143 void *private, bool master)
5145 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5150 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5151 &dev->adj_list.upper,
5152 &upper_dev->adj_list.lower,
5155 __netdev_adjacent_dev_unlink(dev, upper_dev);
5162 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5163 struct net_device *upper_dev)
5165 __netdev_adjacent_dev_unlink(dev, upper_dev);
5166 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5167 &dev->adj_list.upper,
5168 &upper_dev->adj_list.lower);
5171 static int __netdev_upper_dev_link(struct net_device *dev,
5172 struct net_device *upper_dev, bool master,
5175 struct netdev_adjacent *i, *j, *to_i, *to_j;
5180 if (dev == upper_dev)
5183 /* To prevent loops, check if dev is not upper device to upper_dev. */
5184 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
5187 if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
5190 if (master && netdev_master_upper_dev_get(dev))
5193 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5198 /* Now that we linked these devs, make all the upper_dev's
5199 * all_adj_list.upper visible to every dev's all_adj_list.lower and vice
5200 * versa, and don't forget the devices themselves. All of these
5201 * links are non-neighbours.
5203 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5204 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5205 pr_debug("Interlinking %s with %s, non-neighbour\n",
5206 i->dev->name, j->dev->name);
5207 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5213 /* add dev to every upper_dev's upper device */
5214 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5215 pr_debug("linking %s's upper device %s with %s\n",
5216 upper_dev->name, i->dev->name, dev->name);
5217 ret = __netdev_adjacent_dev_link(dev, i->dev);
5219 goto rollback_upper_mesh;
5222 /* add upper_dev to every dev's lower device */
5223 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5224 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5225 i->dev->name, upper_dev->name);
5226 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5228 goto rollback_lower_mesh;
5231 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5234 rollback_lower_mesh:
5236 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5239 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5244 rollback_upper_mesh:
5246 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5249 __netdev_adjacent_dev_unlink(dev, i->dev);
5257 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5258 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5259 if (i == to_i && j == to_j)
5261 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5267 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5273 * netdev_upper_dev_link - Add a link to the upper device
5275 * @upper_dev: new upper device
5277 * Adds a link to device which is upper to this one. The caller must hold
5278 * the RTNL lock. On a failure a negative errno code is returned.
5279 * On success the reference counts are adjusted and the function returns zero.
5282 int netdev_upper_dev_link(struct net_device *dev,
5283 struct net_device *upper_dev)
5285 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
5287 EXPORT_SYMBOL(netdev_upper_dev_link);
5290 * netdev_master_upper_dev_link - Add a master link to the upper device
5292 * @upper_dev: new upper device
5294 * Adds a link to device which is upper to this one. In this case, only
5295 * one master upper device can be linked, although other non-master devices
5296 * might be linked as well. The caller must hold the RTNL lock.
5297 * On a failure a negative errno code is returned. On success the reference
5298 * counts are adjusted and the function returns zero.
5300 int netdev_master_upper_dev_link(struct net_device *dev,
5301 struct net_device *upper_dev)
5303 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
5305 EXPORT_SYMBOL(netdev_master_upper_dev_link);
5307 int netdev_master_upper_dev_link_private(struct net_device *dev,
5308 struct net_device *upper_dev,
5311 return __netdev_upper_dev_link(dev, upper_dev, true, private);
5313 EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5316 * netdev_upper_dev_unlink - Removes a link to upper device
5318 * @upper_dev: new upper device
5320 * Removes a link to device which is upper to this one. The caller must hold the RTNL lock.
5323 void netdev_upper_dev_unlink(struct net_device *dev,
5324 struct net_device *upper_dev)
5326 struct netdev_adjacent *i, *j;
5329 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5331 /* Here is the tricky part. We must remove all dev's lower
5332 * devices from all upper_dev's upper devices and vice
5333 * versa, to maintain the graph relationship.
5335 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5336 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5337 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5339 /* also remove the devices themselves from the lower/upper device
5342 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5343 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5345 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5346 __netdev_adjacent_dev_unlink(dev, i->dev);
5348 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5350 EXPORT_SYMBOL(netdev_upper_dev_unlink);
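/* Usage note (a hedged sketch, with bond_dev/slave_dev as hypothetical
 * names): a bonding-like driver pairs the link and unlink calls under
 * RTNL, typically from its enslave and release paths.
 *
 *	ASSERT_RTNL();
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);
 */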
5353 * netdev_bonding_info_change - Dispatch event about slave change
5355 * @bonding_info: info to dispatch
5357 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5358 * The caller must hold the RTNL lock.
5360 void netdev_bonding_info_change(struct net_device *dev,
5361 struct netdev_bonding_info *bonding_info)
5363 struct netdev_notifier_bonding_info info;
5365 memcpy(&info.bonding_info, bonding_info,
5366 sizeof(struct netdev_bonding_info));
5367 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
5370 EXPORT_SYMBOL(netdev_bonding_info_change);
5372 static void netdev_adjacent_add_links(struct net_device *dev)
5374 struct netdev_adjacent *iter;
5376 struct net *net = dev_net(dev);
5378 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5379 if (!net_eq(net,dev_net(iter->dev)))
5381 netdev_adjacent_sysfs_add(iter->dev, dev,
5382 &iter->dev->adj_list.lower);
5383 netdev_adjacent_sysfs_add(dev, iter->dev,
5384 &dev->adj_list.upper);
5387 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5388 if (!net_eq(net,dev_net(iter->dev)))
5390 netdev_adjacent_sysfs_add(iter->dev, dev,
5391 &iter->dev->adj_list.upper);
5392 netdev_adjacent_sysfs_add(dev, iter->dev,
5393 &dev->adj_list.lower);
5397 static void netdev_adjacent_del_links(struct net_device *dev)
5399 struct netdev_adjacent *iter;
5401 struct net *net = dev_net(dev);
5403 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5404 if (!net_eq(net,dev_net(iter->dev)))
5406 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5407 &iter->dev->adj_list.lower);
5408 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5409 &dev->adj_list.upper);
5412 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5413 if (!net_eq(net,dev_net(iter->dev)))
5415 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5416 &iter->dev->adj_list.upper);
5417 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5418 &dev->adj_list.lower);
5422 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5424 struct netdev_adjacent *iter;
5426 struct net *net = dev_net(dev);
5428 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5429 if (!net_eq(net,dev_net(iter->dev)))
5431 netdev_adjacent_sysfs_del(iter->dev, oldname,
5432 &iter->dev->adj_list.lower);
5433 netdev_adjacent_sysfs_add(iter->dev, dev,
5434 &iter->dev->adj_list.lower);
5437 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5438 if (!net_eq(net,dev_net(iter->dev)))
5440 netdev_adjacent_sysfs_del(iter->dev, oldname,
5441 &iter->dev->adj_list.upper);
5442 netdev_adjacent_sysfs_add(iter->dev, dev,
5443 &iter->dev->adj_list.upper);
5447 void *netdev_lower_dev_get_private(struct net_device *dev,
5448 struct net_device *lower_dev)
5450 struct netdev_adjacent *lower;
5454 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5458 return lower->private;
5460 EXPORT_SYMBOL(netdev_lower_dev_get_private);
5463 int dev_get_nest_level(struct net_device *dev,
5464 bool (*type_check)(struct net_device *dev))
5466 struct net_device *lower = NULL;
5467 struct list_head *iter;
5473 netdev_for_each_lower_dev(dev, lower, iter) {
5474 nest = dev_get_nest_level(lower, type_check);
5475 if (max_nest < nest)
5479 if (type_check(dev))
5484 EXPORT_SYMBOL(dev_get_nest_level);
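/* Usage note (a hedged sketch): the type_check callback decides which
 * lower devices count towards the nesting depth. my_is_stacked_dev() is a
 * hypothetical predicate standing in for a driver-specific test such as a
 * VLAN check.
 *
 *	static bool my_is_stacked_dev(struct net_device *dev)
 *	{
 *		return dev->priv_flags & IFF_802_1Q_VLAN;
 *	}
 *
 *	nest = dev_get_nest_level(dev, my_is_stacked_dev);
 */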
5486 static void dev_change_rx_flags(struct net_device *dev, int flags)
5488 const struct net_device_ops *ops = dev->netdev_ops;
5490 if (ops->ndo_change_rx_flags)
5491 ops->ndo_change_rx_flags(dev, flags);
5494 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
5496 unsigned int old_flags = dev->flags;
5502 dev->flags |= IFF_PROMISC;
5503 dev->promiscuity += inc;
5504 if (dev->promiscuity == 0) {
5507 * If inc causes overflow, untouch promisc and return error.
5510 dev->flags &= ~IFF_PROMISC;
5512 dev->promiscuity -= inc;
5513 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5518 if (dev->flags != old_flags) {
5519 pr_info("device %s %s promiscuous mode\n",
5521 dev->flags & IFF_PROMISC ? "entered" : "left");
5522 if (audit_enabled) {
5523 current_uid_gid(&uid, &gid);
5524 audit_log(current->audit_context, GFP_ATOMIC,
5525 AUDIT_ANOM_PROMISCUOUS,
5526 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5527 dev->name, (dev->flags & IFF_PROMISC),
5528 (old_flags & IFF_PROMISC),
5529 from_kuid(&init_user_ns, audit_get_loginuid(current)),
5530 from_kuid(&init_user_ns, uid),
5531 from_kgid(&init_user_ns, gid),
5532 audit_get_sessionid(current));
5535 dev_change_rx_flags(dev, IFF_PROMISC);
5538 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
5543 * dev_set_promiscuity - update promiscuity count on a device
5547 * Add or remove promiscuity from a device. While the count in the device
5548 * remains above zero the interface remains promiscuous. Once it hits zero
5549 * the device reverts to normal filtering operation. A negative @inc
5550 * value is used to drop promiscuity on the device.
5551 * Return 0 if successful or a negative errno code on error.
5553 int dev_set_promiscuity(struct net_device *dev, int inc)
5555 unsigned int old_flags = dev->flags;
5558 err = __dev_set_promiscuity(dev, inc, true);
5561 if (dev->flags != old_flags)
5562 dev_set_rx_mode(dev);
5565 EXPORT_SYMBOL(dev_set_promiscuity);
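/* Usage note (a hedged sketch): callers treat this as a counted reference
 * on promiscuous mode, so every +1 must eventually be balanced by a -1
 * from the same caller, and the calls are normally made under RTNL.
 *
 *	err = dev_set_promiscuity(dev, 1);
 *	...
 *	if (!err)
 *		dev_set_promiscuity(dev, -1);
 */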
5567 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
5569 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
5573 dev->flags |= IFF_ALLMULTI;
5574 dev->allmulti += inc;
5575 if (dev->allmulti == 0) {
5578 * If inc causes overflow, untouch allmulti and return error.
5581 dev->flags &= ~IFF_ALLMULTI;
5583 dev->allmulti -= inc;
5584 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5589 if (dev->flags ^ old_flags) {
5590 dev_change_rx_flags(dev, IFF_ALLMULTI);
5591 dev_set_rx_mode(dev);
5593 __dev_notify_flags(dev, old_flags,
5594 dev->gflags ^ old_gflags);
5600 * dev_set_allmulti - update allmulti count on a device
5604 * Add or remove reception of all multicast frames to a device. While the
5605 * count in the device remains above zero the interface remains listening
5606 * to all multicast frames. Once it hits zero the device reverts to normal
5607 * filtering operation. A negative @inc value is used to drop the counter
5608 * when releasing a resource needing all multicasts.
5609 * Return 0 if successful or a negative errno code on error.
5612 int dev_set_allmulti(struct net_device *dev, int inc)
5614 return __dev_set_allmulti(dev, inc, true);
5616 EXPORT_SYMBOL(dev_set_allmulti);
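/* Usage note (a hedged sketch): like dev_set_promiscuity() this is a
 * counted interface; a successful increment should later be balanced by
 * the same caller, normally under RTNL.
 *
 *	err = dev_set_allmulti(dev, 1);
 *	...
 *	if (!err)
 *		dev_set_allmulti(dev, -1);
 */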
5619 * Upload unicast and multicast address lists to device and
5620 * configure RX filtering. When the device doesn't support unicast
5621 * filtering it is put in promiscuous mode while unicast addresses are present.
5624 void __dev_set_rx_mode(struct net_device *dev)
5626 const struct net_device_ops *ops = dev->netdev_ops;
5628 /* dev_open will call this function so the list will stay sane. */
5629 if (!(dev->flags&IFF_UP))
5632 if (!netif_device_present(dev))
5635 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
5636 /* Unicast addresses changes may only happen under the rtnl,
5637 * therefore calling __dev_set_promiscuity here is safe.
5639 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
5640 __dev_set_promiscuity(dev, 1, false);
5641 dev->uc_promisc = true;
5642 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
5643 __dev_set_promiscuity(dev, -1, false);
5644 dev->uc_promisc = false;
5648 if (ops->ndo_set_rx_mode)
5649 ops->ndo_set_rx_mode(dev);
5652 void dev_set_rx_mode(struct net_device *dev)
5654 netif_addr_lock_bh(dev);
5655 __dev_set_rx_mode(dev);
5656 netif_addr_unlock_bh(dev);
5660 * dev_get_flags - get flags reported to userspace
5663 * Get the combination of flag bits exported through APIs to userspace.
5665 unsigned int dev_get_flags(const struct net_device *dev)
5669 flags = (dev->flags & ~(IFF_PROMISC |
5674 (dev->gflags & (IFF_PROMISC |
5677 if (netif_running(dev)) {
5678 if (netif_oper_up(dev))
5679 flags |= IFF_RUNNING;
5680 if (netif_carrier_ok(dev))
5681 flags |= IFF_LOWER_UP;
5682 if (netif_dormant(dev))
5683 flags |= IFF_DORMANT;
5688 EXPORT_SYMBOL(dev_get_flags);
5690 int __dev_change_flags(struct net_device *dev, unsigned int flags)
5692 unsigned int old_flags = dev->flags;
5698 * Set the flags on our device.
5701 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5702 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5704 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5708 * Load in the correct multicast list now the flags have changed.
5711 if ((old_flags ^ flags) & IFF_MULTICAST)
5712 dev_change_rx_flags(dev, IFF_MULTICAST);
5714 dev_set_rx_mode(dev);
5717 * Have we downed the interface? We handle IFF_UP ourselves
5718 * according to user attempts to set it, rather than blindly setting it.
5723 if ((old_flags ^ flags) & IFF_UP)
5724 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
5726 if ((flags ^ dev->gflags) & IFF_PROMISC) {
5727 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5728 unsigned int old_flags = dev->flags;
5730 dev->gflags ^= IFF_PROMISC;
5732 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5733 if (dev->flags != old_flags)
5734 dev_set_rx_mode(dev);
5737 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5738 is important. Some (broken) drivers set IFF_PROMISC when
5739 IFF_ALLMULTI is requested, without asking us and without reporting it.
5741 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
5742 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5744 dev->gflags ^= IFF_ALLMULTI;
5745 __dev_set_allmulti(dev, inc, false);
5751 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5752 unsigned int gchanges)
5754 unsigned int changes = dev->flags ^ old_flags;
5757 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
5759 if (changes & IFF_UP) {
5760 if (dev->flags & IFF_UP)
5761 call_netdevice_notifiers(NETDEV_UP, dev);
5763 call_netdevice_notifiers(NETDEV_DOWN, dev);
5766 if (dev->flags & IFF_UP &&
5767 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5768 struct netdev_notifier_change_info change_info;
5770 change_info.flags_changed = changes;
5771 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5777 * dev_change_flags - change device settings
5779 * @flags: device state flags
5781 * Change settings on a device based on the state flags. The flags are
5782 * in the userspace exported format.
5784 int dev_change_flags(struct net_device *dev, unsigned int flags)
5787 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
5789 ret = __dev_change_flags(dev, flags);
5793 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
5794 __dev_notify_flags(dev, old_flags, changes);
5797 EXPORT_SYMBOL(dev_change_flags);
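/* Usage note (a hedged sketch): bringing an interface up administratively
 * from kernel code usually reads the current flags first and is done
 * under RTNL, since the flag change may open or close the device.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */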
5799 static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5801 const struct net_device_ops *ops = dev->netdev_ops;
5803 if (ops->ndo_change_mtu)
5804 return ops->ndo_change_mtu(dev, new_mtu);
5811 * dev_set_mtu - Change maximum transfer unit
5813 * @new_mtu: new transfer unit
5815 * Change the maximum transfer size of the network device.
5817 int dev_set_mtu(struct net_device *dev, int new_mtu)
5821 if (new_mtu == dev->mtu)
5824 /* MTU must be positive. */
5828 if (!netif_device_present(dev))
5831 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5832 err = notifier_to_errno(err);
5836 orig_mtu = dev->mtu;
5837 err = __dev_set_mtu(dev, new_mtu);
5840 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5841 err = notifier_to_errno(err);
5843 /* setting mtu back and notifying everyone again,
5844 * so that they have a chance to revert changes.
5846 __dev_set_mtu(dev, orig_mtu);
5847 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5852 EXPORT_SYMBOL(dev_set_mtu);
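/* Usage note (a hedged sketch): the MTU is changed under RTNL; the value
 * 1500 below is only an illustrative Ethernet default.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 1500);
 *	rtnl_unlock();
 *	if (err)
 *		netdev_err(dev, "failed to set MTU: %d\n", err);
 */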
5855 * dev_set_group - Change group this device belongs to
5857 * @new_group: group this device should belong to
5859 void dev_set_group(struct net_device *dev, int new_group)
5861 dev->group = new_group;
5863 EXPORT_SYMBOL(dev_set_group);
5866 * dev_set_mac_address - Change Media Access Control Address
5870 * Change the hardware (MAC) address of the device
5872 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5874 const struct net_device_ops *ops = dev->netdev_ops;
5877 if (!ops->ndo_set_mac_address)
5879 if (sa->sa_family != dev->type)
5881 if (!netif_device_present(dev))
5883 err = ops->ndo_set_mac_address(dev, sa);
5886 dev->addr_assign_type = NET_ADDR_SET;
5887 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5888 add_device_randomness(dev->dev_addr, dev->addr_len);
5891 EXPORT_SYMBOL(dev_set_mac_address);
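/* Usage note (a hedged sketch): the new address is passed in a struct
 * sockaddr whose family must match dev->type; new_addr is a hypothetical
 * buffer of dev->addr_len bytes, and the call is made under RTNL.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */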
5894 * dev_change_carrier - Change device carrier
5896 * @new_carrier: new value
5898 * Change device carrier
5900 int dev_change_carrier(struct net_device *dev, bool new_carrier)
5902 const struct net_device_ops *ops = dev->netdev_ops;
5904 if (!ops->ndo_change_carrier)
5906 if (!netif_device_present(dev))
5908 return ops->ndo_change_carrier(dev, new_carrier);
5910 EXPORT_SYMBOL(dev_change_carrier);
5913 * dev_get_phys_port_id - Get device physical port ID
5917 * Get device physical port ID
5919 int dev_get_phys_port_id(struct net_device *dev,
5920 struct netdev_phys_item_id *ppid)
5922 const struct net_device_ops *ops = dev->netdev_ops;
5924 if (!ops->ndo_get_phys_port_id)
5926 return ops->ndo_get_phys_port_id(dev, ppid);
5928 EXPORT_SYMBOL(dev_get_phys_port_id);
5931 * dev_get_phys_port_name - Get device physical port name
5935 * Get device physical port name
5937 int dev_get_phys_port_name(struct net_device *dev,
5938 char *name, size_t len)
5940 const struct net_device_ops *ops = dev->netdev_ops;
5942 if (!ops->ndo_get_phys_port_name)
5944 return ops->ndo_get_phys_port_name(dev, name, len);
5946 EXPORT_SYMBOL(dev_get_phys_port_name);
5949 * dev_new_index - allocate an ifindex
5950 * @net: the applicable net namespace
5952 * Returns a suitable unique value for a new device interface
5953 * number. The caller must hold the rtnl semaphore or the
5954 * dev_base_lock to be sure it remains unique.
5956 static int dev_new_index(struct net *net)
5958 int ifindex = net->ifindex;
5962 if (!__dev_get_by_index(net, ifindex))
5963 return net->ifindex = ifindex;
5967 /* Delayed registration/unregistration */
5968 static LIST_HEAD(net_todo_list);
5969 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5971 static void net_set_todo(struct net_device *dev)
5973 list_add_tail(&dev->todo_list, &net_todo_list);
5974 dev_net(dev)->dev_unreg_count++;
5977 static void rollback_registered_many(struct list_head *head)
5979 struct net_device *dev, *tmp;
5980 LIST_HEAD(close_head);
5982 BUG_ON(dev_boot_phase);
5985 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5986 /* Some devices call without registering
5987 * for initialization unwind. Remove those
5988 * devices and proceed with the remaining.
5990 if (dev->reg_state == NETREG_UNINITIALIZED) {
5991 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5995 list_del(&dev->unreg_list);
5998 dev->dismantle = true;
5999 BUG_ON(dev->reg_state != NETREG_REGISTERED);
6002 /* If device is running, close it first. */
6003 list_for_each_entry(dev, head, unreg_list)
6004 list_add_tail(&dev->close_list, &close_head);
6005 dev_close_many(&close_head, true);
6007 list_for_each_entry(dev, head, unreg_list) {
6008 /* And unlink it from device chain. */
6009 unlist_netdevice(dev);
6011 dev->reg_state = NETREG_UNREGISTERING;
6016 list_for_each_entry(dev, head, unreg_list) {
6017 struct sk_buff *skb = NULL;
6019 /* Shutdown queueing discipline. */
6023 /* Notify protocols, that we are about to destroy
6024 this device. They should clean all the things.
6026 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6028 if (!dev->rtnl_link_ops ||
6029 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6030 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6034 * Flush the unicast and multicast chains
6039 if (dev->netdev_ops->ndo_uninit)
6040 dev->netdev_ops->ndo_uninit(dev);
6043 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
6045 /* Notifier chain MUST detach us all upper devices. */
6046 WARN_ON(netdev_has_any_upper_dev(dev));
6048 /* Remove entries from kobject tree */
6049 netdev_unregister_kobject(dev);
6051 /* Remove XPS queueing entries */
6052 netif_reset_xps_queues_gt(dev, 0);
6058 list_for_each_entry(dev, head, unreg_list)
6062 static void rollback_registered(struct net_device *dev)
6066 list_add(&dev->unreg_list, &single);
6067 rollback_registered_many(&single);
6071 static netdev_features_t netdev_fix_features(struct net_device *dev,
6072 netdev_features_t features)
6074 /* Fix illegal checksum combinations */
6075 if ((features & NETIF_F_HW_CSUM) &&
6076 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6077 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
6078 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6081 /* TSO requires that SG is present as well. */
6082 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6083 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
6084 features &= ~NETIF_F_ALL_TSO;
6087 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6088 !(features & NETIF_F_IP_CSUM)) {
6089 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6090 features &= ~NETIF_F_TSO;
6091 features &= ~NETIF_F_TSO_ECN;
6094 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6095 !(features & NETIF_F_IPV6_CSUM)) {
6096 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6097 features &= ~NETIF_F_TSO6;
6100 /* TSO ECN requires that TSO is present as well. */
6101 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6102 features &= ~NETIF_F_TSO_ECN;
6104 /* Software GSO depends on SG. */
6105 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6106 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
6107 features &= ~NETIF_F_GSO;
6110 /* UFO needs SG and checksumming */
6111 if (features & NETIF_F_UFO) {
6112 /* maybe split UFO into V4 and V6? */
6113 if (!((features & NETIF_F_GEN_CSUM) ||
6114 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
6115 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6117 "Dropping NETIF_F_UFO since no checksum offload features.\n");
6118 features &= ~NETIF_F_UFO;
6121 if (!(features & NETIF_F_SG)) {
6123 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
6124 features &= ~NETIF_F_UFO;
6128 #ifdef CONFIG_NET_RX_BUSY_POLL
6129 if (dev->netdev_ops->ndo_busy_poll)
6130 features |= NETIF_F_BUSY_POLL;
6133 features &= ~NETIF_F_BUSY_POLL;
6138 int __netdev_update_features(struct net_device *dev)
6140 netdev_features_t features;
6145 features = netdev_get_wanted_features(dev);
6147 if (dev->netdev_ops->ndo_fix_features)
6148 features = dev->netdev_ops->ndo_fix_features(dev, features);
6150 /* driver might be less strict about feature dependencies */
6151 features = netdev_fix_features(dev, features);
6153 if (dev->features == features)
6156 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6157 &dev->features, &features);
6159 if (dev->netdev_ops->ndo_set_features)
6160 err = dev->netdev_ops->ndo_set_features(dev, features);
6162 if (unlikely(err < 0)) {
6164 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6165 err, &features, &dev->features);
6170 dev->features = features;
6176 * netdev_update_features - recalculate device features
6177 * @dev: the device to check
6179 * Recalculate dev->features set and send notifications if it
6180 * has changed. Should be called after driver or hardware dependent
6181 * conditions might have changed that influence the features.
6183 void netdev_update_features(struct net_device *dev)
6185 if (__netdev_update_features(dev))
6186 netdev_features_change(dev);
6188 EXPORT_SYMBOL(netdev_update_features);
6191 * netdev_change_features - recalculate device features
6192 * @dev: the device to check
6194 * Recalculate dev->features set and send notifications even
6195 * if they have not changed. Should be called instead of
6196 * netdev_update_features() if also dev->vlan_features might
6197 * have changed to allow the changes to be propagated to stacked
6200 void netdev_change_features(struct net_device *dev)
6202 __netdev_update_features(dev);
6203 netdev_features_change(dev);
6205 EXPORT_SYMBOL(netdev_change_features);
6208 * netif_stacked_transfer_operstate - transfer operstate
6209 * @rootdev: the root or lower level device to transfer state from
6210 * @dev: the device to transfer operstate to
6212 * Transfer operational state from root to device. This is normally
6213 * called when a stacking relationship exists between the root
6214 * device and the device (a leaf device).
6216 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6217 struct net_device *dev)
6219 if (rootdev->operstate == IF_OPER_DORMANT)
6220 netif_dormant_on(dev);
6222 netif_dormant_off(dev);
6224 if (netif_carrier_ok(rootdev)) {
6225 if (!netif_carrier_ok(dev))
6226 netif_carrier_on(dev);
6228 if (netif_carrier_ok(dev))
6229 netif_carrier_off(dev);
6232 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6235 static int netif_alloc_rx_queues(struct net_device *dev)
6237 unsigned int i, count = dev->num_rx_queues;
6238 struct netdev_rx_queue *rx;
6239 size_t sz = count * sizeof(*rx);
6243 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6251 for (i = 0; i < count; i++)
6257 static void netdev_init_one_queue(struct net_device *dev,
6258 struct netdev_queue *queue, void *_unused)
6260 /* Initialize queue lock */
6261 spin_lock_init(&queue->_xmit_lock);
6262 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6263 queue->xmit_lock_owner = -1;
6264 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
6267 dql_init(&queue->dql, HZ);
6271 static void netif_free_tx_queues(struct net_device *dev)
6276 static int netif_alloc_netdev_queues(struct net_device *dev)
6278 unsigned int count = dev->num_tx_queues;
6279 struct netdev_queue *tx;
6280 size_t sz = count * sizeof(*tx);
6282 BUG_ON(count < 1 || count > 0xffff);
6284 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6292 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6293 spin_lock_init(&dev->tx_global_lock);
6299 * register_netdevice - register a network device
6300 * @dev: device to register
6302 * Take a completed network device structure and add it to the kernel
6303 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6304 * chain. 0 is returned on success. A negative errno code is returned
6305 * on a failure to set up the device, or if the name is a duplicate.
6307 * Callers must hold the rtnl semaphore. You may want
6308 * register_netdev() instead of this.
6311 * The locking appears insufficient to guarantee two parallel registers
6312 * will not get the same name.
6315 int register_netdevice(struct net_device *dev)
6318 struct net *net = dev_net(dev);
6320 BUG_ON(dev_boot_phase);
6325 /* When net_device's are persistent, this will be fatal. */
6326 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
6329 spin_lock_init(&dev->addr_list_lock);
6330 netdev_set_addr_lockdep_class(dev);
6332 ret = dev_get_valid_name(net, dev, dev->name);
6336 /* Init, if this function is available */
6337 if (dev->netdev_ops->ndo_init) {
6338 ret = dev->netdev_ops->ndo_init(dev);
6346 if (((dev->hw_features | dev->features) &
6347 NETIF_F_HW_VLAN_CTAG_FILTER) &&
6348 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6349 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6350 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6357 dev->ifindex = dev_new_index(net);
6358 else if (__dev_get_by_index(net, dev->ifindex))
6361 /* Transfer changeable features to wanted_features and enable
6362 * software offloads (GSO and GRO).
6364 dev->hw_features |= NETIF_F_SOFT_FEATURES;
6365 dev->features |= NETIF_F_SOFT_FEATURES;
6366 dev->wanted_features = dev->features & dev->hw_features;
6368 if (!(dev->flags & IFF_LOOPBACK)) {
6369 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6372 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6374 dev->vlan_features |= NETIF_F_HIGHDMA;
6376 /* Make NETIF_F_SG inheritable to tunnel devices.
6378 dev->hw_enc_features |= NETIF_F_SG;
6380 /* Make NETIF_F_SG inheritable to MPLS.
6382 dev->mpls_features |= NETIF_F_SG;
6384 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6385 ret = notifier_to_errno(ret);
6389 ret = netdev_register_kobject(dev);
6392 dev->reg_state = NETREG_REGISTERED;
6394 __netdev_update_features(dev);
6397 * Default initial state at registry is that the
6398 * device is present.
6401 set_bit(__LINK_STATE_PRESENT, &dev->state);
6403 linkwatch_init_dev(dev);
6405 dev_init_scheduler(dev);
6407 list_netdevice(dev);
6408 add_device_randomness(dev->dev_addr, dev->addr_len);
6410 /* If the device has permanent device address, driver should
6411 * set dev_addr and also addr_assign_type should be set to
6412 * NET_ADDR_PERM (default value).
6414 if (dev->addr_assign_type == NET_ADDR_PERM)
6415 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6417 /* Notify protocols, that a new device appeared. */
6418 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
6419 ret = notifier_to_errno(ret);
6421 rollback_registered(dev);
6422 dev->reg_state = NETREG_UNREGISTERED;
6425 * Prevent userspace races by waiting until the network
6426 * device is fully setup before sending notifications.
6428 if (!dev->rtnl_link_ops ||
6429 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6430 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6436 if (dev->netdev_ops->ndo_uninit)
6437 dev->netdev_ops->ndo_uninit(dev);
6440 EXPORT_SYMBOL(register_netdevice);
6443 * init_dummy_netdev - init a dummy network device for NAPI
6444 * @dev: device to init
6446 * This takes a network device structure and initializes the minimum
6447 * number of fields so it can be used to schedule NAPI polls without
6448 * registering a full blown interface. This is to be used by drivers
6449 * that need to tie several hardware interfaces to a single NAPI
6450 * poll scheduler due to HW limitations.
6452 int init_dummy_netdev(struct net_device *dev)
6454 /* Clear everything. Note we don't initialize spinlocks
6455 * as they aren't supposed to be taken by any of the
6456 * NAPI code and this dummy netdev is supposed to be
6457 * only ever used for NAPI polls
6459 memset(dev, 0, sizeof(struct net_device));
6461 /* make sure we BUG if trying to hit standard
6462 * register/unregister code path
6464 dev->reg_state = NETREG_DUMMY;
6466 /* NAPI wants this */
6467 INIT_LIST_HEAD(&dev->napi_list);
6469 /* a dummy interface is started by default */
6470 set_bit(__LINK_STATE_PRESENT, &dev->state);
6471 set_bit(__LINK_STATE_START, &dev->state);
6473 /* Note : We don't allocate pcpu_refcnt for dummy devices,
6474 * because users of this 'device' don't need to change its refcount.
6480 EXPORT_SYMBOL_GPL(init_dummy_netdev);
6484 * register_netdev - register a network device
6485 * @dev: device to register
6487 * Take a completed network device structure and add it to the kernel
6488 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6489 * chain. 0 is returned on success. A negative errno code is returned
6490 * on a failure to set up the device, or if the name is a duplicate.
6492 * This is a wrapper around register_netdevice that takes the rtnl semaphore
6493 * and expands the device name if you passed a format string to alloc_netdev.
6496 int register_netdev(struct net_device *dev)
6501 err = register_netdevice(dev);
6505 EXPORT_SYMBOL(register_netdev);
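/* Usage note (a hedged sketch): a typical Ethernet driver pairs the
 * allocation, registration and teardown like this. struct my_priv and
 * my_netdev_ops are hypothetical, driver-supplied names.
 *
 *	dev = alloc_etherdev(sizeof(struct my_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */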
6507 int netdev_refcnt_read(const struct net_device *dev)
6511 for_each_possible_cpu(i)
6512 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6515 EXPORT_SYMBOL(netdev_refcnt_read);
6518 * netdev_wait_allrefs - wait until all references are gone.
6519 * @dev: target net_device
6521 * This is called when unregistering network devices.
6523 * Any protocol or device that holds a reference should register
6524 * for netdevice notification, and cleanup and put back the
6525 * reference if they receive an UNREGISTER event.
6526 * We can get stuck here if buggy protocols don't correctly call dev_put.
6529 static void netdev_wait_allrefs(struct net_device *dev)
6531 unsigned long rebroadcast_time, warning_time;
6534 linkwatch_forget_dev(dev);
6536 rebroadcast_time = warning_time = jiffies;
6537 refcnt = netdev_refcnt_read(dev);
6539 while (refcnt != 0) {
6540 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6543 /* Rebroadcast unregister notification */
6544 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6550 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6551 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6553 /* We must not have linkwatch events
6554 * pending on unregister. If this
6555 * happens, we simply run the queue
6556 * unscheduled, resulting in a noop for this device.
6559 linkwatch_run_queue();
6564 rebroadcast_time = jiffies;
6569 refcnt = netdev_refcnt_read(dev);
6571 if (time_after(jiffies, warning_time + 10 * HZ)) {
6572 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6574 warning_time = jiffies;
6583 * register_netdevice(x1);
6584 * register_netdevice(x2);
6586 * unregister_netdevice(y1);
6587 * unregister_netdevice(y2);
6593 * We are invoked by rtnl_unlock().
6594 * This allows us to deal with problems:
6595 * 1) We can delete sysfs objects which invoke hotplug
6596 * without deadlocking with linkwatch via keventd.
6597 * 2) Since we run with the RTNL semaphore not held, we can sleep
6598 * safely in order to wait for the netdev refcnt to drop to zero.
6600 * We must not return until all unregister events added during
6601 * the interval the lock was held have been completed.
6603 void netdev_run_todo(void)
6605 struct list_head list;
6607 /* Snapshot list, allow later requests */
6608 list_replace_init(&net_todo_list, &list);
6613 /* Wait for rcu callbacks to finish before next phase */
6614 if (!list_empty(&list))
6617 while (!list_empty(&list)) {
6618 struct net_device *dev
6619 = list_first_entry(&list, struct net_device, todo_list);
6620 list_del(&dev->todo_list);
6623 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6626 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
6627 pr_err("network todo '%s' but state %d\n",
6628 dev->name, dev->reg_state);
6633 dev->reg_state = NETREG_UNREGISTERED;
6635 on_each_cpu(flush_backlog, dev, 1);
6637 netdev_wait_allrefs(dev);
6640 BUG_ON(netdev_refcnt_read(dev));
6641 BUG_ON(!list_empty(&dev->ptype_all));
6642 BUG_ON(!list_empty(&dev->ptype_specific));
6643 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6644 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
6645 WARN_ON(dev->dn_ptr);
6647 if (dev->destructor)
6648 dev->destructor(dev);
6650 /* Report a network device has been unregistered */
6652 dev_net(dev)->dev_unreg_count--;
6654 wake_up(&netdev_unregistering_wq);
6656 /* Free network device */
6657 kobject_put(&dev->dev.kobj);
6661 /* Convert net_device_stats to rtnl_link_stats64. They have the same
6662 * fields in the same order, with only the type differing.
6664 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6665 const struct net_device_stats *netdev_stats)
6667 #if BITS_PER_LONG == 64
6668 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6669 memcpy(stats64, netdev_stats, sizeof(*stats64));
6671 size_t i, n = sizeof(*stats64) / sizeof(u64);
6672 const unsigned long *src = (const unsigned long *)netdev_stats;
6673 u64 *dst = (u64 *)stats64;
6675 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6676 sizeof(*stats64) / sizeof(u64));
6677 for (i = 0; i < n; i++)
6681 EXPORT_SYMBOL(netdev_stats_to_stats64);
6684 * dev_get_stats - get network device statistics
6685 * @dev: device to get statistics from
6686 * @storage: place to store stats
6688 * Get network statistics from device. Return @storage.
6689 * The device driver may provide its own method by setting
6690 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6691 * otherwise the internal statistics structure is used.
6693 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6694 struct rtnl_link_stats64 *storage)
6696 const struct net_device_ops *ops = dev->netdev_ops;
6698 if (ops->ndo_get_stats64) {
6699 memset(storage, 0, sizeof(*storage));
6700 ops->ndo_get_stats64(dev, storage);
6701 } else if (ops->ndo_get_stats) {
6702 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
6704 netdev_stats_to_stats64(storage, &dev->stats);
6706 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
6707 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
6710 EXPORT_SYMBOL(dev_get_stats);
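/* Usage note (a hedged sketch): callers provide the storage themselves
 * and must make sure the device cannot go away for the duration of the
 * call (e.g. by holding RTNL or a reference taken with dev_hold()).
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu rx packets, %llu tx packets\n", dev->name,
 *		stats.rx_packets, stats.tx_packets);
 */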
6712 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6714 struct netdev_queue *queue = dev_ingress_queue(dev);
6716 #ifdef CONFIG_NET_CLS_ACT
6719 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6722 netdev_init_one_queue(dev, queue, NULL);
6723 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
6724 queue->qdisc_sleeping = &noop_qdisc;
6725 rcu_assign_pointer(dev->ingress_queue, queue);
6730 static const struct ethtool_ops default_ethtool_ops;
6732 void netdev_set_default_ethtool_ops(struct net_device *dev,
6733 const struct ethtool_ops *ops)
6735 if (dev->ethtool_ops == &default_ethtool_ops)
6736 dev->ethtool_ops = ops;
6738 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6740 void netdev_freemem(struct net_device *dev)
6742 char *addr = (char *)dev - dev->padded;
6748 * alloc_netdev_mqs - allocate network device
6749 * @sizeof_priv: size of private data to allocate space for
6750 * @name: device name format string
6751 * @name_assign_type: origin of device name
6752 * @setup: callback to initialize device
6753 * @txqs: the number of TX subqueues to allocate
6754 * @rxqs: the number of RX subqueues to allocate
6756 * Allocates a struct net_device with private data area for driver use
6757 * and performs basic initialization. Also allocates subqueue structs
6758 * for each queue on the device.
6760 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6761 unsigned char name_assign_type,
6762 void (*setup)(struct net_device *),
6763 unsigned int txqs, unsigned int rxqs)
6765 struct net_device *dev;
6767 struct net_device *p;
6769 BUG_ON(strlen(name) >= sizeof(dev->name));
6772 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6778 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6783 alloc_size = sizeof(struct net_device);
6785 /* ensure 32-byte alignment of private area */
6786 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6787 alloc_size += sizeof_priv;
6789 /* ensure 32-byte alignment of whole construct */
6790 alloc_size += NETDEV_ALIGN - 1;
6792 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6794 p = vzalloc(alloc_size);
6798 dev = PTR_ALIGN(p, NETDEV_ALIGN);
6799 dev->padded = (char *)dev - (char *)p;
6801 dev->pcpu_refcnt = alloc_percpu(int);
6802 if (!dev->pcpu_refcnt)
6805 if (dev_addr_init(dev))
6811 dev_net_set(dev, &init_net);
6813 dev->gso_max_size = GSO_MAX_SIZE;
6814 dev->gso_max_segs = GSO_MAX_SEGS;
6815 dev->gso_min_segs = 0;
6817 INIT_LIST_HEAD(&dev->napi_list);
6818 INIT_LIST_HEAD(&dev->unreg_list);
6819 INIT_LIST_HEAD(&dev->close_list);
6820 INIT_LIST_HEAD(&dev->link_watch_list);
6821 INIT_LIST_HEAD(&dev->adj_list.upper);
6822 INIT_LIST_HEAD(&dev->adj_list.lower);
6823 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6824 INIT_LIST_HEAD(&dev->all_adj_list.lower);
6825 INIT_LIST_HEAD(&dev->ptype_all);
6826 INIT_LIST_HEAD(&dev->ptype_specific);
6827 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
6830 dev->num_tx_queues = txqs;
6831 dev->real_num_tx_queues = txqs;
6832 if (netif_alloc_netdev_queues(dev))
6836 dev->num_rx_queues = rxqs;
6837 dev->real_num_rx_queues = rxqs;
6838 if (netif_alloc_rx_queues(dev))
6842 strcpy(dev->name, name);
6843 dev->name_assign_type = name_assign_type;
6844 dev->group = INIT_NETDEV_GROUP;
6845 if (!dev->ethtool_ops)
6846 dev->ethtool_ops = &default_ethtool_ops;
6854 free_percpu(dev->pcpu_refcnt);
6856 netdev_freemem(dev);
6859 EXPORT_SYMBOL(alloc_netdev_mqs);
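/* Usage note (a hedged sketch): a multiqueue Ethernet-like device with
 * eight TX and eight RX queues could be allocated as below; "myeth%d",
 * struct my_priv and the queue counts are purely illustrative.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "myeth%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */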
6862 * free_netdev - free network device
6865 * This function does the last stage of destroying an allocated device
6866 * interface. The reference to the device object is released.
6867 * If this is the last reference then it will be freed.
6869 void free_netdev(struct net_device *dev)
6871 struct napi_struct *p, *n;
6873 netif_free_tx_queues(dev);
6878 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6880 /* Flush device addresses */
6881 dev_addr_flush(dev);
6883 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6886 free_percpu(dev->pcpu_refcnt);
6887 dev->pcpu_refcnt = NULL;
6889 /* Compatibility with error handling in drivers */
6890 if (dev->reg_state == NETREG_UNINITIALIZED) {
6891 netdev_freemem(dev);
6895 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6896 dev->reg_state = NETREG_RELEASED;
6898 /* will free via device release */
6899 put_device(&dev->dev);
6901 EXPORT_SYMBOL(free_netdev);
6904 * synchronize_net - Synchronize with packet receive processing
6906 * Wait for packets currently being received to be done.
6907 * Does not block later packets from starting.
6909 void synchronize_net(void)
6912 if (rtnl_is_locked())
6913 synchronize_rcu_expedited();
6917 EXPORT_SYMBOL(synchronize_net);
6920 * unregister_netdevice_queue - remove device from the kernel
6924 * This function shuts down a device interface and removes it
6925 * from the kernel tables.
6926 * If @head is not NULL, the device is queued to be unregistered later.
6928 * Callers must hold the rtnl semaphore. You may want
6929 * unregister_netdev() instead of this.
6932 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6937 list_move_tail(&dev->unreg_list, head);
6939 rollback_registered(dev);
6940 /* Finish processing unregister after unlock */
6944 EXPORT_SYMBOL(unregister_netdevice_queue);
6947 * unregister_netdevice_many - unregister many devices
6948 * @head: list of devices
6950 * Note: As most callers use a stack allocated list_head,
6951 * we force a list_del() to make sure stack won't be corrupted later.
6953 void unregister_netdevice_many(struct list_head *head)
6955 struct net_device *dev;
6957 if (!list_empty(head)) {
6958 rollback_registered_many(head);
6959 list_for_each_entry(dev, head, unreg_list)
6964 EXPORT_SYMBOL(unregister_netdevice_many);
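/* Usage note (a hedged sketch, with dev1/dev2 as hypothetical devices):
 * batching several unregistrations under a single RTNL section amortizes
 * the synchronization cost.
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */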
6967 * unregister_netdev - remove device from the kernel
6970 * This function shuts down a device interface and removes it
6971 * from the kernel tables.
6973 * This is just a wrapper for unregister_netdevice that takes
6974 * the rtnl semaphore. In general you want to use this and not
6975 * unregister_netdevice.
6977 void unregister_netdev(struct net_device *dev)
6980 unregister_netdevice(dev);
6983 EXPORT_SYMBOL(unregister_netdev);
6986 * dev_change_net_namespace - move device to a different network namespace
6988 * @net: network namespace
6989 * @pat: If not NULL name pattern to try if the current device name
6990 * is already taken in the destination network namespace.
6992 * This function shuts down a device interface and moves it
6993 * to a new network namespace. On success 0 is returned, on
6994 * a failure a negative errno code is returned.
6996 * Callers must hold the rtnl semaphore.
6999 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7005 /* Don't allow namespace local devices to be moved. */
7007 if (dev->features & NETIF_F_NETNS_LOCAL)
7010 /* Ensure the device has been registered */
7011 if (dev->reg_state != NETREG_REGISTERED)
7014 /* Get out if there is nothing to do */
7016 if (net_eq(dev_net(dev), net))
7019 /* Pick the destination device name, and ensure
7020 * we can use it in the destination network namespace.
7023 if (__dev_get_by_name(net, dev->name)) {
7024 /* We get here if we can't use the current device name */
7027 if (dev_get_valid_name(net, dev, pat) < 0)
7032 * And now a mini version of register_netdevice and unregister_netdevice.
7035 /* If device is running close it first. */
7038 /* And unlink it from device chain */
7040 unlist_netdevice(dev);
7044 /* Shutdown queueing discipline. */
7047 /* Notify protocols, that we are about to destroy
7048 this device. They should clean all the things.
7050 Note that dev->reg_state stays at NETREG_REGISTERED.
7051 This is wanted because this way 8021q and macvlan know
7052 the device is just moving and can keep their slaves up.
7054 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7056 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7057 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
7060 * Flush the unicast and multicast chains
7065 /* Send a netdev-removed uevent to the old namespace */
7066 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
7067 netdev_adjacent_del_links(dev);
7069 /* Actually switch the network namespace */
7070 dev_net_set(dev, net);
7072 /* If there is an ifindex conflict assign a new one */
7073 if (__dev_get_by_index(net, dev->ifindex))
7074 dev->ifindex = dev_new_index(net);
7076 /* Send a netdev-add uevent to the new namespace */
7077 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
7078 netdev_adjacent_add_links(dev);
7080 /* Fixup kobjects */
7081 err = device_rename(&dev->dev, dev->name);
7084 /* Add the device back in the hashes */
7085 list_netdevice(dev);
7087 /* Notify protocols, that a new device appeared. */
7088 call_netdevice_notifiers(NETDEV_REGISTER, dev);
7091 * Prevent userspace races by waiting until the network
7092 * device is fully setup before sending notifications.
7094 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
7101 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
7103 static int dev_cpu_callback(struct notifier_block *nfb,
7104 unsigned long action,
7107 struct sk_buff **list_skb;
7108 struct sk_buff *skb;
7109 unsigned int cpu, oldcpu = (unsigned long)ocpu;
7110 struct softnet_data *sd, *oldsd;
7112 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
7115 local_irq_disable();
7116 cpu = smp_processor_id();
7117 sd = &per_cpu(softnet_data, cpu);
7118 oldsd = &per_cpu(softnet_data, oldcpu);
7120 /* Find end of our completion_queue. */
7121 list_skb = &sd->completion_queue;
7123 list_skb = &(*list_skb)->next;
7124 /* Append completion queue from offline CPU. */
7125 *list_skb = oldsd->completion_queue;
7126 oldsd->completion_queue = NULL;
7128 /* Append output queue from offline CPU. */
7129 if (oldsd->output_queue) {
7130 *sd->output_queue_tailp = oldsd->output_queue;
7131 sd->output_queue_tailp = oldsd->output_queue_tailp;
7132 oldsd->output_queue = NULL;
7133 oldsd->output_queue_tailp = &oldsd->output_queue;
7135 /* Append NAPI poll list from offline CPU, with one exception :
7136 * process_backlog() must be called by cpu owning percpu backlog.
7137 * We properly handle process_queue & input_pkt_queue later.
7139 while (!list_empty(&oldsd->poll_list)) {
7140 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7144 list_del_init(&napi->poll_list);
7145 if (napi->poll == process_backlog)
7148 ____napi_schedule(sd, napi);
7151 raise_softirq_irqoff(NET_TX_SOFTIRQ);
7154 /* Process offline CPU's input_pkt_queue */
7155 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
7157 input_queue_head_incr(oldsd);
7159 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
7161 input_queue_head_incr(oldsd);
7169 * netdev_increment_features - increment feature set by one
7170 * @all: current feature set
7171 * @one: new feature set
7172 * @mask: mask feature set
7174 * Computes a new feature set after adding a device with feature set
7175 * @one to the master device with current feature set @all. Will not
7176 * enable anything that is off in @mask. Returns the new feature set.
7178 netdev_features_t netdev_increment_features(netdev_features_t all,
7179 netdev_features_t one, netdev_features_t mask)
7181 if (mask & NETIF_F_GEN_CSUM)
7182 mask |= NETIF_F_ALL_CSUM;
7183 mask |= NETIF_F_VLAN_CHALLENGED;
7185 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
7186 all &= one | ~NETIF_F_ALL_FOR_ALL;
7188 /* If one device supports hw checksumming, set for all. */
7189 if (all & NETIF_F_GEN_CSUM)
7190 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
7194 EXPORT_SYMBOL(netdev_increment_features);
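/* Usage note (a hedged sketch, with slave/slaves as hypothetical names):
 * a master device folding in the features of its slaves accumulates them
 * like this; "mask" is the driver-chosen set of features the master is
 * willing to pass through.
 *
 *	netdev_features_t features = mask;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */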
7196 static struct hlist_head * __net_init netdev_create_hash(void)
7199 struct hlist_head *hash;
7201 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7203 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7204 INIT_HLIST_HEAD(&hash[i]);
7209 /* Initialize per network namespace state */
7210 static int __net_init netdev_init(struct net *net)
7212 if (net != &init_net)
7213 INIT_LIST_HEAD(&net->dev_base_head);
7215 net->dev_name_head = netdev_create_hash();
7216 if (net->dev_name_head == NULL)
7219 net->dev_index_head = netdev_create_hash();
7220 if (net->dev_index_head == NULL)
7226 kfree(net->dev_name_head);
7232 * netdev_drivername - network driver for the device
7233 * @dev: network device
7235 * Determine network driver for device.
7237 const char *netdev_drivername(const struct net_device *dev)
7239 const struct device_driver *driver;
7240 const struct device *parent;
7241 const char *empty = "";
7243 parent = dev->dev.parent;
7247 driver = parent->driver;
7248 if (driver && driver->name)
7249 return driver->name;
7253 static void __netdev_printk(const char *level, const struct net_device *dev,
7254 struct va_format *vaf)
7256 if (dev && dev->dev.parent) {
7257 dev_printk_emit(level[1] - '0',
7260 dev_driver_string(dev->dev.parent),
7261 dev_name(dev->dev.parent),
7262 netdev_name(dev), netdev_reg_state(dev),
7265 printk("%s%s%s: %pV",
7266 level, netdev_name(dev), netdev_reg_state(dev), vaf);
7268 printk("%s(NULL net_device): %pV", level, vaf);
7272 void netdev_printk(const char *level, const struct net_device *dev,
7273 const char *format, ...)
7275 struct va_format vaf;
7278 va_start(args, format);
7283 __netdev_printk(level, dev, &vaf);
7287 EXPORT_SYMBOL(netdev_printk);
7289 #define define_netdev_printk_level(func, level) \
7290 void func(const struct net_device *dev, const char *fmt, ...) \
7292 struct va_format vaf; \
7295 va_start(args, fmt); \
7300 __netdev_printk(level, dev, &vaf); \
7304 EXPORT_SYMBOL(func);
7306 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7307 define_netdev_printk_level(netdev_alert, KERN_ALERT);
7308 define_netdev_printk_level(netdev_crit, KERN_CRIT);
7309 define_netdev_printk_level(netdev_err, KERN_ERR);
7310 define_netdev_printk_level(netdev_warn, KERN_WARNING);
7311 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7312 define_netdev_printk_level(netdev_info, KERN_INFO);
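/* Usage note (a hedged sketch): these helpers take a net_device and a
 * printf-style format, and prefix the message with the driver and device
 * name, e.g.:
 *
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 *	netdev_warn(dev, "TX timeout on queue %d\n", qidx);
 *
 * where speed and qidx are hypothetical local variables.
 */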
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network device
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the list of
	 * network devices, so that it is the first device to appear
	 * and the last network device to disappear.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);