[linux.git] blame: net/core/dev.c
1da177e4
LT
1/*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
02c30a84 10 * Authors: Ross Biro
1da177e4
LT
11 * Fred N. van Kempen, <[email protected]>
12 * Mark Evans, <[email protected]>
13 *
14 * Additional Authors:
15 * Florian la Roche <[email protected]>
16 * Alan Cox <[email protected]>
17 * David Hinds <[email protected]>
18 * Alexey Kuznetsov <[email protected]>
19 * Adam Sulmicki <[email protected]>
20 * Pekka Riikonen <[email protected]>
21 *
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
73 */
74
75#include <asm/uaccess.h>
1da177e4 76#include <linux/bitops.h>
4fc268d2 77#include <linux/capability.h>
1da177e4
LT
78#include <linux/cpu.h>
79#include <linux/types.h>
80#include <linux/kernel.h>
08e9897d 81#include <linux/hash.h>
5a0e3ad6 82#include <linux/slab.h>
1da177e4 83#include <linux/sched.h>
4a3e2f71 84#include <linux/mutex.h>
1da177e4
LT
85#include <linux/string.h>
86#include <linux/mm.h>
87#include <linux/socket.h>
88#include <linux/sockios.h>
89#include <linux/errno.h>
90#include <linux/interrupt.h>
91#include <linux/if_ether.h>
92#include <linux/netdevice.h>
93#include <linux/etherdevice.h>
0187bdfb 94#include <linux/ethtool.h>
1da177e4
LT
95#include <linux/notifier.h>
96#include <linux/skbuff.h>
457c4cbc 97#include <net/net_namespace.h>
1da177e4
LT
98#include <net/sock.h>
99#include <linux/rtnetlink.h>
1da177e4 100#include <linux/stat.h>
1da177e4
LT
101#include <net/dst.h>
102#include <net/pkt_sched.h>
103#include <net/checksum.h>
44540960 104#include <net/xfrm.h>
1da177e4
LT
105#include <linux/highmem.h>
106#include <linux/init.h>
1da177e4 107#include <linux/module.h>
1da177e4
LT
108#include <linux/netpoll.h>
109#include <linux/rcupdate.h>
110#include <linux/delay.h>
1da177e4 111#include <net/iw_handler.h>
1da177e4 112#include <asm/current.h>
5bdb9886 113#include <linux/audit.h>
db217334 114#include <linux/dmaengine.h>
f6a78bfc 115#include <linux/err.h>
c7fa9d18 116#include <linux/ctype.h>
723e98b7 117#include <linux/if_arp.h>
6de329e2 118#include <linux/if_vlan.h>
8f0f2223 119#include <linux/ip.h>
ad55dcaf 120#include <net/ip.h>
25cd9ba0 121#include <net/mpls.h>
8f0f2223
DM
122#include <linux/ipv6.h>
123#include <linux/in.h>
b6b2fed1
DM
124#include <linux/jhash.h>
125#include <linux/random.h>
9cbc1cb8 126#include <trace/events/napi.h>
cf66ba58 127#include <trace/events/net.h>
07dc22e7 128#include <trace/events/skb.h>
5acbbd42 129#include <linux/pci.h>
caeda9b9 130#include <linux/inetdevice.h>
c445477d 131#include <linux/cpu_rmap.h>
c5905afb 132#include <linux/static_key.h>
af12fa6e 133#include <linux/hashtable.h>
60877a32 134#include <linux/vmalloc.h>
529d0489 135#include <linux/if_macvlan.h>
e7fd2885 136#include <linux/errqueue.h>
3b47d303 137#include <linux/hrtimer.h>
e687ad60 138#include <linux/netfilter_ingress.h>
1da177e4 139
342709ef
PE
140#include "net-sysfs.h"
141
d565b0a1
HX
142/* Instead of increasing this, you should create a hash table. */
143#define MAX_GRO_SKBS 8
144
5d38a079
HX
145/* This should be increased if a protocol with a bigger head is added. */
146#define GRO_MAX_HEAD (MAX_HEADER + 128)
147
1da177e4 148static DEFINE_SPINLOCK(ptype_lock);
62532da9 149static DEFINE_SPINLOCK(offload_lock);
900ff8c6
CW
150struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
151struct list_head ptype_all __read_mostly; /* Taps */
62532da9 152static struct list_head offload_base __read_mostly;
1da177e4 153
ae78dbfa 154static int netif_rx_internal(struct sk_buff *skb);
54951194
LP
155static int call_netdevice_notifiers_info(unsigned long val,
156 struct net_device *dev,
157 struct netdev_notifier_info *info);
ae78dbfa 158
1da177e4 159/*
7562f876 160 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
1da177e4
LT
161 * semaphore.
162 *
c6d14c84 163 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
1da177e4
LT
164 *
165 * Writers must hold the rtnl semaphore while they loop through the
7562f876 166 * dev_base_head list, and hold dev_base_lock for writing when they do the
1da177e4
LT
167 * actual updates. This allows pure readers to access the list even
168 * while a writer is preparing to update it.
169 *
170 * To put it another way, dev_base_lock is held for writing only to
171 * protect against pure readers; the rtnl semaphore provides the
172 * protection against other writers.
173 *
174 * See, for example usages, register_netdevice() and
175 * unregister_netdevice(), which must be called with the rtnl
176 * semaphore held.
177 */
1da177e4 178DEFINE_RWLOCK(dev_base_lock);
1da177e4
LT
179EXPORT_SYMBOL(dev_base_lock);
180
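/* Illustrative sketch, not part of this file: a pure reader walking the
 * per-namespace device list under RCU, as permitted by the locking rules
 * described above.  The function name and pr_info() output are hypothetical;
 * a reader could equally take dev_base_lock for reading instead.
 */
static void example_dump_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
	rcu_read_unlock();
}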
af12fa6e
ET
181/* protects napi_hash addition/deletion and napi_gen_id */
182static DEFINE_SPINLOCK(napi_hash_lock);
183
184static unsigned int napi_gen_id;
185static DEFINE_HASHTABLE(napi_hash, 8);
186
18afa4b0 187static seqcount_t devnet_rename_seq;
c91f6df2 188
4e985ada
TG
189static inline void dev_base_seq_inc(struct net *net)
190{
191 while (++net->dev_base_seq == 0);
192}
193
881d966b 194static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
1da177e4 195{
95c96174
ED
196 unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
197
08e9897d 198 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
1da177e4
LT
199}
200
881d966b 201static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
1da177e4 202{
7c28bd0b 203 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
1da177e4
LT
204}
205
e36fa2f7 206static inline void rps_lock(struct softnet_data *sd)
152102c7
CG
207{
208#ifdef CONFIG_RPS
e36fa2f7 209 spin_lock(&sd->input_pkt_queue.lock);
152102c7
CG
210#endif
211}
212
e36fa2f7 213static inline void rps_unlock(struct softnet_data *sd)
152102c7
CG
214{
215#ifdef CONFIG_RPS
e36fa2f7 216 spin_unlock(&sd->input_pkt_queue.lock);
152102c7
CG
217#endif
218}
219
ce286d32 220/* Device list insertion */
53759be9 221static void list_netdevice(struct net_device *dev)
ce286d32 222{
c346dca1 223 struct net *net = dev_net(dev);
ce286d32
EB
224
225 ASSERT_RTNL();
226
227 write_lock_bh(&dev_base_lock);
c6d14c84 228 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
72c9528b 229 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
fb699dfd
ED
230 hlist_add_head_rcu(&dev->index_hlist,
231 dev_index_hash(net, dev->ifindex));
ce286d32 232 write_unlock_bh(&dev_base_lock);
4e985ada
TG
233
234 dev_base_seq_inc(net);
ce286d32
EB
235}
236
fb699dfd
ED
237/* Device list removal
238 * caller must respect a RCU grace period before freeing/reusing dev
239 */
ce286d32
EB
240static void unlist_netdevice(struct net_device *dev)
241{
242 ASSERT_RTNL();
243
244 /* Unlink dev from the device chain */
245 write_lock_bh(&dev_base_lock);
c6d14c84 246 list_del_rcu(&dev->dev_list);
72c9528b 247 hlist_del_rcu(&dev->name_hlist);
fb699dfd 248 hlist_del_rcu(&dev->index_hlist);
ce286d32 249 write_unlock_bh(&dev_base_lock);
4e985ada
TG
250
251 dev_base_seq_inc(dev_net(dev));
ce286d32
EB
252}
253
1da177e4
LT
254/*
255 * Our notifier list
256 */
257
f07d5b94 258static RAW_NOTIFIER_HEAD(netdev_chain);
1da177e4
LT
259
260/*
261 * Device drivers call our routines to queue packets here. We empty the
262 * queue in the local softnet handler.
263 */
bea3348e 264
9958da05 265DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
d1b19dff 266EXPORT_PER_CPU_SYMBOL(softnet_data);
1da177e4 267
cf508b12 268#ifdef CONFIG_LOCKDEP
723e98b7 269/*
c773e847 270 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
723e98b7
JP
271 * according to dev->type
272 */
273static const unsigned short netdev_lock_type[] =
274 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
275 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
276 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
277 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
278 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
279 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
280 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
281 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
282 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
283 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
284 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
285 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
211ed865
PG
286 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
287 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
288 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
723e98b7 289
36cbd3dc 290static const char *const netdev_lock_name[] =
723e98b7
JP
291 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
292 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
293 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
294 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
295 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
296 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
297 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
298 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
299 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
300 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
301 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
302 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
211ed865
PG
303 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
304 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
305 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
723e98b7
JP
306
307static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
cf508b12 308static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
723e98b7
JP
309
310static inline unsigned short netdev_lock_pos(unsigned short dev_type)
311{
312 int i;
313
314 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
315 if (netdev_lock_type[i] == dev_type)
316 return i;
317 /* the last key is used by default */
318 return ARRAY_SIZE(netdev_lock_type) - 1;
319}
320
cf508b12
DM
321static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
322 unsigned short dev_type)
723e98b7
JP
323{
324 int i;
325
326 i = netdev_lock_pos(dev_type);
327 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
328 netdev_lock_name[i]);
329}
cf508b12
DM
330
331static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
332{
333 int i;
334
335 i = netdev_lock_pos(dev->type);
336 lockdep_set_class_and_name(&dev->addr_list_lock,
337 &netdev_addr_lock_key[i],
338 netdev_lock_name[i]);
339}
723e98b7 340#else
cf508b12
DM
341static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
342 unsigned short dev_type)
343{
344}
345static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
723e98b7
JP
346{
347}
348#endif
1da177e4
LT
349
350/*******************************************************************************
351
352 Protocol management and registration routines
353
354*******************************************************************************/
355
1da177e4
LT
356/*
357 * Add a protocol ID to the list. Now that the input handler is
358 * smarter we can dispense with all the messy stuff that used to be
359 * here.
360 *
361 * BEWARE!!! Protocol handlers, mangling input packets,
362 * MUST BE last in hash buckets and checking protocol handlers
363 * MUST start from promiscuous ptype_all chain in net_bh.
364 * It is true now, do not change it.
365 * Explanation follows: if protocol handler, mangling packet, will
366 * be the first on list, it is not able to sense, that packet
367 * is cloned and should be copied-on-write, so that it will
368 * change it and subsequent readers will get broken packet.
369 * --ANK (980803)
370 */
371
c07b68e8
ED
372static inline struct list_head *ptype_head(const struct packet_type *pt)
373{
374 if (pt->type == htons(ETH_P_ALL))
7866a621 375 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
c07b68e8 376 else
7866a621
SN
377 return pt->dev ? &pt->dev->ptype_specific :
378 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
c07b68e8
ED
379}
380
1da177e4
LT
381/**
382 * dev_add_pack - add packet handler
383 * @pt: packet type declaration
384 *
385 * Add a protocol handler to the networking stack. The passed &packet_type
386 * is linked into kernel lists and may not be freed until it has been
387 * removed from the kernel lists.
388 *
4ec93edb 389 * This call does not sleep, therefore it cannot
1da177e4
LT
 390 * guarantee that all CPUs that are in the middle of receiving packets
 391 * will see the new packet type (until the next received packet).
 392 */
393
394void dev_add_pack(struct packet_type *pt)
395{
c07b68e8 396 struct list_head *head = ptype_head(pt);
1da177e4 397
c07b68e8
ED
398 spin_lock(&ptype_lock);
399 list_add_rcu(&pt->list, head);
400 spin_unlock(&ptype_lock);
1da177e4 401}
d1b19dff 402EXPORT_SYMBOL(dev_add_pack);
1da177e4 403
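/* Illustrative sketch, not part of this file: a module registering a tap
 * that sees every frame (ETH_P_ALL).  example_rcv() and example_pt are
 * hypothetical names; dev_remove_pack(&example_pt) undoes the registration.
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* The tap owns this skb reference; free it when done. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
	.type = cpu_to_be16(ETH_P_ALL),
	.func = example_rcv,
};

/* dev_add_pack(&example_pt) would typically be called from module init. */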
1da177e4
LT
404/**
405 * __dev_remove_pack - remove packet handler
406 * @pt: packet type declaration
407 *
408 * Remove a protocol handler that was previously added to the kernel
409 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
410 * from the kernel lists and can be freed or reused once this function
4ec93edb 411 * returns.
1da177e4
LT
412 *
413 * The packet type might still be in use by receivers
 414 * and must not be freed until after all the CPUs have gone
415 * through a quiescent state.
416 */
417void __dev_remove_pack(struct packet_type *pt)
418{
c07b68e8 419 struct list_head *head = ptype_head(pt);
1da177e4
LT
420 struct packet_type *pt1;
421
c07b68e8 422 spin_lock(&ptype_lock);
1da177e4
LT
423
424 list_for_each_entry(pt1, head, list) {
425 if (pt == pt1) {
426 list_del_rcu(&pt->list);
427 goto out;
428 }
429 }
430
7b6cd1ce 431 pr_warn("dev_remove_pack: %p not found\n", pt);
1da177e4 432out:
c07b68e8 433 spin_unlock(&ptype_lock);
1da177e4 434}
d1b19dff
ED
435EXPORT_SYMBOL(__dev_remove_pack);
436
1da177e4
LT
437/**
438 * dev_remove_pack - remove packet handler
439 * @pt: packet type declaration
440 *
441 * Remove a protocol handler that was previously added to the kernel
442 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
443 * from the kernel lists and can be freed or reused once this function
444 * returns.
445 *
446 * This call sleeps to guarantee that no CPU is looking at the packet
447 * type after return.
448 */
449void dev_remove_pack(struct packet_type *pt)
450{
451 __dev_remove_pack(pt);
4ec93edb 452
1da177e4
LT
453 synchronize_net();
454}
d1b19dff 455EXPORT_SYMBOL(dev_remove_pack);
1da177e4 456
62532da9
VY
457
458/**
459 * dev_add_offload - register offload handlers
460 * @po: protocol offload declaration
461 *
462 * Add protocol offload handlers to the networking stack. The passed
463 * &proto_offload is linked into kernel lists and may not be freed until
464 * it has been removed from the kernel lists.
465 *
 466 * This call does not sleep, therefore it cannot
 467 * guarantee that all CPUs that are in the middle of receiving packets
 468 * will see the new offload handlers (until the next received packet).
469 */
470void dev_add_offload(struct packet_offload *po)
471{
472 struct list_head *head = &offload_base;
473
474 spin_lock(&offload_lock);
475 list_add_rcu(&po->list, head);
476 spin_unlock(&offload_lock);
477}
478EXPORT_SYMBOL(dev_add_offload);
479
480/**
481 * __dev_remove_offload - remove offload handler
482 * @po: packet offload declaration
483 *
484 * Remove a protocol offload handler that was previously added to the
485 * kernel offload handlers by dev_add_offload(). The passed &offload_type
486 * is removed from the kernel lists and can be freed or reused once this
487 * function returns.
488 *
489 * The packet type might still be in use by receivers
 490 * and must not be freed until after all the CPUs have gone
491 * through a quiescent state.
492 */
1d143d9f 493static void __dev_remove_offload(struct packet_offload *po)
62532da9
VY
494{
495 struct list_head *head = &offload_base;
496 struct packet_offload *po1;
497
c53aa505 498 spin_lock(&offload_lock);
62532da9
VY
499
500 list_for_each_entry(po1, head, list) {
501 if (po == po1) {
502 list_del_rcu(&po->list);
503 goto out;
504 }
505 }
506
507 pr_warn("dev_remove_offload: %p not found\n", po);
508out:
c53aa505 509 spin_unlock(&offload_lock);
62532da9 510}
62532da9
VY
511
512/**
513 * dev_remove_offload - remove packet offload handler
514 * @po: packet offload declaration
515 *
516 * Remove a packet offload handler that was previously added to the kernel
517 * offload handlers by dev_add_offload(). The passed &offload_type is
518 * removed from the kernel lists and can be freed or reused once this
519 * function returns.
520 *
521 * This call sleeps to guarantee that no CPU is looking at the packet
522 * type after return.
523 */
524void dev_remove_offload(struct packet_offload *po)
525{
526 __dev_remove_offload(po);
527
528 synchronize_net();
529}
530EXPORT_SYMBOL(dev_remove_offload);
531
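/* Illustrative sketch, not part of this file: a protocol typically registers
 * its GRO/GSO callbacks once at init time with a static packet_offload,
 * e.g. (names and field values hypothetical):
 *
 *	static struct packet_offload example_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment  = example_gso_segment,
 *			.gro_receive  = example_gro_receive,
 *			.gro_complete = example_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&example_offload);	from module init
 *	dev_remove_offload(&example_offload);	from module exit
 */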
1da177e4
LT
532/******************************************************************************
533
534 Device Boot-time Settings Routines
535
536*******************************************************************************/
537
538/* Boot time configuration table */
539static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
540
541/**
542 * netdev_boot_setup_add - add new setup entry
543 * @name: name of the device
544 * @map: configured settings for the device
545 *
 546 * Adds a new setup entry to the dev_boot_setup list. The function
 547 * returns 0 on error and 1 on success. This is a generic routine for
 548 * all netdevices.
549 */
550static int netdev_boot_setup_add(char *name, struct ifmap *map)
551{
552 struct netdev_boot_setup *s;
553 int i;
554
555 s = dev_boot_setup;
556 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
557 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
558 memset(s[i].name, 0, sizeof(s[i].name));
93b3cff9 559 strlcpy(s[i].name, name, IFNAMSIZ);
1da177e4
LT
560 memcpy(&s[i].map, map, sizeof(s[i].map));
561 break;
562 }
563 }
564
565 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
566}
567
568/**
569 * netdev_boot_setup_check - check boot time settings
570 * @dev: the netdevice
571 *
572 * Check boot time settings for the device.
573 * The found settings are set for the device to be used
574 * later in the device probing.
575 * Returns 0 if no settings found, 1 if they are.
576 */
577int netdev_boot_setup_check(struct net_device *dev)
578{
579 struct netdev_boot_setup *s = dev_boot_setup;
580 int i;
581
582 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
583 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
93b3cff9 584 !strcmp(dev->name, s[i].name)) {
1da177e4
LT
585 dev->irq = s[i].map.irq;
586 dev->base_addr = s[i].map.base_addr;
587 dev->mem_start = s[i].map.mem_start;
588 dev->mem_end = s[i].map.mem_end;
589 return 1;
590 }
591 }
592 return 0;
593}
d1b19dff 594EXPORT_SYMBOL(netdev_boot_setup_check);
1da177e4
LT
595
596
597/**
598 * netdev_boot_base - get address from boot time settings
599 * @prefix: prefix for network device
600 * @unit: id for network device
601 *
602 * Check boot time settings for the base address of device.
603 * The found settings are set for the device to be used
604 * later in the device probing.
605 * Returns 0 if no settings found.
606 */
607unsigned long netdev_boot_base(const char *prefix, int unit)
608{
609 const struct netdev_boot_setup *s = dev_boot_setup;
610 char name[IFNAMSIZ];
611 int i;
612
613 sprintf(name, "%s%d", prefix, unit);
614
615 /*
616 * If device already registered then return base of 1
617 * to indicate not to probe for this interface
618 */
881d966b 619 if (__dev_get_by_name(&init_net, name))
1da177e4
LT
620 return 1;
621
622 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
623 if (!strcmp(name, s[i].name))
624 return s[i].map.base_addr;
625 return 0;
626}
627
628/*
629 * Saves at boot time configured settings for any netdevice.
630 */
631int __init netdev_boot_setup(char *str)
632{
633 int ints[5];
634 struct ifmap map;
635
636 str = get_options(str, ARRAY_SIZE(ints), ints);
637 if (!str || !*str)
638 return 0;
639
640 /* Save settings */
641 memset(&map, 0, sizeof(map));
642 if (ints[0] > 0)
643 map.irq = ints[1];
644 if (ints[0] > 1)
645 map.base_addr = ints[2];
646 if (ints[0] > 2)
647 map.mem_start = ints[3];
648 if (ints[0] > 3)
649 map.mem_end = ints[4];
650
651 /* Add new entry to the list */
652 return netdev_boot_setup_add(str, &map);
653}
654
655__setup("netdev=", netdev_boot_setup);
656
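/* Illustrative example, not part of this file: with the parsing above, a
 * kernel command line such as
 *
 *	netdev=5,0x300,0,0,eth1
 *
 * would record irq=5, base_addr=0x300, mem_start=0 and mem_end=0 for the
 * device named "eth1", to be picked up later by netdev_boot_setup_check().
 */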
657/*******************************************************************************
658
659 Device Interface Subroutines
660
661*******************************************************************************/
662
a54acb3a
ND
663/**
 664 * dev_get_iflink - get 'iflink' value of an interface
665 * @dev: targeted interface
666 *
667 * Indicates the ifindex the interface is linked to.
668 * Physical interfaces have the same 'ifindex' and 'iflink' values.
669 */
670
671int dev_get_iflink(const struct net_device *dev)
672{
673 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
674 return dev->netdev_ops->ndo_get_iflink(dev);
675
e1622baf
ND
676 /* If dev->rtnl_link_ops is set, it's a virtual interface. */
677 if (dev->rtnl_link_ops)
678 return 0;
679
7a66bbc9 680 return dev->ifindex;
a54acb3a
ND
681}
682EXPORT_SYMBOL(dev_get_iflink);
683
1da177e4
LT
684/**
685 * __dev_get_by_name - find a device by its name
c4ea43c5 686 * @net: the applicable net namespace
1da177e4
LT
687 * @name: name to find
688 *
689 * Find an interface by name. Must be called under RTNL semaphore
690 * or @dev_base_lock. If the name is found a pointer to the device
691 * is returned. If the name is not found then %NULL is returned. The
692 * reference counters are not incremented so the caller must be
693 * careful with locks.
694 */
695
881d966b 696struct net_device *__dev_get_by_name(struct net *net, const char *name)
1da177e4 697{
0bd8d536
ED
698 struct net_device *dev;
699 struct hlist_head *head = dev_name_hash(net, name);
1da177e4 700
b67bfe0d 701 hlist_for_each_entry(dev, head, name_hlist)
1da177e4
LT
702 if (!strncmp(dev->name, name, IFNAMSIZ))
703 return dev;
0bd8d536 704
1da177e4
LT
705 return NULL;
706}
d1b19dff 707EXPORT_SYMBOL(__dev_get_by_name);
1da177e4 708
72c9528b
ED
709/**
710 * dev_get_by_name_rcu - find a device by its name
711 * @net: the applicable net namespace
712 * @name: name to find
713 *
714 * Find an interface by name.
715 * If the name is found a pointer to the device is returned.
716 * If the name is not found then %NULL is returned.
717 * The reference counters are not incremented so the caller must be
718 * careful with locks. The caller must hold RCU lock.
719 */
720
721struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
722{
72c9528b
ED
723 struct net_device *dev;
724 struct hlist_head *head = dev_name_hash(net, name);
725
b67bfe0d 726 hlist_for_each_entry_rcu(dev, head, name_hlist)
72c9528b
ED
727 if (!strncmp(dev->name, name, IFNAMSIZ))
728 return dev;
729
730 return NULL;
731}
732EXPORT_SYMBOL(dev_get_by_name_rcu);
733
1da177e4
LT
734/**
735 * dev_get_by_name - find a device by its name
c4ea43c5 736 * @net: the applicable net namespace
1da177e4
LT
737 * @name: name to find
738 *
739 * Find an interface by name. This can be called from any
740 * context and does its own locking. The returned handle has
741 * the usage count incremented and the caller must use dev_put() to
742 * release it when it is no longer needed. %NULL is returned if no
743 * matching device is found.
744 */
745
881d966b 746struct net_device *dev_get_by_name(struct net *net, const char *name)
1da177e4
LT
747{
748 struct net_device *dev;
749
72c9528b
ED
750 rcu_read_lock();
751 dev = dev_get_by_name_rcu(net, name);
1da177e4
LT
752 if (dev)
753 dev_hold(dev);
72c9528b 754 rcu_read_unlock();
1da177e4
LT
755 return dev;
756}
d1b19dff 757EXPORT_SYMBOL(dev_get_by_name);
1da177e4
LT
758
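/* Illustrative sketch, not part of this file: the lookup flavours declared
 * above differ only in their locking contract.  Names below are hypothetical.
 */
static void example_lookup(struct net *net)
{
	struct net_device *dev;

	/* Any context: takes its own reference, drop it with dev_put(). */
	dev = dev_get_by_name(net, "eth0");
	if (dev) {
		pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
		dev_put(dev);
	}

	/* RCU reader: no reference taken, the pointer is only valid inside
	 * the read-side critical section.
	 */
	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, "eth0");
	if (dev)
		pr_info("found %s under RCU\n", dev->name);
	rcu_read_unlock();
}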
759/**
760 * __dev_get_by_index - find a device by its ifindex
c4ea43c5 761 * @net: the applicable net namespace
1da177e4
LT
762 * @ifindex: index of device
763 *
764 * Search for an interface by index. Returns %NULL if the device
765 * is not found or a pointer to the device. The device has not
766 * had its reference counter increased so the caller must be careful
767 * about locking. The caller must hold either the RTNL semaphore
768 * or @dev_base_lock.
769 */
770
881d966b 771struct net_device *__dev_get_by_index(struct net *net, int ifindex)
1da177e4 772{
0bd8d536
ED
773 struct net_device *dev;
774 struct hlist_head *head = dev_index_hash(net, ifindex);
1da177e4 775
b67bfe0d 776 hlist_for_each_entry(dev, head, index_hlist)
1da177e4
LT
777 if (dev->ifindex == ifindex)
778 return dev;
0bd8d536 779
1da177e4
LT
780 return NULL;
781}
d1b19dff 782EXPORT_SYMBOL(__dev_get_by_index);
1da177e4 783
fb699dfd
ED
784/**
785 * dev_get_by_index_rcu - find a device by its ifindex
786 * @net: the applicable net namespace
787 * @ifindex: index of device
788 *
789 * Search for an interface by index. Returns %NULL if the device
790 * is not found or a pointer to the device. The device has not
791 * had its reference counter increased so the caller must be careful
792 * about locking. The caller must hold RCU lock.
793 */
794
795struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
796{
fb699dfd
ED
797 struct net_device *dev;
798 struct hlist_head *head = dev_index_hash(net, ifindex);
799
b67bfe0d 800 hlist_for_each_entry_rcu(dev, head, index_hlist)
fb699dfd
ED
801 if (dev->ifindex == ifindex)
802 return dev;
803
804 return NULL;
805}
806EXPORT_SYMBOL(dev_get_by_index_rcu);
807
1da177e4
LT
808
809/**
810 * dev_get_by_index - find a device by its ifindex
c4ea43c5 811 * @net: the applicable net namespace
1da177e4
LT
812 * @ifindex: index of device
813 *
814 * Search for an interface by index. Returns NULL if the device
815 * is not found or a pointer to the device. The device returned has
816 * had a reference added and the pointer is safe until the user calls
817 * dev_put to indicate they have finished with it.
818 */
819
881d966b 820struct net_device *dev_get_by_index(struct net *net, int ifindex)
1da177e4
LT
821{
822 struct net_device *dev;
823
fb699dfd
ED
824 rcu_read_lock();
825 dev = dev_get_by_index_rcu(net, ifindex);
1da177e4
LT
826 if (dev)
827 dev_hold(dev);
fb699dfd 828 rcu_read_unlock();
1da177e4
LT
829 return dev;
830}
d1b19dff 831EXPORT_SYMBOL(dev_get_by_index);
1da177e4 832
5dbe7c17
NS
833/**
834 * netdev_get_name - get a netdevice name, knowing its ifindex.
835 * @net: network namespace
836 * @name: a pointer to the buffer where the name will be stored.
837 * @ifindex: the ifindex of the interface to get the name from.
838 *
839 * The use of raw_seqcount_begin() and cond_resched() before
840 * retrying is required as we want to give the writers a chance
841 * to complete when CONFIG_PREEMPT is not set.
842 */
843int netdev_get_name(struct net *net, char *name, int ifindex)
844{
845 struct net_device *dev;
846 unsigned int seq;
847
848retry:
849 seq = raw_seqcount_begin(&devnet_rename_seq);
850 rcu_read_lock();
851 dev = dev_get_by_index_rcu(net, ifindex);
852 if (!dev) {
853 rcu_read_unlock();
854 return -ENODEV;
855 }
856
857 strcpy(name, dev->name);
858 rcu_read_unlock();
859 if (read_seqcount_retry(&devnet_rename_seq, seq)) {
860 cond_resched();
861 goto retry;
862 }
863
864 return 0;
865}
866
1da177e4 867/**
941666c2 868 * dev_getbyhwaddr_rcu - find a device by its hardware address
c4ea43c5 869 * @net: the applicable net namespace
1da177e4
LT
870 * @type: media type of device
871 * @ha: hardware address
872 *
873 * Search for an interface by MAC address. Returns NULL if the device
c506653d
ED
874 * is not found or a pointer to the device.
875 * The caller must hold RCU or RTNL.
941666c2 876 * The returned device has not had its ref count increased
1da177e4
LT
877 * and the caller must therefore be careful about locking
878 *
1da177e4
LT
879 */
880
941666c2
ED
881struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
882 const char *ha)
1da177e4
LT
883{
884 struct net_device *dev;
885
941666c2 886 for_each_netdev_rcu(net, dev)
1da177e4
LT
887 if (dev->type == type &&
888 !memcmp(dev->dev_addr, ha, dev->addr_len))
7562f876
PE
889 return dev;
890
891 return NULL;
1da177e4 892}
941666c2 893EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
cf309e3f 894
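/* Illustrative sketch, not part of this file: resolving a device from its
 * hardware address under RCU.  The function name and the address bytes are
 * made up.
 */
static void example_find_by_mac(struct net *net)
{
	static const char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
	if (dev)
		pr_info("address belongs to %s\n", dev->name);
	rcu_read_unlock();
}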
881d966b 895struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
1da177e4
LT
896{
897 struct net_device *dev;
898
4e9cac2b 899 ASSERT_RTNL();
881d966b 900 for_each_netdev(net, dev)
4e9cac2b 901 if (dev->type == type)
7562f876
PE
902 return dev;
903
904 return NULL;
4e9cac2b 905}
4e9cac2b
PM
906EXPORT_SYMBOL(__dev_getfirstbyhwtype);
907
881d966b 908struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
4e9cac2b 909{
99fe3c39 910 struct net_device *dev, *ret = NULL;
4e9cac2b 911
99fe3c39
ED
912 rcu_read_lock();
913 for_each_netdev_rcu(net, dev)
914 if (dev->type == type) {
915 dev_hold(dev);
916 ret = dev;
917 break;
918 }
919 rcu_read_unlock();
920 return ret;
1da177e4 921}
1da177e4
LT
922EXPORT_SYMBOL(dev_getfirstbyhwtype);
923
924/**
6c555490 925 * __dev_get_by_flags - find any device with given flags
c4ea43c5 926 * @net: the applicable net namespace
1da177e4
LT
927 * @if_flags: IFF_* values
928 * @mask: bitmask of bits in if_flags to check
929 *
930 * Search for any interface with the given flags. Returns NULL if a device
bb69ae04 931 * is not found or a pointer to the device. Must be called inside
6c555490 932 * rtnl_lock(), and result refcount is unchanged.
1da177e4
LT
933 */
934
6c555490
WC
935struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
936 unsigned short mask)
1da177e4 937{
7562f876 938 struct net_device *dev, *ret;
1da177e4 939
6c555490
WC
940 ASSERT_RTNL();
941
7562f876 942 ret = NULL;
6c555490 943 for_each_netdev(net, dev) {
1da177e4 944 if (((dev->flags ^ if_flags) & mask) == 0) {
7562f876 945 ret = dev;
1da177e4
LT
946 break;
947 }
948 }
7562f876 949 return ret;
1da177e4 950}
6c555490 951EXPORT_SYMBOL(__dev_get_by_flags);
1da177e4
LT
952
953/**
954 * dev_valid_name - check if name is okay for network device
955 * @name: name string
956 *
 957 * Network device names need to be valid file names
c7fa9d18
DM
 958 * to allow sysfs to work. We also disallow any kind of
959 * whitespace.
1da177e4 960 */
95f050bf 961bool dev_valid_name(const char *name)
1da177e4 962{
c7fa9d18 963 if (*name == '\0')
95f050bf 964 return false;
b6fe17d6 965 if (strlen(name) >= IFNAMSIZ)
95f050bf 966 return false;
c7fa9d18 967 if (!strcmp(name, ".") || !strcmp(name, ".."))
95f050bf 968 return false;
c7fa9d18
DM
969
970 while (*name) {
a4176a93 971 if (*name == '/' || *name == ':' || isspace(*name))
95f050bf 972 return false;
c7fa9d18
DM
973 name++;
974 }
95f050bf 975 return true;
1da177e4 976}
d1b19dff 977EXPORT_SYMBOL(dev_valid_name);
1da177e4
LT
978
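/* Illustrative examples, not part of this file:
 *	dev_valid_name("eth0")      -> true
 *	dev_valid_name("my uplink") -> false  (whitespace)
 *	dev_valid_name("a/b")       -> false  (would break sysfs paths)
 *	dev_valid_name("..")        -> false
 * Names of IFNAMSIZ characters or more are also rejected.
 */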
979/**
b267b179
EB
980 * __dev_alloc_name - allocate a name for a device
981 * @net: network namespace to allocate the device name in
1da177e4 982 * @name: name format string
b267b179 983 * @buf: scratch buffer and result name string
1da177e4
LT
984 *
 985 * Passed a format string - e.g. "lt%d" - it will try to find a suitable
3041a069
SH
 986 * id. It scans the list of devices to build up a free map, then chooses
987 * the first empty slot. The caller must hold the dev_base or rtnl lock
988 * while allocating the name and adding the device in order to avoid
989 * duplicates.
990 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
991 * Returns the number of the unit assigned or a negative errno code.
1da177e4
LT
992 */
993
b267b179 994static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1da177e4
LT
995{
996 int i = 0;
1da177e4
LT
997 const char *p;
998 const int max_netdevices = 8*PAGE_SIZE;
cfcabdcc 999 unsigned long *inuse;
1da177e4
LT
1000 struct net_device *d;
1001
1002 p = strnchr(name, IFNAMSIZ-1, '%');
1003 if (p) {
1004 /*
1005 * Verify the string as this thing may have come from
1006 * the user. There must be either one "%d" and no other "%"
1007 * characters.
1008 */
1009 if (p[1] != 'd' || strchr(p + 2, '%'))
1010 return -EINVAL;
1011
1012 /* Use one page as a bit array of possible slots */
cfcabdcc 1013 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1da177e4
LT
1014 if (!inuse)
1015 return -ENOMEM;
1016
881d966b 1017 for_each_netdev(net, d) {
1da177e4
LT
1018 if (!sscanf(d->name, name, &i))
1019 continue;
1020 if (i < 0 || i >= max_netdevices)
1021 continue;
1022
1023 /* avoid cases where sscanf is not exact inverse of printf */
b267b179 1024 snprintf(buf, IFNAMSIZ, name, i);
1da177e4
LT
1025 if (!strncmp(buf, d->name, IFNAMSIZ))
1026 set_bit(i, inuse);
1027 }
1028
1029 i = find_first_zero_bit(inuse, max_netdevices);
1030 free_page((unsigned long) inuse);
1031 }
1032
d9031024
OP
1033 if (buf != name)
1034 snprintf(buf, IFNAMSIZ, name, i);
b267b179 1035 if (!__dev_get_by_name(net, buf))
1da177e4 1036 return i;
1da177e4
LT
1037
1038 /* It is possible to run out of possible slots
1039 * when the name is long and there isn't enough space left
1040 * for the digits, or if all bits are used.
1041 */
1042 return -ENFILE;
1043}
1044
b267b179
EB
1045/**
1046 * dev_alloc_name - allocate a name for a device
1047 * @dev: device
1048 * @name: name format string
1049 *
 1050 * Passed a format string - e.g. "lt%d" - it will try to find a suitable
 1051 * id. It scans the list of devices to build up a free map, then chooses
1052 * the first empty slot. The caller must hold the dev_base or rtnl lock
1053 * while allocating the name and adding the device in order to avoid
1054 * duplicates.
1055 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1056 * Returns the number of the unit assigned or a negative errno code.
1057 */
1058
1059int dev_alloc_name(struct net_device *dev, const char *name)
1060{
1061 char buf[IFNAMSIZ];
1062 struct net *net;
1063 int ret;
1064
c346dca1
YH
1065 BUG_ON(!dev_net(dev));
1066 net = dev_net(dev);
b267b179
EB
1067 ret = __dev_alloc_name(net, name, buf);
1068 if (ret >= 0)
1069 strlcpy(dev->name, buf, IFNAMSIZ);
1070 return ret;
1071}
d1b19dff 1072EXPORT_SYMBOL(dev_alloc_name);
b267b179 1073
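/* Illustrative example, not part of this file: with "dummy0" and "dummy1"
 * already registered, dev_alloc_name(dev, "dummy%d") writes "dummy2" into
 * dev->name and returns 2.  A plain name without '%' is handled by
 * dev_get_valid_name() below instead.
 */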
828de4f6
G
1074static int dev_alloc_name_ns(struct net *net,
1075 struct net_device *dev,
1076 const char *name)
d9031024 1077{
828de4f6
G
1078 char buf[IFNAMSIZ];
1079 int ret;
8ce6cebc 1080
828de4f6
G
1081 ret = __dev_alloc_name(net, name, buf);
1082 if (ret >= 0)
1083 strlcpy(dev->name, buf, IFNAMSIZ);
1084 return ret;
1085}
1086
1087static int dev_get_valid_name(struct net *net,
1088 struct net_device *dev,
1089 const char *name)
1090{
1091 BUG_ON(!net);
8ce6cebc 1092
d9031024
OP
1093 if (!dev_valid_name(name))
1094 return -EINVAL;
1095
1c5cae81 1096 if (strchr(name, '%'))
828de4f6 1097 return dev_alloc_name_ns(net, dev, name);
d9031024
OP
1098 else if (__dev_get_by_name(net, name))
1099 return -EEXIST;
8ce6cebc
DL
1100 else if (dev->name != name)
1101 strlcpy(dev->name, name, IFNAMSIZ);
d9031024
OP
1102
1103 return 0;
1104}
1da177e4
LT
1105
1106/**
1107 * dev_change_name - change name of a device
1108 * @dev: device
1109 * @newname: name (or format string) must be at least IFNAMSIZ
1110 *
1111 * Change name of a device, can pass format strings "eth%d".
1112 * for wildcarding.
1113 */
cf04a4c7 1114int dev_change_name(struct net_device *dev, const char *newname)
1da177e4 1115{
238fa362 1116 unsigned char old_assign_type;
fcc5a03a 1117 char oldname[IFNAMSIZ];
1da177e4 1118 int err = 0;
fcc5a03a 1119 int ret;
881d966b 1120 struct net *net;
1da177e4
LT
1121
1122 ASSERT_RTNL();
c346dca1 1123 BUG_ON(!dev_net(dev));
1da177e4 1124
c346dca1 1125 net = dev_net(dev);
1da177e4
LT
1126 if (dev->flags & IFF_UP)
1127 return -EBUSY;
1128
30e6c9fa 1129 write_seqcount_begin(&devnet_rename_seq);
c91f6df2
BH
1130
1131 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
30e6c9fa 1132 write_seqcount_end(&devnet_rename_seq);
c8d90dca 1133 return 0;
c91f6df2 1134 }
c8d90dca 1135
fcc5a03a
HX
1136 memcpy(oldname, dev->name, IFNAMSIZ);
1137
828de4f6 1138 err = dev_get_valid_name(net, dev, newname);
c91f6df2 1139 if (err < 0) {
30e6c9fa 1140 write_seqcount_end(&devnet_rename_seq);
d9031024 1141 return err;
c91f6df2 1142 }
1da177e4 1143
6fe82a39
VF
1144 if (oldname[0] && !strchr(oldname, '%'))
1145 netdev_info(dev, "renamed from %s\n", oldname);
1146
238fa362
TG
1147 old_assign_type = dev->name_assign_type;
1148 dev->name_assign_type = NET_NAME_RENAMED;
1149
fcc5a03a 1150rollback:
a1b3f594
EB
1151 ret = device_rename(&dev->dev, dev->name);
1152 if (ret) {
1153 memcpy(dev->name, oldname, IFNAMSIZ);
238fa362 1154 dev->name_assign_type = old_assign_type;
30e6c9fa 1155 write_seqcount_end(&devnet_rename_seq);
a1b3f594 1156 return ret;
dcc99773 1157 }
7f988eab 1158
30e6c9fa 1159 write_seqcount_end(&devnet_rename_seq);
c91f6df2 1160
5bb025fa
VF
1161 netdev_adjacent_rename_links(dev, oldname);
1162
7f988eab 1163 write_lock_bh(&dev_base_lock);
372b2312 1164 hlist_del_rcu(&dev->name_hlist);
72c9528b
ED
1165 write_unlock_bh(&dev_base_lock);
1166
1167 synchronize_rcu();
1168
1169 write_lock_bh(&dev_base_lock);
1170 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
7f988eab
HX
1171 write_unlock_bh(&dev_base_lock);
1172
056925ab 1173 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
fcc5a03a
HX
1174 ret = notifier_to_errno(ret);
1175
1176 if (ret) {
91e9c07b
ED
1177 /* err >= 0 after dev_alloc_name() or stores the first errno */
1178 if (err >= 0) {
fcc5a03a 1179 err = ret;
30e6c9fa 1180 write_seqcount_begin(&devnet_rename_seq);
fcc5a03a 1181 memcpy(dev->name, oldname, IFNAMSIZ);
5bb025fa 1182 memcpy(oldname, newname, IFNAMSIZ);
238fa362
TG
1183 dev->name_assign_type = old_assign_type;
1184 old_assign_type = NET_NAME_RENAMED;
fcc5a03a 1185 goto rollback;
91e9c07b 1186 } else {
7b6cd1ce 1187 pr_err("%s: name change rollback failed: %d\n",
91e9c07b 1188 dev->name, ret);
fcc5a03a
HX
1189 }
1190 }
1da177e4
LT
1191
1192 return err;
1193}
1194
0b815a1a
SH
1195/**
1196 * dev_set_alias - change ifalias of a device
1197 * @dev: device
1198 * @alias: name up to IFALIASZ
f0db275a 1199 * @len: limit of bytes to copy from info
0b815a1a
SH
1200 *
 1201 * Set the ifalias for a device.
1202 */
1203int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1204{
7364e445
AK
1205 char *new_ifalias;
1206
0b815a1a
SH
1207 ASSERT_RTNL();
1208
1209 if (len >= IFALIASZ)
1210 return -EINVAL;
1211
96ca4a2c 1212 if (!len) {
388dfc2d
SK
1213 kfree(dev->ifalias);
1214 dev->ifalias = NULL;
96ca4a2c
OH
1215 return 0;
1216 }
1217
7364e445
AK
1218 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1219 if (!new_ifalias)
0b815a1a 1220 return -ENOMEM;
7364e445 1221 dev->ifalias = new_ifalias;
0b815a1a
SH
1222
1223 strlcpy(dev->ifalias, alias, len+1);
1224 return len;
1225}
1226
1227
d8a33ac4 1228/**
3041a069 1229 * netdev_features_change - device changes features
d8a33ac4
SH
1230 * @dev: device to cause notification
1231 *
1232 * Called to indicate a device has changed features.
1233 */
1234void netdev_features_change(struct net_device *dev)
1235{
056925ab 1236 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
d8a33ac4
SH
1237}
1238EXPORT_SYMBOL(netdev_features_change);
1239
1da177e4
LT
1240/**
1241 * netdev_state_change - device changes state
1242 * @dev: device to cause notification
1243 *
1244 * Called to indicate a device has changed state. This function calls
1245 * the notifier chains for netdev_chain and sends a NEWLINK message
1246 * to the routing socket.
1247 */
1248void netdev_state_change(struct net_device *dev)
1249{
1250 if (dev->flags & IFF_UP) {
54951194
LP
1251 struct netdev_notifier_change_info change_info;
1252
1253 change_info.flags_changed = 0;
1254 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1255 &change_info.info);
7f294054 1256 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1da177e4
LT
1257 }
1258}
d1b19dff 1259EXPORT_SYMBOL(netdev_state_change);
1da177e4 1260
ee89bab1
AW
1261/**
1262 * netdev_notify_peers - notify network peers about existence of @dev
1263 * @dev: network device
1264 *
1265 * Generate traffic such that interested network peers are aware of
1266 * @dev, such as by generating a gratuitous ARP. This may be used when
1267 * a device wants to inform the rest of the network about some sort of
1268 * reconfiguration such as a failover event or virtual machine
1269 * migration.
1270 */
1271void netdev_notify_peers(struct net_device *dev)
c1da4ac7 1272{
ee89bab1
AW
1273 rtnl_lock();
1274 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1275 rtnl_unlock();
c1da4ac7 1276}
ee89bab1 1277EXPORT_SYMBOL(netdev_notify_peers);
c1da4ac7 1278
bd380811 1279static int __dev_open(struct net_device *dev)
1da177e4 1280{
d314774c 1281 const struct net_device_ops *ops = dev->netdev_ops;
3b8bcfd5 1282 int ret;
1da177e4 1283
e46b66bc
BH
1284 ASSERT_RTNL();
1285
1da177e4
LT
1286 if (!netif_device_present(dev))
1287 return -ENODEV;
1288
ca99ca14
NH
1289 /* Block netpoll from trying to do any rx path servicing.
1290 * If we don't do this there is a chance ndo_poll_controller
1291 * or ndo_poll may be running while we open the device
1292 */
66b5552f 1293 netpoll_poll_disable(dev);
ca99ca14 1294
3b8bcfd5
JB
1295 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1296 ret = notifier_to_errno(ret);
1297 if (ret)
1298 return ret;
1299
1da177e4 1300 set_bit(__LINK_STATE_START, &dev->state);
bada339b 1301
d314774c
SH
1302 if (ops->ndo_validate_addr)
1303 ret = ops->ndo_validate_addr(dev);
bada339b 1304
d314774c
SH
1305 if (!ret && ops->ndo_open)
1306 ret = ops->ndo_open(dev);
1da177e4 1307
66b5552f 1308 netpoll_poll_enable(dev);
ca99ca14 1309
bada339b
JG
1310 if (ret)
1311 clear_bit(__LINK_STATE_START, &dev->state);
1312 else {
1da177e4 1313 dev->flags |= IFF_UP;
4417da66 1314 dev_set_rx_mode(dev);
1da177e4 1315 dev_activate(dev);
7bf23575 1316 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 1317 }
bada339b 1318
1da177e4
LT
1319 return ret;
1320}
1321
1322/**
bd380811
PM
1323 * dev_open - prepare an interface for use.
1324 * @dev: device to open
1da177e4 1325 *
bd380811
PM
1326 * Takes a device from down to up state. The device's private open
1327 * function is invoked and then the multicast lists are loaded. Finally
1328 * the device is moved into the up state and a %NETDEV_UP message is
1329 * sent to the netdev notifier chain.
1330 *
1331 * Calling this function on an active interface is a nop. On a failure
1332 * a negative errno code is returned.
1da177e4 1333 */
bd380811
PM
1334int dev_open(struct net_device *dev)
1335{
1336 int ret;
1337
bd380811
PM
1338 if (dev->flags & IFF_UP)
1339 return 0;
1340
bd380811
PM
1341 ret = __dev_open(dev);
1342 if (ret < 0)
1343 return ret;
1344
7f294054 1345 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
bd380811
PM
1346 call_netdevice_notifiers(NETDEV_UP, dev);
1347
1348 return ret;
1349}
1350EXPORT_SYMBOL(dev_open);
1351
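/* Illustrative sketch, not part of this file: bringing an interface up from
 * kernel code.  dev_open() must run under RTNL, and dev_close() is its
 * counterpart.  The function name is hypothetical.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);
	rtnl_unlock();
	return err;
}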
44345724 1352static int __dev_close_many(struct list_head *head)
1da177e4 1353{
44345724 1354 struct net_device *dev;
e46b66bc 1355
bd380811 1356 ASSERT_RTNL();
9d5010db
DM
1357 might_sleep();
1358
5cde2829 1359 list_for_each_entry(dev, head, close_list) {
3f4df206 1360 /* Temporarily disable netpoll until the interface is down */
66b5552f 1361 netpoll_poll_disable(dev);
3f4df206 1362
44345724 1363 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1da177e4 1364
44345724 1365 clear_bit(__LINK_STATE_START, &dev->state);
1da177e4 1366
44345724
OP
 1367 /* Synchronize to scheduled poll. We cannot touch the poll list, as it
 1368 * may even be on a different cpu. So just clear netif_running().
1369 *
 1370 * dev->stop() will invoke napi_disable() on all of its
1371 * napi_struct instances on this device.
1372 */
4e857c58 1373 smp_mb__after_atomic(); /* Commit netif_running(). */
44345724 1374 }
1da177e4 1375
44345724 1376 dev_deactivate_many(head);
d8b2a4d2 1377
5cde2829 1378 list_for_each_entry(dev, head, close_list) {
44345724 1379 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4 1380
44345724
OP
1381 /*
1382 * Call the device specific close. This cannot fail.
1383 * Only if device is UP
1384 *
1385 * We allow it to be called even after a DETACH hot-plug
1386 * event.
1387 */
1388 if (ops->ndo_stop)
1389 ops->ndo_stop(dev);
1390
44345724 1391 dev->flags &= ~IFF_UP;
66b5552f 1392 netpoll_poll_enable(dev);
44345724
OP
1393 }
1394
1395 return 0;
1396}
1397
1398static int __dev_close(struct net_device *dev)
1399{
f87e6f47 1400 int retval;
44345724
OP
1401 LIST_HEAD(single);
1402
5cde2829 1403 list_add(&dev->close_list, &single);
f87e6f47
LT
1404 retval = __dev_close_many(&single);
1405 list_del(&single);
ca99ca14 1406
f87e6f47 1407 return retval;
44345724
OP
1408}
1409
99c4a26a 1410int dev_close_many(struct list_head *head, bool unlink)
44345724
OP
1411{
1412 struct net_device *dev, *tmp;
1da177e4 1413
5cde2829
EB
1414 /* Remove the devices that don't need to be closed */
1415 list_for_each_entry_safe(dev, tmp, head, close_list)
44345724 1416 if (!(dev->flags & IFF_UP))
5cde2829 1417 list_del_init(&dev->close_list);
44345724
OP
1418
1419 __dev_close_many(head);
1da177e4 1420
5cde2829 1421 list_for_each_entry_safe(dev, tmp, head, close_list) {
7f294054 1422 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
44345724 1423 call_netdevice_notifiers(NETDEV_DOWN, dev);
99c4a26a
DM
1424 if (unlink)
1425 list_del_init(&dev->close_list);
44345724 1426 }
bd380811
PM
1427
1428 return 0;
1429}
99c4a26a 1430EXPORT_SYMBOL(dev_close_many);
bd380811
PM
1431
1432/**
1433 * dev_close - shutdown an interface.
1434 * @dev: device to shutdown
1435 *
1436 * This function moves an active device into down state. A
1437 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1438 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1439 * chain.
1440 */
1441int dev_close(struct net_device *dev)
1442{
e14a5993
ED
1443 if (dev->flags & IFF_UP) {
1444 LIST_HEAD(single);
1da177e4 1445
5cde2829 1446 list_add(&dev->close_list, &single);
99c4a26a 1447 dev_close_many(&single, true);
e14a5993
ED
1448 list_del(&single);
1449 }
da6e378b 1450 return 0;
1da177e4 1451}
d1b19dff 1452EXPORT_SYMBOL(dev_close);
1da177e4
LT
1453
1454
0187bdfb
BH
1455/**
1456 * dev_disable_lro - disable Large Receive Offload on a device
1457 * @dev: device
1458 *
1459 * Disable Large Receive Offload (LRO) on a net device. Must be
1460 * called under RTNL. This is needed if received packets may be
1461 * forwarded to another interface.
1462 */
1463void dev_disable_lro(struct net_device *dev)
1464{
fbe168ba
MK
1465 struct net_device *lower_dev;
1466 struct list_head *iter;
529d0489 1467
bc5787c6
MM
1468 dev->wanted_features &= ~NETIF_F_LRO;
1469 netdev_update_features(dev);
27660515 1470
22d5969f
MM
1471 if (unlikely(dev->features & NETIF_F_LRO))
1472 netdev_WARN(dev, "failed to disable LRO!\n");
fbe168ba
MK
1473
1474 netdev_for_each_lower_dev(dev, lower_dev, iter)
1475 dev_disable_lro(lower_dev);
0187bdfb
BH
1476}
1477EXPORT_SYMBOL(dev_disable_lro);
1478
351638e7
JP
1479static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1480 struct net_device *dev)
1481{
1482 struct netdev_notifier_info info;
1483
1484 netdev_notifier_info_init(&info, dev);
1485 return nb->notifier_call(nb, val, &info);
1486}
0187bdfb 1487
881d966b
EB
1488static int dev_boot_phase = 1;
1489
1da177e4
LT
1490/**
1491 * register_netdevice_notifier - register a network notifier block
1492 * @nb: notifier
1493 *
1494 * Register a notifier to be called when network device events occur.
1495 * The notifier passed is linked into the kernel structures and must
1496 * not be reused until it has been unregistered. A negative errno code
1497 * is returned on a failure.
1498 *
 1499 * When registered, all registration and up events are replayed
4ec93edb 1500 * to the new notifier to allow the device to have a race-free
1da177e4
LT
1501 * view of the network device list.
1502 */
1503
1504int register_netdevice_notifier(struct notifier_block *nb)
1505{
1506 struct net_device *dev;
fcc5a03a 1507 struct net_device *last;
881d966b 1508 struct net *net;
1da177e4
LT
1509 int err;
1510
1511 rtnl_lock();
f07d5b94 1512 err = raw_notifier_chain_register(&netdev_chain, nb);
fcc5a03a
HX
1513 if (err)
1514 goto unlock;
881d966b
EB
1515 if (dev_boot_phase)
1516 goto unlock;
1517 for_each_net(net) {
1518 for_each_netdev(net, dev) {
351638e7 1519 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
881d966b
EB
1520 err = notifier_to_errno(err);
1521 if (err)
1522 goto rollback;
1523
1524 if (!(dev->flags & IFF_UP))
1525 continue;
1da177e4 1526
351638e7 1527 call_netdevice_notifier(nb, NETDEV_UP, dev);
881d966b 1528 }
1da177e4 1529 }
fcc5a03a
HX
1530
1531unlock:
1da177e4
LT
1532 rtnl_unlock();
1533 return err;
fcc5a03a
HX
1534
1535rollback:
1536 last = dev;
881d966b
EB
1537 for_each_net(net) {
1538 for_each_netdev(net, dev) {
1539 if (dev == last)
8f891489 1540 goto outroll;
fcc5a03a 1541
881d966b 1542 if (dev->flags & IFF_UP) {
351638e7
JP
1543 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1544 dev);
1545 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
881d966b 1546 }
351638e7 1547 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
fcc5a03a 1548 }
fcc5a03a 1549 }
c67625a1 1550
8f891489 1551outroll:
c67625a1 1552 raw_notifier_chain_unregister(&netdev_chain, nb);
fcc5a03a 1553 goto unlock;
1da177e4 1554}
d1b19dff 1555EXPORT_SYMBOL(register_netdevice_notifier);
1da177e4
LT
1556
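/* Illustrative sketch, not part of this file: a minimal netdev notifier.
 * The names are hypothetical; the device is recovered from the opaque
 * pointer with netdev_notifier_info_to_dev().
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb) from module init;
 * unregister_netdevice_notifier(&example_netdev_nb) from module exit.
 */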
1557/**
1558 * unregister_netdevice_notifier - unregister a network notifier block
1559 * @nb: notifier
1560 *
1561 * Unregister a notifier previously registered by
 1562 * register_netdevice_notifier(). The notifier is unlinked from the
1563 * kernel structures and may then be reused. A negative errno code
1564 * is returned on a failure.
7d3d43da
EB
1565 *
1566 * After unregistering unregister and down device events are synthesized
1567 * for all devices on the device list to the removed notifier to remove
1568 * the need for special case cleanup code.
1da177e4
LT
1569 */
1570
1571int unregister_netdevice_notifier(struct notifier_block *nb)
1572{
7d3d43da
EB
1573 struct net_device *dev;
1574 struct net *net;
9f514950
HX
1575 int err;
1576
1577 rtnl_lock();
f07d5b94 1578 err = raw_notifier_chain_unregister(&netdev_chain, nb);
7d3d43da
EB
1579 if (err)
1580 goto unlock;
1581
1582 for_each_net(net) {
1583 for_each_netdev(net, dev) {
1584 if (dev->flags & IFF_UP) {
351638e7
JP
1585 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1586 dev);
1587 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
7d3d43da 1588 }
351638e7 1589 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
7d3d43da
EB
1590 }
1591 }
1592unlock:
9f514950
HX
1593 rtnl_unlock();
1594 return err;
1da177e4 1595}
d1b19dff 1596EXPORT_SYMBOL(unregister_netdevice_notifier);
1da177e4 1597
351638e7
JP
1598/**
1599 * call_netdevice_notifiers_info - call all network notifier blocks
1600 * @val: value passed unmodified to notifier function
1601 * @dev: net_device pointer passed unmodified to notifier function
1602 * @info: notifier information data
1603 *
1604 * Call all network notifier blocks. Parameters and return value
1605 * are as for raw_notifier_call_chain().
1606 */
1607
1d143d9f 1608static int call_netdevice_notifiers_info(unsigned long val,
1609 struct net_device *dev,
1610 struct netdev_notifier_info *info)
351638e7
JP
1611{
1612 ASSERT_RTNL();
1613 netdev_notifier_info_init(info, dev);
1614 return raw_notifier_call_chain(&netdev_chain, val, info);
1615}
351638e7 1616
1da177e4
LT
1617/**
1618 * call_netdevice_notifiers - call all network notifier blocks
1619 * @val: value passed unmodified to notifier function
c4ea43c5 1620 * @dev: net_device pointer passed unmodified to notifier function
1da177e4
LT
1621 *
1622 * Call all network notifier blocks. Parameters and return value
f07d5b94 1623 * are as for raw_notifier_call_chain().
1da177e4
LT
1624 */
1625
ad7379d4 1626int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1da177e4 1627{
351638e7
JP
1628 struct netdev_notifier_info info;
1629
1630 return call_netdevice_notifiers_info(val, dev, &info);
1da177e4 1631}
edf947f1 1632EXPORT_SYMBOL(call_netdevice_notifiers);
1da177e4 1633
1cf51900 1634#ifdef CONFIG_NET_INGRESS
4577139b
DB
1635static struct static_key ingress_needed __read_mostly;
1636
1637void net_inc_ingress_queue(void)
1638{
1639 static_key_slow_inc(&ingress_needed);
1640}
1641EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1642
1643void net_dec_ingress_queue(void)
1644{
1645 static_key_slow_dec(&ingress_needed);
1646}
1647EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1648#endif
1649
c5905afb 1650static struct static_key netstamp_needed __read_mostly;
b90e5794 1651#ifdef HAVE_JUMP_LABEL
c5905afb 1652/* We are not allowed to call static_key_slow_dec() from irq context
b90e5794 1653 * If net_disable_timestamp() is called from irq context, defer the
c5905afb 1654 * static_key_slow_dec() calls.
b90e5794
ED
1655 */
1656static atomic_t netstamp_needed_deferred;
1657#endif
1da177e4
LT
1658
1659void net_enable_timestamp(void)
1660{
b90e5794
ED
1661#ifdef HAVE_JUMP_LABEL
1662 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1663
1664 if (deferred) {
1665 while (--deferred)
c5905afb 1666 static_key_slow_dec(&netstamp_needed);
b90e5794
ED
1667 return;
1668 }
1669#endif
c5905afb 1670 static_key_slow_inc(&netstamp_needed);
1da177e4 1671}
d1b19dff 1672EXPORT_SYMBOL(net_enable_timestamp);
1da177e4
LT
1673
1674void net_disable_timestamp(void)
1675{
b90e5794
ED
1676#ifdef HAVE_JUMP_LABEL
1677 if (in_interrupt()) {
1678 atomic_inc(&netstamp_needed_deferred);
1679 return;
1680 }
1681#endif
c5905afb 1682 static_key_slow_dec(&netstamp_needed);
1da177e4 1683}
d1b19dff 1684EXPORT_SYMBOL(net_disable_timestamp);
1da177e4 1685
3b098e2d 1686static inline void net_timestamp_set(struct sk_buff *skb)
1da177e4 1687{
588f0330 1688 skb->tstamp.tv64 = 0;
c5905afb 1689 if (static_key_false(&netstamp_needed))
a61bbcf2 1690 __net_timestamp(skb);
1da177e4
LT
1691}
1692
588f0330 1693#define net_timestamp_check(COND, SKB) \
c5905afb 1694 if (static_key_false(&netstamp_needed)) { \
588f0330
ED
1695 if ((COND) && !(SKB)->tstamp.tv64) \
1696 __net_timestamp(SKB); \
1697 } \
3b098e2d 1698
1ee481fb 1699bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
79b569f0
DL
1700{
1701 unsigned int len;
1702
1703 if (!(dev->flags & IFF_UP))
1704 return false;
1705
1706 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1707 if (skb->len <= len)
1708 return true;
1709
1710 /* if TSO is enabled, we don't care about the length as the packet
1711 * could be forwarded without being segmented beforehand
1712 */
1713 if (skb_is_gso(skb))
1714 return true;
1715
1716 return false;
1717}
1ee481fb 1718EXPORT_SYMBOL_GPL(is_skb_forwardable);
79b569f0 1719
a0265d28
HX
1720int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1721{
1722 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1723 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1724 atomic_long_inc(&dev->rx_dropped);
1725 kfree_skb(skb);
1726 return NET_RX_DROP;
1727 }
1728 }
1729
1730 if (unlikely(!is_skb_forwardable(dev, skb))) {
1731 atomic_long_inc(&dev->rx_dropped);
1732 kfree_skb(skb);
1733 return NET_RX_DROP;
1734 }
1735
1736 skb_scrub_packet(skb, true);
08b4b8ea 1737 skb->priority = 0;
a0265d28 1738 skb->protocol = eth_type_trans(skb, dev);
2c26d34b 1739 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
a0265d28
HX
1740
1741 return 0;
1742}
1743EXPORT_SYMBOL_GPL(__dev_forward_skb);
1744
44540960
AB
1745/**
1746 * dev_forward_skb - loopback an skb to another netif
1747 *
1748 * @dev: destination network device
1749 * @skb: buffer to forward
1750 *
1751 * return values:
1752 * NET_RX_SUCCESS (no congestion)
6ec82562 1753 * NET_RX_DROP (packet was dropped, but freed)
44540960
AB
1754 *
1755 * dev_forward_skb can be used for injecting an skb from the
1756 * start_xmit function of one device into the receive queue
1757 * of another device.
1758 *
1759 * The receiving device may be in another namespace, so
1760 * we have to clear all information in the skb that could
1761 * impact namespace isolation.
1762 */
1763int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1764{
a0265d28 1765 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
44540960
AB
1766}
1767EXPORT_SYMBOL_GPL(dev_forward_skb);
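A hedged sketch of the pattern described in the kernel-doc above, in the style of a veth-like pair; struct my_priv and my_start_xmit are hypothetical:

struct my_priv {
	struct net_device *peer;	/* the other end of the pair */
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* Inject the frame into the peer's receive path; the skb is
	 * always consumed, so only the return value needs checking.
	 */
	if (dev_forward_skb(priv->peer, skb) == NET_RX_SUCCESS)
		dev->stats.tx_packets++;
	else
		dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}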
1768
71d9dec2
CG
1769static inline int deliver_skb(struct sk_buff *skb,
1770 struct packet_type *pt_prev,
1771 struct net_device *orig_dev)
1772{
1080e512
MT
1773 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1774 return -ENOMEM;
71d9dec2
CG
1775 atomic_inc(&skb->users);
1776 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1777}
1778
7866a621
SN
1779static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1780 struct packet_type **pt,
fbcb2170
JP
1781 struct net_device *orig_dev,
1782 __be16 type,
7866a621
SN
1783 struct list_head *ptype_list)
1784{
1785 struct packet_type *ptype, *pt_prev = *pt;
1786
1787 list_for_each_entry_rcu(ptype, ptype_list, list) {
1788 if (ptype->type != type)
1789 continue;
1790 if (pt_prev)
fbcb2170 1791 deliver_skb(skb, pt_prev, orig_dev);
7866a621
SN
1792 pt_prev = ptype;
1793 }
1794 *pt = pt_prev;
1795}
1796
c0de08d0
EL
1797static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1798{
a3d744e9 1799 if (!ptype->af_packet_priv || !skb->sk)
c0de08d0
EL
1800 return false;
1801
1802 if (ptype->id_match)
1803 return ptype->id_match(ptype, skb->sk);
1804 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1805 return true;
1806
1807 return false;
1808}
1809
1da177e4
LT
1810/*
1811 * Support routine. Sends outgoing frames to any network
1812 * taps currently in use.
1813 */
1814
f6a78bfc 1815static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
1816{
1817 struct packet_type *ptype;
71d9dec2
CG
1818 struct sk_buff *skb2 = NULL;
1819 struct packet_type *pt_prev = NULL;
7866a621 1820 struct list_head *ptype_list = &ptype_all;
a61bbcf2 1821
1da177e4 1822 rcu_read_lock();
7866a621
SN
1823again:
1824 list_for_each_entry_rcu(ptype, ptype_list, list) {
1da177e4
LT
1825 /* Never send packets back to the socket
1826 * they originated from - MvS ([email protected])
1827 */
7866a621
SN
1828 if (skb_loop_sk(ptype, skb))
1829 continue;
71d9dec2 1830
7866a621
SN
1831 if (pt_prev) {
1832 deliver_skb(skb2, pt_prev, skb->dev);
1833 pt_prev = ptype;
1834 continue;
1835 }
1da177e4 1836
7866a621
SN
1837 /* need to clone skb, done only once */
1838 skb2 = skb_clone(skb, GFP_ATOMIC);
1839 if (!skb2)
1840 goto out_unlock;
70978182 1841
7866a621 1842 net_timestamp_set(skb2);
1da177e4 1843
7866a621
SN
1844 /* skb->nh should be correctly
1845 * set by sender, so that the second statement is
1846 * just protection against buggy protocols.
1847 */
1848 skb_reset_mac_header(skb2);
1849
1850 if (skb_network_header(skb2) < skb2->data ||
1851 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1852 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1853 ntohs(skb2->protocol),
1854 dev->name);
1855 skb_reset_network_header(skb2);
1da177e4 1856 }
7866a621
SN
1857
1858 skb2->transport_header = skb2->network_header;
1859 skb2->pkt_type = PACKET_OUTGOING;
1860 pt_prev = ptype;
1861 }
1862
1863 if (ptype_list == &ptype_all) {
1864 ptype_list = &dev->ptype_all;
1865 goto again;
1da177e4 1866 }
7866a621 1867out_unlock:
71d9dec2
CG
1868 if (pt_prev)
1869 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1da177e4
LT
1870 rcu_read_unlock();
1871}
1872
2c53040f
BH
1873/**
1874 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
4f57c087
JF
1875 * @dev: Network device
1876 * @txq: number of queues available
1877 *
1878 * If real_num_tx_queues is changed the tc mappings may no longer be
1879 * valid. To resolve this verify the tc mapping remains valid and if
1880 * not, null the mapping. If no priorities map to this offset/count
1881 * pair it will no longer be used. In the worst case, if TC0 is
1882 * invalid nothing can be done, so disable the priority mappings. It is
1883 * expected that drivers will fix this mapping if they can before
1884 * calling netif_set_real_num_tx_queues.
1885 */
bb134d22 1886static void netif_setup_tc(struct net_device *dev, unsigned int txq)
4f57c087
JF
1887{
1888 int i;
1889 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1890
1891 /* If TC0 is invalidated disable TC mapping */
1892 if (tc->offset + tc->count > txq) {
7b6cd1ce 1893 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
4f57c087
JF
1894 dev->num_tc = 0;
1895 return;
1896 }
1897
1898 /* Invalidated prio to tc mappings set to TC0 */
1899 for (i = 1; i < TC_BITMASK + 1; i++) {
1900 int q = netdev_get_prio_tc_map(dev, i);
1901
1902 tc = &dev->tc_to_txq[q];
1903 if (tc->offset + tc->count > txq) {
7b6cd1ce
JP
1904 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1905 i, q);
4f57c087
JF
1906 netdev_set_prio_tc_map(dev, i, 0);
1907 }
1908 }
1909}
1910
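A hedged sketch of how a driver might establish the tc_to_txq and prio-to-tc mappings that netif_setup_tc() later revalidates; my_setup_tc is a hypothetical helper built on the real netdev_set_num_tc(), netdev_set_tc_queue() and netdev_set_prio_tc_map() calls:

static void my_setup_tc(struct net_device *dev)
{
	int i;

	/* Two traffic classes: queues 0-3 form TC0, queues 4-7 form TC1. */
	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, 4, 0);
	netdev_set_tc_queue(dev, 1, 4, 4);

	/* Map priorities 0-3 to TC0 and priorities 4-7 to TC1. */
	for (i = 0; i < 8; i++)
		netdev_set_prio_tc_map(dev, i, i < 4 ? 0 : 1);
}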
537c00de
AD
1911#ifdef CONFIG_XPS
1912static DEFINE_MUTEX(xps_map_mutex);
1913#define xmap_dereference(P) \
1914 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1915
10cdc3f3
AD
1916static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1917 int cpu, u16 index)
537c00de 1918{
10cdc3f3
AD
1919 struct xps_map *map = NULL;
1920 int pos;
537c00de 1921
10cdc3f3
AD
1922 if (dev_maps)
1923 map = xmap_dereference(dev_maps->cpu_map[cpu]);
537c00de 1924
10cdc3f3
AD
1925 for (pos = 0; map && pos < map->len; pos++) {
1926 if (map->queues[pos] == index) {
537c00de
AD
1927 if (map->len > 1) {
1928 map->queues[pos] = map->queues[--map->len];
1929 } else {
10cdc3f3 1930 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
537c00de
AD
1931 kfree_rcu(map, rcu);
1932 map = NULL;
1933 }
10cdc3f3 1934 break;
537c00de 1935 }
537c00de
AD
1936 }
1937
10cdc3f3
AD
1938 return map;
1939}
1940
024e9679 1941static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
10cdc3f3
AD
1942{
1943 struct xps_dev_maps *dev_maps;
024e9679 1944 int cpu, i;
10cdc3f3
AD
1945 bool active = false;
1946
1947 mutex_lock(&xps_map_mutex);
1948 dev_maps = xmap_dereference(dev->xps_maps);
1949
1950 if (!dev_maps)
1951 goto out_no_maps;
1952
1953 for_each_possible_cpu(cpu) {
024e9679
AD
1954 for (i = index; i < dev->num_tx_queues; i++) {
1955 if (!remove_xps_queue(dev_maps, cpu, i))
1956 break;
1957 }
1958 if (i == dev->num_tx_queues)
10cdc3f3
AD
1959 active = true;
1960 }
1961
1962 if (!active) {
537c00de
AD
1963 RCU_INIT_POINTER(dev->xps_maps, NULL);
1964 kfree_rcu(dev_maps, rcu);
1965 }
1966
024e9679
AD
1967 for (i = index; i < dev->num_tx_queues; i++)
1968 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1969 NUMA_NO_NODE);
1970
537c00de
AD
1971out_no_maps:
1972 mutex_unlock(&xps_map_mutex);
1973}
1974
01c5f864
AD
1975static struct xps_map *expand_xps_map(struct xps_map *map,
1976 int cpu, u16 index)
1977{
1978 struct xps_map *new_map;
1979 int alloc_len = XPS_MIN_MAP_ALLOC;
1980 int i, pos;
1981
1982 for (pos = 0; map && pos < map->len; pos++) {
1983 if (map->queues[pos] != index)
1984 continue;
1985 return map;
1986 }
1987
1988 /* Need to add queue to this CPU's existing map */
1989 if (map) {
1990 if (pos < map->alloc_len)
1991 return map;
1992
1993 alloc_len = map->alloc_len * 2;
1994 }
1995
1996 /* Need to allocate new map to store queue on this CPU's map */
1997 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1998 cpu_to_node(cpu));
1999 if (!new_map)
2000 return NULL;
2001
2002 for (i = 0; i < pos; i++)
2003 new_map->queues[i] = map->queues[i];
2004 new_map->alloc_len = alloc_len;
2005 new_map->len = pos;
2006
2007 return new_map;
2008}
2009
3573540c
MT
2010int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2011 u16 index)
537c00de 2012{
01c5f864 2013 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
537c00de 2014 struct xps_map *map, *new_map;
537c00de 2015 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
01c5f864
AD
2016 int cpu, numa_node_id = -2;
2017 bool active = false;
537c00de
AD
2018
2019 mutex_lock(&xps_map_mutex);
2020
2021 dev_maps = xmap_dereference(dev->xps_maps);
2022
01c5f864
AD
2023 /* allocate memory for queue storage */
2024 for_each_online_cpu(cpu) {
2025 if (!cpumask_test_cpu(cpu, mask))
2026 continue;
2027
2028 if (!new_dev_maps)
2029 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2bb60cb9
AD
2030 if (!new_dev_maps) {
2031 mutex_unlock(&xps_map_mutex);
01c5f864 2032 return -ENOMEM;
2bb60cb9 2033 }
01c5f864
AD
2034
2035 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2036 NULL;
2037
2038 map = expand_xps_map(map, cpu, index);
2039 if (!map)
2040 goto error;
2041
2042 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2043 }
2044
2045 if (!new_dev_maps)
2046 goto out_no_new_maps;
2047
537c00de 2048 for_each_possible_cpu(cpu) {
01c5f864
AD
2049 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2050 /* add queue to CPU maps */
2051 int pos = 0;
2052
2053 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2054 while ((pos < map->len) && (map->queues[pos] != index))
2055 pos++;
2056
2057 if (pos == map->len)
2058 map->queues[map->len++] = index;
537c00de 2059#ifdef CONFIG_NUMA
537c00de
AD
2060 if (numa_node_id == -2)
2061 numa_node_id = cpu_to_node(cpu);
2062 else if (numa_node_id != cpu_to_node(cpu))
2063 numa_node_id = -1;
537c00de 2064#endif
01c5f864
AD
2065 } else if (dev_maps) {
2066 /* fill in the new device map from the old device map */
2067 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2068 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
537c00de 2069 }
01c5f864 2070
537c00de
AD
2071 }
2072
01c5f864
AD
2073 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2074
537c00de 2075 /* Cleanup old maps */
01c5f864
AD
2076 if (dev_maps) {
2077 for_each_possible_cpu(cpu) {
2078 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2079 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2080 if (map && map != new_map)
2081 kfree_rcu(map, rcu);
2082 }
537c00de 2083
01c5f864 2084 kfree_rcu(dev_maps, rcu);
537c00de
AD
2085 }
2086
01c5f864
AD
2087 dev_maps = new_dev_maps;
2088 active = true;
537c00de 2089
01c5f864
AD
2090out_no_new_maps:
2091 /* update Tx queue numa node */
537c00de
AD
2092 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2093 (numa_node_id >= 0) ? numa_node_id :
2094 NUMA_NO_NODE);
2095
01c5f864
AD
2096 if (!dev_maps)
2097 goto out_no_maps;
2098
2099 /* removes queue from unused CPUs */
2100 for_each_possible_cpu(cpu) {
2101 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2102 continue;
2103
2104 if (remove_xps_queue(dev_maps, cpu, index))
2105 active = true;
2106 }
2107
2108 /* free map if not active */
2109 if (!active) {
2110 RCU_INIT_POINTER(dev->xps_maps, NULL);
2111 kfree_rcu(dev_maps, rcu);
2112 }
2113
2114out_no_maps:
537c00de
AD
2115 mutex_unlock(&xps_map_mutex);
2116
2117 return 0;
2118error:
01c5f864
AD
2119 /* remove any maps that we added */
2120 for_each_possible_cpu(cpu) {
2121 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2122 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2123 NULL;
2124 if (new_map && new_map != map)
2125 kfree(new_map);
2126 }
2127
537c00de
AD
2128 mutex_unlock(&xps_map_mutex);
2129
537c00de
AD
2130 kfree(new_dev_maps);
2131 return -ENOMEM;
2132}
2133EXPORT_SYMBOL(netif_set_xps_queue);
2134
2135#endif
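A hedged sketch of a driver-side XPS setup (XPS can equally be configured through sysfs); my_setup_xps is a hypothetical helper, netif_set_xps_queue() is the API defined above:

static void my_setup_xps(struct net_device *dev)
{
	int cpu, queue = 0;

	for_each_online_cpu(cpu) {
		if (queue >= dev->real_num_tx_queues)
			break;
		/* Prefer transmitting on TX queue "queue" from this CPU. */
		netif_set_xps_queue(dev, cpumask_of(cpu), queue++);
	}
}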
f0796d5c
JF
2136/*
2137 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2138 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2139 */
e6484930 2140int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
f0796d5c 2141{
1d24eb48
TH
2142 int rc;
2143
e6484930
TH
2144 if (txq < 1 || txq > dev->num_tx_queues)
2145 return -EINVAL;
f0796d5c 2146
5c56580b
BH
2147 if (dev->reg_state == NETREG_REGISTERED ||
2148 dev->reg_state == NETREG_UNREGISTERING) {
e6484930
TH
2149 ASSERT_RTNL();
2150
1d24eb48
TH
2151 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2152 txq);
bf264145
TH
2153 if (rc)
2154 return rc;
2155
4f57c087
JF
2156 if (dev->num_tc)
2157 netif_setup_tc(dev, txq);
2158
024e9679 2159 if (txq < dev->real_num_tx_queues) {
e6484930 2160 qdisc_reset_all_tx_gt(dev, txq);
024e9679
AD
2161#ifdef CONFIG_XPS
2162 netif_reset_xps_queues_gt(dev, txq);
2163#endif
2164 }
f0796d5c 2165 }
e6484930
TH
2166
2167 dev->real_num_tx_queues = txq;
2168 return 0;
f0796d5c
JF
2169}
2170EXPORT_SYMBOL(netif_set_real_num_tx_queues);
56079431 2171
a953be53 2172#ifdef CONFIG_SYSFS
62fe0b40
BH
2173/**
2174 * netif_set_real_num_rx_queues - set actual number of RX queues used
2175 * @dev: Network device
2176 * @rxq: Actual number of RX queues
2177 *
2178 * This must be called either with the rtnl_lock held or before
2179 * registration of the net device. Returns 0 on success, or a
4e7f7951
BH
2180 * negative error code. If called before registration, it always
2181 * succeeds.
62fe0b40
BH
2182 */
2183int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2184{
2185 int rc;
2186
bd25fa7b
TH
2187 if (rxq < 1 || rxq > dev->num_rx_queues)
2188 return -EINVAL;
2189
62fe0b40
BH
2190 if (dev->reg_state == NETREG_REGISTERED) {
2191 ASSERT_RTNL();
2192
62fe0b40
BH
2193 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2194 rxq);
2195 if (rc)
2196 return rc;
62fe0b40
BH
2197 }
2198
2199 dev->real_num_rx_queues = rxq;
2200 return 0;
2201}
2202EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2203#endif
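A hedged sketch of how the two setters are typically used together, for example from an ethtool ->set_channels() handler running under rtnl_lock(); my_set_channels is hypothetical:

static int my_set_channels(struct net_device *dev, unsigned int count)
{
	int err;

	err = netif_set_real_num_tx_queues(dev, count);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(dev, count);
}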
2204
2c53040f
BH
2205/**
2206 * netif_get_num_default_rss_queues - default number of RSS queues
16917b87
YM
2207 *
2208 * This routine should set an upper limit on the number of RSS queues
2209 * used by default by multiqueue devices.
2210 */
a55b138b 2211int netif_get_num_default_rss_queues(void)
16917b87
YM
2212{
2213 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2214}
2215EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2216
def82a1d 2217static inline void __netif_reschedule(struct Qdisc *q)
56079431 2218{
def82a1d
JP
2219 struct softnet_data *sd;
2220 unsigned long flags;
56079431 2221
def82a1d 2222 local_irq_save(flags);
903ceff7 2223 sd = this_cpu_ptr(&softnet_data);
a9cbd588
CG
2224 q->next_sched = NULL;
2225 *sd->output_queue_tailp = q;
2226 sd->output_queue_tailp = &q->next_sched;
def82a1d
JP
2227 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2228 local_irq_restore(flags);
2229}
2230
2231void __netif_schedule(struct Qdisc *q)
2232{
2233 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2234 __netif_reschedule(q);
56079431
DV
2235}
2236EXPORT_SYMBOL(__netif_schedule);
2237
e6247027
ED
2238struct dev_kfree_skb_cb {
2239 enum skb_free_reason reason;
2240};
2241
2242static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
56079431 2243{
e6247027
ED
2244 return (struct dev_kfree_skb_cb *)skb->cb;
2245}
2246
46e5da40
JF
2247void netif_schedule_queue(struct netdev_queue *txq)
2248{
2249 rcu_read_lock();
2250 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2251 struct Qdisc *q = rcu_dereference(txq->qdisc);
2252
2253 __netif_schedule(q);
2254 }
2255 rcu_read_unlock();
2256}
2257EXPORT_SYMBOL(netif_schedule_queue);
2258
2259/**
2260 * netif_wake_subqueue - allow sending packets on subqueue
2261 * @dev: network device
2262 * @queue_index: sub queue index
2263 *
2264 * Resume individual transmit queue of a device with multiple transmit queues.
2265 */
2266void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2267{
2268 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2269
2270 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2271 struct Qdisc *q;
2272
2273 rcu_read_lock();
2274 q = rcu_dereference(txq->qdisc);
2275 __netif_schedule(q);
2276 rcu_read_unlock();
2277 }
2278}
2279EXPORT_SYMBOL(netif_wake_subqueue);
2280
2281void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2282{
2283 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2284 struct Qdisc *q;
2285
2286 rcu_read_lock();
2287 q = rcu_dereference(dev_queue->qdisc);
2288 __netif_schedule(q);
2289 rcu_read_unlock();
2290 }
2291}
2292EXPORT_SYMBOL(netif_tx_wake_queue);
2293
e6247027 2294void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
56079431 2295{
e6247027 2296 unsigned long flags;
56079431 2297
e6247027
ED
2298 if (likely(atomic_read(&skb->users) == 1)) {
2299 smp_rmb();
2300 atomic_set(&skb->users, 0);
2301 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2302 return;
bea3348e 2303 }
e6247027
ED
2304 get_kfree_skb_cb(skb)->reason = reason;
2305 local_irq_save(flags);
2306 skb->next = __this_cpu_read(softnet_data.completion_queue);
2307 __this_cpu_write(softnet_data.completion_queue, skb);
2308 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2309 local_irq_restore(flags);
56079431 2310}
e6247027 2311EXPORT_SYMBOL(__dev_kfree_skb_irq);
56079431 2312
e6247027 2313void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
56079431
DV
2314{
2315 if (in_irq() || irqs_disabled())
e6247027 2316 __dev_kfree_skb_irq(skb, reason);
56079431
DV
2317 else
2318 dev_kfree_skb(skb);
2319}
e6247027 2320EXPORT_SYMBOL(__dev_kfree_skb_any);
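A hedged sketch of the usual driver-side wrappers around this helper; dev_consume_skb_any() and dev_kfree_skb_any() select the skb_free_reason, and my_tx_complete is a hypothetical completion handler:

static void my_tx_complete(struct net_device *dev, struct sk_buff *skb,
			   bool transmitted)
{
	if (transmitted)
		dev_consume_skb_any(skb);	/* normal completion */
	else
		dev_kfree_skb_any(skb);		/* counted as a drop */
}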
56079431
DV
2321
2322
bea3348e
SH
2323/**
2324 * netif_device_detach - mark device as removed
2325 * @dev: network device
2326 *
2327 * Mark device as removed from system and therefore no longer available.
2328 */
56079431
DV
2329void netif_device_detach(struct net_device *dev)
2330{
2331 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2332 netif_running(dev)) {
d543103a 2333 netif_tx_stop_all_queues(dev);
56079431
DV
2334 }
2335}
2336EXPORT_SYMBOL(netif_device_detach);
2337
bea3348e
SH
2338/**
2339 * netif_device_attach - mark device as attached
2340 * @dev: network device
2341 *
2342 * Mark device as attached to the system and restart it if needed.
2343 */
56079431
DV
2344void netif_device_attach(struct net_device *dev)
2345{
2346 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2347 netif_running(dev)) {
d543103a 2348 netif_tx_wake_all_queues(dev);
4ec93edb 2349 __netdev_watchdog_up(dev);
56079431
DV
2350 }
2351}
2352EXPORT_SYMBOL(netif_device_attach);
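A hedged sketch of the classic suspend/resume pairing for these two helpers; my_suspend and my_resume are hypothetical dev_pm_ops callbacks:

static int my_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_detach(dev);	/* stops all TX queues if running */
	/* ... put the hardware to sleep ... */
	return 0;
}

static int my_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	/* ... bring the hardware back up ... */
	netif_device_attach(dev);	/* wakes the queues and the watchdog */
	return 0;
}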
2353
5605c762
JP
2354/*
2355 * Returns a Tx hash based on the given packet descriptor and the number
2356 * of Tx queues to be used as a distribution range.
2357 */
2358u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2359 unsigned int num_tx_queues)
2360{
2361 u32 hash;
2362 u16 qoffset = 0;
2363 u16 qcount = num_tx_queues;
2364
2365 if (skb_rx_queue_recorded(skb)) {
2366 hash = skb_get_rx_queue(skb);
2367 while (unlikely(hash >= num_tx_queues))
2368 hash -= num_tx_queues;
2369 return hash;
2370 }
2371
2372 if (dev->num_tc) {
2373 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2374 qoffset = dev->tc_to_txq[tc].offset;
2375 qcount = dev->tc_to_txq[tc].count;
2376 }
2377
2378 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2379}
2380EXPORT_SYMBOL(__skb_tx_hash);
2381
36c92474
BH
2382static void skb_warn_bad_offload(const struct sk_buff *skb)
2383{
65e9d2fa 2384 static const netdev_features_t null_features = 0;
36c92474
BH
2385 struct net_device *dev = skb->dev;
2386 const char *driver = "";
2387
c846ad9b
BG
2388 if (!net_ratelimit())
2389 return;
2390
36c92474
BH
2391 if (dev && dev->dev.parent)
2392 driver = dev_driver_string(dev->dev.parent);
2393
2394 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2395 "gso_type=%d ip_summed=%d\n",
65e9d2fa
MM
2396 driver, dev ? &dev->features : &null_features,
2397 skb->sk ? &skb->sk->sk_route_caps : &null_features,
36c92474
BH
2398 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2399 skb_shinfo(skb)->gso_type, skb->ip_summed);
2400}
2401
1da177e4
LT
2402/*
2403 * Invalidate hardware checksum when packet is to be mangled, and
2404 * complete checksum manually on outgoing path.
2405 */
84fa7933 2406int skb_checksum_help(struct sk_buff *skb)
1da177e4 2407{
d3bc23e7 2408 __wsum csum;
663ead3b 2409 int ret = 0, offset;
1da177e4 2410
84fa7933 2411 if (skb->ip_summed == CHECKSUM_COMPLETE)
a430a43d
HX
2412 goto out_set_summed;
2413
2414 if (unlikely(skb_shinfo(skb)->gso_size)) {
36c92474
BH
2415 skb_warn_bad_offload(skb);
2416 return -EINVAL;
1da177e4
LT
2417 }
2418
cef401de
ED
2419 /* Before computing a checksum, we should make sure no frag could
2420 * be modified by an external entity: the checksum could be wrong.
2421 */
2422 if (skb_has_shared_frag(skb)) {
2423 ret = __skb_linearize(skb);
2424 if (ret)
2425 goto out;
2426 }
2427
55508d60 2428 offset = skb_checksum_start_offset(skb);
a030847e
HX
2429 BUG_ON(offset >= skb_headlen(skb));
2430 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2431
2432 offset += skb->csum_offset;
2433 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2434
2435 if (skb_cloned(skb) &&
2436 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1da177e4
LT
2437 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2438 if (ret)
2439 goto out;
2440 }
2441
a030847e 2442 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
a430a43d 2443out_set_summed:
1da177e4 2444 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 2445out:
1da177e4
LT
2446 return ret;
2447}
d1b19dff 2448EXPORT_SYMBOL(skb_checksum_help);
1da177e4 2449
53d6471c 2450__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
f6a78bfc 2451{
252e3346 2452 __be16 type = skb->protocol;
f6a78bfc 2453
19acc327
PS
2454 /* Tunnel gso handlers can set protocol to ethernet. */
2455 if (type == htons(ETH_P_TEB)) {
2456 struct ethhdr *eth;
2457
2458 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2459 return 0;
2460
2461 eth = (struct ethhdr *)skb_mac_header(skb);
2462 type = eth->h_proto;
2463 }
2464
d4bcef3f 2465 return __vlan_get_protocol(skb, type, depth);
ec5f0615
PS
2466}
2467
2468/**
2469 * skb_mac_gso_segment - mac layer segmentation handler.
2470 * @skb: buffer to segment
2471 * @features: features for the output path (see dev->features)
2472 */
2473struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2474 netdev_features_t features)
2475{
2476 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2477 struct packet_offload *ptype;
53d6471c
VY
2478 int vlan_depth = skb->mac_len;
2479 __be16 type = skb_network_protocol(skb, &vlan_depth);
ec5f0615
PS
2480
2481 if (unlikely(!type))
2482 return ERR_PTR(-EINVAL);
2483
53d6471c 2484 __skb_pull(skb, vlan_depth);
f6a78bfc
HX
2485
2486 rcu_read_lock();
22061d80 2487 list_for_each_entry_rcu(ptype, &offload_base, list) {
f191a1d1 2488 if (ptype->type == type && ptype->callbacks.gso_segment) {
f191a1d1 2489 segs = ptype->callbacks.gso_segment(skb, features);
f6a78bfc
HX
2490 break;
2491 }
2492 }
2493 rcu_read_unlock();
2494
98e399f8 2495 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 2496
f6a78bfc
HX
2497 return segs;
2498}
05e8ef4a
PS
2499EXPORT_SYMBOL(skb_mac_gso_segment);
2500
2501
2502/* openvswitch calls this on rx path, so we need a different check.
2503 */
2504static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2505{
2506 if (tx_path)
2507 return skb->ip_summed != CHECKSUM_PARTIAL;
2508 else
2509 return skb->ip_summed == CHECKSUM_NONE;
2510}
2511
2512/**
2513 * __skb_gso_segment - Perform segmentation on skb.
2514 * @skb: buffer to segment
2515 * @features: features for the output path (see dev->features)
2516 * @tx_path: whether it is called in TX path
2517 *
2518 * This function segments the given skb and returns a list of segments.
2519 *
2520 * It may return NULL if the skb requires no segmentation. This is
2521 * only possible when GSO is used for verifying header integrity.
2522 */
2523struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2524 netdev_features_t features, bool tx_path)
2525{
2526 if (unlikely(skb_needs_check(skb, tx_path))) {
2527 int err;
2528
2529 skb_warn_bad_offload(skb);
2530
a40e0a66 2531 err = skb_cow_head(skb, 0);
2532 if (err < 0)
05e8ef4a
PS
2533 return ERR_PTR(err);
2534 }
2535
68c33163 2536 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3347c960
ED
2537 SKB_GSO_CB(skb)->encap_level = 0;
2538
05e8ef4a
PS
2539 skb_reset_mac_header(skb);
2540 skb_reset_mac_len(skb);
2541
2542 return skb_mac_gso_segment(skb, features);
2543}
12b0004d 2544EXPORT_SYMBOL(__skb_gso_segment);
f6a78bfc 2545
fb286bb2
HX
2546/* Take action when hardware reception checksum errors are detected. */
2547#ifdef CONFIG_BUG
2548void netdev_rx_csum_fault(struct net_device *dev)
2549{
2550 if (net_ratelimit()) {
7b6cd1ce 2551 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
fb286bb2
HX
2552 dump_stack();
2553 }
2554}
2555EXPORT_SYMBOL(netdev_rx_csum_fault);
2556#endif
2557
1da177e4
LT
2558/* Actually, we should eliminate this check as soon as we know that:
2559 * 1. An IOMMU is present and can map all of the memory.
2560 * 2. No high memory really exists on this machine.
2561 */
2562
c1e756bf 2563static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 2564{
3d3a8533 2565#ifdef CONFIG_HIGHMEM
1da177e4 2566 int i;
5acbbd42 2567 if (!(dev->features & NETIF_F_HIGHDMA)) {
ea2ab693
IC
2568 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2569 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2570 if (PageHighMem(skb_frag_page(frag)))
5acbbd42 2571 return 1;
ea2ab693 2572 }
5acbbd42 2573 }
1da177e4 2574
5acbbd42
FT
2575 if (PCI_DMA_BUS_IS_PHYS) {
2576 struct device *pdev = dev->dev.parent;
1da177e4 2577
9092c658
ED
2578 if (!pdev)
2579 return 0;
5acbbd42 2580 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
ea2ab693
IC
2581 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2582 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
5acbbd42
FT
2583 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2584 return 1;
2585 }
2586 }
3d3a8533 2587#endif
1da177e4
LT
2588 return 0;
2589}
1da177e4 2590
3b392ddb
SH
2591/* If MPLS offload request, verify we are testing hardware MPLS features
2592 * instead of standard features for the netdev.
2593 */
d0edc7bf 2594#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3b392ddb
SH
2595static netdev_features_t net_mpls_features(struct sk_buff *skb,
2596 netdev_features_t features,
2597 __be16 type)
2598{
25cd9ba0 2599 if (eth_p_mpls(type))
3b392ddb
SH
2600 features &= skb->dev->mpls_features;
2601
2602 return features;
2603}
2604#else
2605static netdev_features_t net_mpls_features(struct sk_buff *skb,
2606 netdev_features_t features,
2607 __be16 type)
2608{
2609 return features;
2610}
2611#endif
2612
c8f44aff 2613static netdev_features_t harmonize_features(struct sk_buff *skb,
c1e756bf 2614 netdev_features_t features)
f01a5236 2615{
53d6471c 2616 int tmp;
3b392ddb
SH
2617 __be16 type;
2618
2619 type = skb_network_protocol(skb, &tmp);
2620 features = net_mpls_features(skb, features, type);
53d6471c 2621
c0d680e5 2622 if (skb->ip_summed != CHECKSUM_NONE &&
3b392ddb 2623 !can_checksum_protocol(features, type)) {
f01a5236 2624 features &= ~NETIF_F_ALL_CSUM;
c1e756bf 2625 } else if (illegal_highdma(skb->dev, skb)) {
f01a5236
JG
2626 features &= ~NETIF_F_SG;
2627 }
2628
2629 return features;
2630}
2631
e38f3025
TM
2632netdev_features_t passthru_features_check(struct sk_buff *skb,
2633 struct net_device *dev,
2634 netdev_features_t features)
2635{
2636 return features;
2637}
2638EXPORT_SYMBOL(passthru_features_check);
2639
8cb65d00
TM
2640static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2641 struct net_device *dev,
2642 netdev_features_t features)
2643{
2644 return vlan_features_check(skb, features);
2645}
2646
c1e756bf 2647netdev_features_t netif_skb_features(struct sk_buff *skb)
58e998c6 2648{
5f35227e 2649 struct net_device *dev = skb->dev;
fcbeb976
ED
2650 netdev_features_t features = dev->features;
2651 u16 gso_segs = skb_shinfo(skb)->gso_segs;
58e998c6 2652
fcbeb976 2653 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
30b678d8
BH
2654 features &= ~NETIF_F_GSO_MASK;
2655
5f35227e
JG
2656 /* If encapsulation offload request, verify we are testing
2657 * hardware encapsulation features instead of standard
2658 * features for the netdev
2659 */
2660 if (skb->encapsulation)
2661 features &= dev->hw_enc_features;
2662
f5a7fb88
TM
2663 if (skb_vlan_tagged(skb))
2664 features = netdev_intersect_features(features,
2665 dev->vlan_features |
2666 NETIF_F_HW_VLAN_CTAG_TX |
2667 NETIF_F_HW_VLAN_STAG_TX);
f01a5236 2668
5f35227e
JG
2669 if (dev->netdev_ops->ndo_features_check)
2670 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2671 features);
8cb65d00
TM
2672 else
2673 features &= dflt_features_check(skb, dev, features);
5f35227e 2674
c1e756bf 2675 return harmonize_features(skb, features);
58e998c6 2676}
c1e756bf 2677EXPORT_SYMBOL(netif_skb_features);
58e998c6 2678
2ea25513 2679static int xmit_one(struct sk_buff *skb, struct net_device *dev,
95f6b3dd 2680 struct netdev_queue *txq, bool more)
f6a78bfc 2681{
2ea25513
DM
2682 unsigned int len;
2683 int rc;
00829823 2684
7866a621 2685 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2ea25513 2686 dev_queue_xmit_nit(skb, dev);
fc741216 2687
2ea25513
DM
2688 len = skb->len;
2689 trace_net_dev_start_xmit(skb, dev);
95f6b3dd 2690 rc = netdev_start_xmit(skb, dev, txq, more);
2ea25513 2691 trace_net_dev_xmit(skb, rc, dev, len);
adf30907 2692
2ea25513
DM
2693 return rc;
2694}
7b9c6090 2695
8dcda22a
DM
2696struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2697 struct netdev_queue *txq, int *ret)
7f2e870f
DM
2698{
2699 struct sk_buff *skb = first;
2700 int rc = NETDEV_TX_OK;
7b9c6090 2701
7f2e870f
DM
2702 while (skb) {
2703 struct sk_buff *next = skb->next;
fc70fb64 2704
7f2e870f 2705 skb->next = NULL;
95f6b3dd 2706 rc = xmit_one(skb, dev, txq, next != NULL);
7f2e870f
DM
2707 if (unlikely(!dev_xmit_complete(rc))) {
2708 skb->next = next;
2709 goto out;
2710 }
6afff0ca 2711
7f2e870f
DM
2712 skb = next;
2713 if (netif_xmit_stopped(txq) && skb) {
2714 rc = NETDEV_TX_BUSY;
2715 break;
9ccb8975 2716 }
7f2e870f 2717 }
9ccb8975 2718
7f2e870f
DM
2719out:
2720 *ret = rc;
2721 return skb;
2722}
b40863c6 2723
1ff0dc94
ED
2724static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2725 netdev_features_t features)
f6a78bfc 2726{
df8a39de 2727 if (skb_vlan_tag_present(skb) &&
5968250c
JP
2728 !vlan_hw_offload_capable(features, skb->vlan_proto))
2729 skb = __vlan_hwaccel_push_inside(skb);
eae3f88e
DM
2730 return skb;
2731}
f6a78bfc 2732
55a93b3e 2733static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
eae3f88e
DM
2734{
2735 netdev_features_t features;
f6a78bfc 2736
eae3f88e
DM
2737 if (skb->next)
2738 return skb;
068a2de5 2739
eae3f88e
DM
2740 features = netif_skb_features(skb);
2741 skb = validate_xmit_vlan(skb, features);
2742 if (unlikely(!skb))
2743 goto out_null;
7b9c6090 2744
8b86a61d 2745 if (netif_needs_gso(skb, features)) {
ce93718f
DM
2746 struct sk_buff *segs;
2747
2748 segs = skb_gso_segment(skb, features);
cecda693 2749 if (IS_ERR(segs)) {
af6dabc9 2750 goto out_kfree_skb;
cecda693
JW
2751 } else if (segs) {
2752 consume_skb(skb);
2753 skb = segs;
f6a78bfc 2754 }
eae3f88e
DM
2755 } else {
2756 if (skb_needs_linearize(skb, features) &&
2757 __skb_linearize(skb))
2758 goto out_kfree_skb;
4ec93edb 2759
eae3f88e
DM
2760 /* If packet is not checksummed and device does not
2761 * support checksumming for this protocol, complete
2762 * checksumming here.
2763 */
2764 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2765 if (skb->encapsulation)
2766 skb_set_inner_transport_header(skb,
2767 skb_checksum_start_offset(skb));
2768 else
2769 skb_set_transport_header(skb,
2770 skb_checksum_start_offset(skb));
2771 if (!(features & NETIF_F_ALL_CSUM) &&
2772 skb_checksum_help(skb))
2773 goto out_kfree_skb;
7b9c6090 2774 }
0c772159 2775 }
7b9c6090 2776
eae3f88e 2777 return skb;
fc70fb64 2778
f6a78bfc
HX
2779out_kfree_skb:
2780 kfree_skb(skb);
eae3f88e
DM
2781out_null:
2782 return NULL;
2783}
6afff0ca 2784
55a93b3e
ED
2785struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2786{
2787 struct sk_buff *next, *head = NULL, *tail;
2788
bec3cfdc 2789 for (; skb != NULL; skb = next) {
55a93b3e
ED
2790 next = skb->next;
2791 skb->next = NULL;
bec3cfdc
ED
2792
2793 /* in case skb won't be segmented, point to itself */
2794 skb->prev = skb;
2795
55a93b3e 2796 skb = validate_xmit_skb(skb, dev);
bec3cfdc
ED
2797 if (!skb)
2798 continue;
55a93b3e 2799
bec3cfdc
ED
2800 if (!head)
2801 head = skb;
2802 else
2803 tail->next = skb;
2804 /* If skb was segmented, skb->prev points to
2805 * the last segment. If not, it still contains skb.
2806 */
2807 tail = skb->prev;
55a93b3e
ED
2808 }
2809 return head;
f6a78bfc
HX
2810}
2811
1def9238
ED
2812static void qdisc_pkt_len_init(struct sk_buff *skb)
2813{
2814 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2815
2816 qdisc_skb_cb(skb)->pkt_len = skb->len;
2817
2818 /* To get more precise estimation of bytes sent on wire,
2819 * we add the header size of all segments to pkt_len
2820 */
2821 if (shinfo->gso_size) {
757b8b1d 2822 unsigned int hdr_len;
15e5a030 2823 u16 gso_segs = shinfo->gso_segs;
1def9238 2824
757b8b1d
ED
2825 /* mac layer + network layer */
2826 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2827
2828 /* + transport layer */
1def9238
ED
2829 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2830 hdr_len += tcp_hdrlen(skb);
2831 else
2832 hdr_len += sizeof(struct udphdr);
15e5a030
JW
2833
2834 if (shinfo->gso_type & SKB_GSO_DODGY)
2835 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2836 shinfo->gso_size);
2837
2838 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
1def9238
ED
2839 }
2840}
2841
bbd8a0d3
KK
2842static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2843 struct net_device *dev,
2844 struct netdev_queue *txq)
2845{
2846 spinlock_t *root_lock = qdisc_lock(q);
a2da570d 2847 bool contended;
bbd8a0d3
KK
2848 int rc;
2849
1def9238 2850 qdisc_pkt_len_init(skb);
a2da570d 2851 qdisc_calculate_pkt_len(skb, q);
79640a4c
ED
2852 /*
2853 * Heuristic to force contended enqueues to serialize on a
2854 * separate lock before trying to get qdisc main lock.
9bf2b8c2
YX
2855 * This permits __QDISC___STATE_RUNNING owner to get the lock more
2856 * often and dequeue packets faster.
79640a4c 2857 */
a2da570d 2858 contended = qdisc_is_running(q);
79640a4c
ED
2859 if (unlikely(contended))
2860 spin_lock(&q->busylock);
2861
bbd8a0d3
KK
2862 spin_lock(root_lock);
2863 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2864 kfree_skb(skb);
2865 rc = NET_XMIT_DROP;
2866 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 2867 qdisc_run_begin(q)) {
bbd8a0d3
KK
2868 /*
2869 * This is a work-conserving queue; there are no old skbs
2870 * waiting to be sent out; and the qdisc is not running -
2871 * xmit the skb directly.
2872 */
bfe0d029 2873
bfe0d029
ED
2874 qdisc_bstats_update(q, skb);
2875
55a93b3e 2876 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
79640a4c
ED
2877 if (unlikely(contended)) {
2878 spin_unlock(&q->busylock);
2879 contended = false;
2880 }
bbd8a0d3 2881 __qdisc_run(q);
79640a4c 2882 } else
bc135b23 2883 qdisc_run_end(q);
bbd8a0d3
KK
2884
2885 rc = NET_XMIT_SUCCESS;
2886 } else {
a2da570d 2887 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
79640a4c
ED
2888 if (qdisc_run_begin(q)) {
2889 if (unlikely(contended)) {
2890 spin_unlock(&q->busylock);
2891 contended = false;
2892 }
2893 __qdisc_run(q);
2894 }
bbd8a0d3
KK
2895 }
2896 spin_unlock(root_lock);
79640a4c
ED
2897 if (unlikely(contended))
2898 spin_unlock(&q->busylock);
bbd8a0d3
KK
2899 return rc;
2900}
2901
86f8515f 2902#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
5bc1421e
NH
2903static void skb_update_prio(struct sk_buff *skb)
2904{
6977a79d 2905 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
5bc1421e 2906
91c68ce2
ED
2907 if (!skb->priority && skb->sk && map) {
2908 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2909
2910 if (prioidx < map->priomap_len)
2911 skb->priority = map->priomap[prioidx];
2912 }
5bc1421e
NH
2913}
2914#else
2915#define skb_update_prio(skb)
2916#endif
2917
f60e5990 2918DEFINE_PER_CPU(int, xmit_recursion);
2919EXPORT_SYMBOL(xmit_recursion);
2920
11a766ce 2921#define RECURSION_LIMIT 10
745e20f1 2922
95603e22
MM
2923/**
2924 * dev_loopback_xmit - loop back @skb
2925 * @skb: buffer to transmit
2926 */
7026b1dd 2927int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
95603e22
MM
2928{
2929 skb_reset_mac_header(skb);
2930 __skb_pull(skb, skb_network_offset(skb));
2931 skb->pkt_type = PACKET_LOOPBACK;
2932 skb->ip_summed = CHECKSUM_UNNECESSARY;
2933 WARN_ON(!skb_dst(skb));
2934 skb_dst_force(skb);
2935 netif_rx_ni(skb);
2936 return 0;
2937}
2938EXPORT_SYMBOL(dev_loopback_xmit);
2939
638b2a69
JP
2940static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2941{
2942#ifdef CONFIG_XPS
2943 struct xps_dev_maps *dev_maps;
2944 struct xps_map *map;
2945 int queue_index = -1;
2946
2947 rcu_read_lock();
2948 dev_maps = rcu_dereference(dev->xps_maps);
2949 if (dev_maps) {
2950 map = rcu_dereference(
2951 dev_maps->cpu_map[skb->sender_cpu - 1]);
2952 if (map) {
2953 if (map->len == 1)
2954 queue_index = map->queues[0];
2955 else
2956 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
2957 map->len)];
2958 if (unlikely(queue_index >= dev->real_num_tx_queues))
2959 queue_index = -1;
2960 }
2961 }
2962 rcu_read_unlock();
2963
2964 return queue_index;
2965#else
2966 return -1;
2967#endif
2968}
2969
2970static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
2971{
2972 struct sock *sk = skb->sk;
2973 int queue_index = sk_tx_queue_get(sk);
2974
2975 if (queue_index < 0 || skb->ooo_okay ||
2976 queue_index >= dev->real_num_tx_queues) {
2977 int new_index = get_xps_queue(dev, skb);
2978 if (new_index < 0)
2979 new_index = skb_tx_hash(dev, skb);
2980
2981 if (queue_index != new_index && sk &&
2982 rcu_access_pointer(sk->sk_dst_cache))
2983 sk_tx_queue_set(sk, new_index);
2984
2985 queue_index = new_index;
2986 }
2987
2988 return queue_index;
2989}
2990
2991struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2992 struct sk_buff *skb,
2993 void *accel_priv)
2994{
2995 int queue_index = 0;
2996
2997#ifdef CONFIG_XPS
2998 if (skb->sender_cpu == 0)
2999 skb->sender_cpu = raw_smp_processor_id() + 1;
3000#endif
3001
3002 if (dev->real_num_tx_queues != 1) {
3003 const struct net_device_ops *ops = dev->netdev_ops;
3004 if (ops->ndo_select_queue)
3005 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3006 __netdev_pick_tx);
3007 else
3008 queue_index = __netdev_pick_tx(dev, skb);
3009
3010 if (!accel_priv)
3011 queue_index = netdev_cap_txqueue(dev, queue_index);
3012 }
3013
3014 skb_set_queue_mapping(skb, queue_index);
3015 return netdev_get_tx_queue(dev, queue_index);
3016}
3017
d29f749e 3018/**
9d08dd3d 3019 * __dev_queue_xmit - transmit a buffer
d29f749e 3020 * @skb: buffer to transmit
9d08dd3d 3021 * @accel_priv: private data used for L2 forwarding offload
d29f749e
DJ
3022 *
3023 * Queue a buffer for transmission to a network device. The caller must
3024 * have set the device and priority and built the buffer before calling
3025 * this function. The function can be called from an interrupt.
3026 *
3027 * A negative errno code is returned on a failure. A success does not
3028 * guarantee the frame will be transmitted as it may be dropped due
3029 * to congestion or traffic shaping.
3030 *
3031 * -----------------------------------------------------------------------------------
3032 * I notice this method can also return errors from the queue disciplines,
3033 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3034 * be positive.
3035 *
3036 * Regardless of the return value, the skb is consumed, so it is currently
3037 * difficult to retry a send to this method. (You can bump the ref count
3038 * before sending to hold a reference for retry if you are careful.)
3039 *
3040 * When calling this method, interrupts MUST be enabled. This is because
3041 * the BH enable code must have IRQs enabled so that it will not deadlock.
3042 * --BLG
3043 */
0a59f3a9 3044static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
1da177e4
LT
3045{
3046 struct net_device *dev = skb->dev;
dc2b4847 3047 struct netdev_queue *txq;
1da177e4
LT
3048 struct Qdisc *q;
3049 int rc = -ENOMEM;
3050
6d1ccff6
ED
3051 skb_reset_mac_header(skb);
3052
e7fd2885
WB
3053 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3054 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3055
4ec93edb
YH
3056 /* Disable soft irqs for various locks below. Also
3057 * stops preemption for RCU.
1da177e4 3058 */
4ec93edb 3059 rcu_read_lock_bh();
1da177e4 3060
5bc1421e
NH
3061 skb_update_prio(skb);
3062
02875878
ED
3063 /* If device/qdisc don't need skb->dst, release it right now while
3064 * its hot in this cpu cache.
3065 */
3066 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3067 skb_dst_drop(skb);
3068 else
3069 skb_dst_force(skb);
3070
f663dd9a 3071 txq = netdev_pick_tx(dev, skb, accel_priv);
a898def2 3072 q = rcu_dereference_bh(txq->qdisc);
37437bb2 3073
1da177e4 3074#ifdef CONFIG_NET_CLS_ACT
d1b19dff 3075 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1da177e4 3076#endif
cf66ba58 3077 trace_net_dev_queue(skb);
1da177e4 3078 if (q->enqueue) {
bbd8a0d3 3079 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 3080 goto out;
1da177e4
LT
3081 }
3082
3083 /* The device has no queue. Common case for software devices:
3084 loopback, all the sorts of tunnels...
3085
932ff279
HX
3086 Really, it is unlikely that netif_tx_lock protection is necessary
3087 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
1da177e4
LT
3088 counters.)
3089 However, it is possible that they rely on the protection
3090 we provide here.
3091
3092 Check this and shoot the lock. It is not prone to deadlocks.
3093 Either shoot the noqueue qdisc, it is even simpler 8)
3094 */
3095 if (dev->flags & IFF_UP) {
3096 int cpu = smp_processor_id(); /* ok because BHs are off */
3097
c773e847 3098 if (txq->xmit_lock_owner != cpu) {
1da177e4 3099
745e20f1
ED
3100 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
3101 goto recursion_alert;
3102
1f59533f
JDB
3103 skb = validate_xmit_skb(skb, dev);
3104 if (!skb)
3105 goto drop;
3106
c773e847 3107 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 3108
73466498 3109 if (!netif_xmit_stopped(txq)) {
745e20f1 3110 __this_cpu_inc(xmit_recursion);
ce93718f 3111 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
745e20f1 3112 __this_cpu_dec(xmit_recursion);
572a9d7b 3113 if (dev_xmit_complete(rc)) {
c773e847 3114 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
3115 goto out;
3116 }
3117 }
c773e847 3118 HARD_TX_UNLOCK(dev, txq);
e87cc472
JP
3119 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3120 dev->name);
1da177e4
LT
3121 } else {
3122 /* Recursion is detected! It is possible,
745e20f1
ED
3123 * unfortunately
3124 */
3125recursion_alert:
e87cc472
JP
3126 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3127 dev->name);
1da177e4
LT
3128 }
3129 }
3130
3131 rc = -ENETDOWN;
1f59533f 3132drop:
d4828d85 3133 rcu_read_unlock_bh();
1da177e4 3134
015f0688 3135 atomic_long_inc(&dev->tx_dropped);
1f59533f 3136 kfree_skb_list(skb);
1da177e4
LT
3137 return rc;
3138out:
d4828d85 3139 rcu_read_unlock_bh();
1da177e4
LT
3140 return rc;
3141}
f663dd9a 3142
7026b1dd 3143int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
f663dd9a
JW
3144{
3145 return __dev_queue_xmit(skb, NULL);
3146}
7026b1dd 3147EXPORT_SYMBOL(dev_queue_xmit_sk);
1da177e4 3148
f663dd9a
JW
3149int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3150{
3151 return __dev_queue_xmit(skb, accel_priv);
3152}
3153EXPORT_SYMBOL(dev_queue_xmit_accel);
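A hedged sketch of the common caller side: a module that has built a complete Ethernet frame hands it to dev_queue_xmit(), the usual wrapper around the __dev_queue_xmit() path above; my_send_frame is hypothetical:

static int my_send_frame(struct net_device *dev, const void *frame,
			 unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), frame, len);
	skb->dev = dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Picks a TX queue, runs the qdisc and hands the skb to the
	 * driver; the skb is consumed whatever the outcome.
	 */
	return dev_queue_xmit(skb);
}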
3154
1da177e4
LT
3155
3156/*=======================================================================
3157 Receiver routines
3158 =======================================================================*/
3159
6b2bedc3 3160int netdev_max_backlog __read_mostly = 1000;
c9e6bc64
ED
3161EXPORT_SYMBOL(netdev_max_backlog);
3162
3b098e2d 3163int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3
SH
3164int netdev_budget __read_mostly = 300;
3165int weight_p __read_mostly = 64; /* old backlog weight */
1da177e4 3166
eecfd7c4
ED
3167/* Called with irq disabled */
3168static inline void ____napi_schedule(struct softnet_data *sd,
3169 struct napi_struct *napi)
3170{
3171 list_add_tail(&napi->poll_list, &sd->poll_list);
3172 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3173}
3174
bfb564e7
KK
3175#ifdef CONFIG_RPS
3176
3177/* One global table that all flow-based protocols share. */
6e3f7faf 3178struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
bfb564e7 3179EXPORT_SYMBOL(rps_sock_flow_table);
567e4b79
ED
3180u32 rps_cpu_mask __read_mostly;
3181EXPORT_SYMBOL(rps_cpu_mask);
bfb564e7 3182
c5905afb 3183struct static_key rps_needed __read_mostly;
adc9300e 3184
c445477d
BH
3185static struct rps_dev_flow *
3186set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3187 struct rps_dev_flow *rflow, u16 next_cpu)
3188{
a31196b0 3189 if (next_cpu < nr_cpu_ids) {
c445477d
BH
3190#ifdef CONFIG_RFS_ACCEL
3191 struct netdev_rx_queue *rxqueue;
3192 struct rps_dev_flow_table *flow_table;
3193 struct rps_dev_flow *old_rflow;
3194 u32 flow_id;
3195 u16 rxq_index;
3196 int rc;
3197
3198 /* Should we steer this flow to a different hardware queue? */
69a19ee6
BH
3199 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3200 !(dev->features & NETIF_F_NTUPLE))
c445477d
BH
3201 goto out;
3202 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3203 if (rxq_index == skb_get_rx_queue(skb))
3204 goto out;
3205
3206 rxqueue = dev->_rx + rxq_index;
3207 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3208 if (!flow_table)
3209 goto out;
61b905da 3210 flow_id = skb_get_hash(skb) & flow_table->mask;
c445477d
BH
3211 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3212 rxq_index, flow_id);
3213 if (rc < 0)
3214 goto out;
3215 old_rflow = rflow;
3216 rflow = &flow_table->flows[flow_id];
c445477d
BH
3217 rflow->filter = rc;
3218 if (old_rflow->filter == rflow->filter)
3219 old_rflow->filter = RPS_NO_FILTER;
3220 out:
3221#endif
3222 rflow->last_qtail =
09994d1b 3223 per_cpu(softnet_data, next_cpu).input_queue_head;
c445477d
BH
3224 }
3225
09994d1b 3226 rflow->cpu = next_cpu;
c445477d
BH
3227 return rflow;
3228}
3229
bfb564e7
KK
3230/*
3231 * get_rps_cpu is called from netif_receive_skb and returns the target
3232 * CPU from the RPS map of the receiving queue for a given skb.
3233 * rcu_read_lock must be held on entry.
3234 */
3235static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3236 struct rps_dev_flow **rflowp)
3237{
567e4b79
ED
3238 const struct rps_sock_flow_table *sock_flow_table;
3239 struct netdev_rx_queue *rxqueue = dev->_rx;
bfb564e7 3240 struct rps_dev_flow_table *flow_table;
567e4b79 3241 struct rps_map *map;
bfb564e7 3242 int cpu = -1;
567e4b79 3243 u32 tcpu;
61b905da 3244 u32 hash;
bfb564e7
KK
3245
3246 if (skb_rx_queue_recorded(skb)) {
3247 u16 index = skb_get_rx_queue(skb);
567e4b79 3248
62fe0b40
BH
3249 if (unlikely(index >= dev->real_num_rx_queues)) {
3250 WARN_ONCE(dev->real_num_rx_queues > 1,
3251 "%s received packet on queue %u, but number "
3252 "of RX queues is %u\n",
3253 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
3254 goto done;
3255 }
567e4b79
ED
3256 rxqueue += index;
3257 }
bfb564e7 3258
567e4b79
ED
3259 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3260
3261 flow_table = rcu_dereference(rxqueue->rps_flow_table);
6e3f7faf 3262 map = rcu_dereference(rxqueue->rps_map);
567e4b79 3263 if (!flow_table && !map)
bfb564e7
KK
3264 goto done;
3265
2d47b459 3266 skb_reset_network_header(skb);
61b905da
TH
3267 hash = skb_get_hash(skb);
3268 if (!hash)
bfb564e7
KK
3269 goto done;
3270
fec5e652
TH
3271 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3272 if (flow_table && sock_flow_table) {
fec5e652 3273 struct rps_dev_flow *rflow;
567e4b79
ED
3274 u32 next_cpu;
3275 u32 ident;
3276
3277 /* First check into global flow table if there is a match */
3278 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3279 if ((ident ^ hash) & ~rps_cpu_mask)
3280 goto try_rps;
fec5e652 3281
567e4b79
ED
3282 next_cpu = ident & rps_cpu_mask;
3283
3284 /* OK, now we know there is a match,
3285 * we can look at the local (per receive queue) flow table
3286 */
61b905da 3287 rflow = &flow_table->flows[hash & flow_table->mask];
fec5e652
TH
3288 tcpu = rflow->cpu;
3289
fec5e652
TH
3290 /*
3291 * If the desired CPU (where last recvmsg was done) is
3292 * different from current CPU (one in the rx-queue flow
3293 * table entry), switch if one of the following holds:
a31196b0 3294 * - Current CPU is unset (>= nr_cpu_ids).
fec5e652
TH
3295 * - Current CPU is offline.
3296 * - The current CPU's queue tail has advanced beyond the
3297 * last packet that was enqueued using this table entry.
3298 * This guarantees that all previous packets for the flow
3299 * have been dequeued, thus preserving in order delivery.
3300 */
3301 if (unlikely(tcpu != next_cpu) &&
a31196b0 3302 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
fec5e652 3303 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
baefa31d
TH
3304 rflow->last_qtail)) >= 0)) {
3305 tcpu = next_cpu;
c445477d 3306 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
baefa31d 3307 }
c445477d 3308
a31196b0 3309 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
fec5e652
TH
3310 *rflowp = rflow;
3311 cpu = tcpu;
3312 goto done;
3313 }
3314 }
3315
567e4b79
ED
3316try_rps:
3317
0a9627f2 3318 if (map) {
8fc54f68 3319 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
0a9627f2
TH
3320 if (cpu_online(tcpu)) {
3321 cpu = tcpu;
3322 goto done;
3323 }
3324 }
3325
3326done:
0a9627f2
TH
3327 return cpu;
3328}
3329
c445477d
BH
3330#ifdef CONFIG_RFS_ACCEL
3331
3332/**
3333 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3334 * @dev: Device on which the filter was set
3335 * @rxq_index: RX queue index
3336 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3337 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3338 *
3339 * Drivers that implement ndo_rx_flow_steer() should periodically call
3340 * this function for each installed filter and remove the filters for
3341 * which it returns %true.
3342 */
3343bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3344 u32 flow_id, u16 filter_id)
3345{
3346 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3347 struct rps_dev_flow_table *flow_table;
3348 struct rps_dev_flow *rflow;
3349 bool expire = true;
a31196b0 3350 unsigned int cpu;
c445477d
BH
3351
3352 rcu_read_lock();
3353 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3354 if (flow_table && flow_id <= flow_table->mask) {
3355 rflow = &flow_table->flows[flow_id];
3356 cpu = ACCESS_ONCE(rflow->cpu);
a31196b0 3357 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
c445477d
BH
3358 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3359 rflow->last_qtail) <
3360 (int)(10 * flow_table->mask)))
3361 expire = false;
3362 }
3363 rcu_read_unlock();
3364 return expire;
3365}
3366EXPORT_SYMBOL(rps_may_expire_flow);
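A hedged sketch of the periodic scan the kernel-doc above describes, for an RFS-accelerated (CONFIG_RFS_ACCEL) driver; struct my_filter and my_expire_filters are hypothetical:

struct my_filter {
	u32 flow_id;
	u16 rxq_index;
	u16 filter_id;
	bool in_use;
};

static void my_expire_filters(struct net_device *dev,
			      struct my_filter *tbl, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			continue;
		if (rps_may_expire_flow(dev, tbl[i].rxq_index,
					tbl[i].flow_id, tbl[i].filter_id)) {
			/* tear down the hardware steering filter here */
			tbl[i].in_use = false;
		}
	}
}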
3367
3368#endif /* CONFIG_RFS_ACCEL */
3369
0a9627f2 3370/* Called from hardirq (IPI) context */
e36fa2f7 3371static void rps_trigger_softirq(void *data)
0a9627f2 3372{
e36fa2f7
ED
3373 struct softnet_data *sd = data;
3374
eecfd7c4 3375 ____napi_schedule(sd, &sd->backlog);
dee42870 3376 sd->received_rps++;
0a9627f2 3377}
e36fa2f7 3378
fec5e652 3379#endif /* CONFIG_RPS */
0a9627f2 3380
e36fa2f7
ED
3381/*
3382 * Check if this softnet_data structure belongs to another CPU.
3383 * If yes, queue it to our IPI list and return 1.
3384 * If no, return 0.
3385 */
3386static int rps_ipi_queued(struct softnet_data *sd)
3387{
3388#ifdef CONFIG_RPS
903ceff7 3389 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
e36fa2f7
ED
3390
3391 if (sd != mysd) {
3392 sd->rps_ipi_next = mysd->rps_ipi_list;
3393 mysd->rps_ipi_list = sd;
3394
3395 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3396 return 1;
3397 }
3398#endif /* CONFIG_RPS */
3399 return 0;
3400}
3401
99bbc707
WB
3402#ifdef CONFIG_NET_FLOW_LIMIT
3403int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3404#endif
3405
3406static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3407{
3408#ifdef CONFIG_NET_FLOW_LIMIT
3409 struct sd_flow_limit *fl;
3410 struct softnet_data *sd;
3411 unsigned int old_flow, new_flow;
3412
3413 if (qlen < (netdev_max_backlog >> 1))
3414 return false;
3415
903ceff7 3416 sd = this_cpu_ptr(&softnet_data);
99bbc707
WB
3417
3418 rcu_read_lock();
3419 fl = rcu_dereference(sd->flow_limit);
3420 if (fl) {
3958afa1 3421 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
99bbc707
WB
3422 old_flow = fl->history[fl->history_head];
3423 fl->history[fl->history_head] = new_flow;
3424
3425 fl->history_head++;
3426 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3427
3428 if (likely(fl->buckets[old_flow]))
3429 fl->buckets[old_flow]--;
3430
3431 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3432 fl->count++;
3433 rcu_read_unlock();
3434 return true;
3435 }
3436 }
3437 rcu_read_unlock();
3438#endif
3439 return false;
3440}
3441
0a9627f2
TH
3442/*
3443 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3444 * queue (may be a remote CPU queue).
3445 */
fec5e652
TH
3446static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3447 unsigned int *qtail)
0a9627f2 3448{
e36fa2f7 3449 struct softnet_data *sd;
0a9627f2 3450 unsigned long flags;
99bbc707 3451 unsigned int qlen;
0a9627f2 3452
e36fa2f7 3453 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
3454
3455 local_irq_save(flags);
0a9627f2 3456
e36fa2f7 3457 rps_lock(sd);
99bbc707
WB
3458 qlen = skb_queue_len(&sd->input_pkt_queue);
3459 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
e008f3f0 3460 if (qlen) {
0a9627f2 3461enqueue:
e36fa2f7 3462 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 3463 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 3464 rps_unlock(sd);
152102c7 3465 local_irq_restore(flags);
0a9627f2
TH
3466 return NET_RX_SUCCESS;
3467 }
3468
ebda37c2
ED
3469 /* Schedule NAPI for the backlog device.
3470 * We can use a non-atomic operation since we own the queue lock.
3471 */
3472 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 3473 if (!rps_ipi_queued(sd))
eecfd7c4 3474 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
3475 }
3476 goto enqueue;
3477 }
3478
dee42870 3479 sd->dropped++;
e36fa2f7 3480 rps_unlock(sd);
0a9627f2 3481
0a9627f2
TH
3482 local_irq_restore(flags);
3483
caf586e5 3484 atomic_long_inc(&skb->dev->rx_dropped);
0a9627f2
TH
3485 kfree_skb(skb);
3486 return NET_RX_DROP;
3487}
1da177e4 3488
ae78dbfa 3489static int netif_rx_internal(struct sk_buff *skb)
1da177e4 3490{
b0e28f1e 3491 int ret;
1da177e4 3492
588f0330 3493 net_timestamp_check(netdev_tstamp_prequeue, skb);
1da177e4 3494
cf66ba58 3495 trace_netif_rx(skb);
df334545 3496#ifdef CONFIG_RPS
c5905afb 3497 if (static_key_false(&rps_needed)) {
fec5e652 3498 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
3499 int cpu;
3500
cece1945 3501 preempt_disable();
b0e28f1e 3502 rcu_read_lock();
fec5e652
TH
3503
3504 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
3505 if (cpu < 0)
3506 cpu = smp_processor_id();
fec5e652
TH
3507
3508 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3509
b0e28f1e 3510 rcu_read_unlock();
cece1945 3511 preempt_enable();
adc9300e
ED
3512 } else
3513#endif
fec5e652
TH
3514 {
3515 unsigned int qtail;
3516 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3517 put_cpu();
3518 }
b0e28f1e 3519 return ret;
1da177e4 3520}
ae78dbfa
BH
3521
3522/**
3523 * netif_rx - post buffer to the network code
3524 * @skb: buffer to post
3525 *
3526 * This function receives a packet from a device driver and queues it for
3527 * the upper (protocol) levels to process. It always succeeds. The buffer
3528 * may be dropped during processing for congestion control or by the
3529 * protocol layers.
3530 *
3531 * return values:
3532 * NET_RX_SUCCESS (no congestion)
3533 * NET_RX_DROP (packet was dropped)
3534 *
3535 */
3536
3537int netif_rx(struct sk_buff *skb)
3538{
3539 trace_netif_rx_entry(skb);
3540
3541 return netif_rx_internal(skb);
3542}
d1b19dff 3543EXPORT_SYMBOL(netif_rx);
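For illustration, a minimal sketch (not part of dev.c) of how a hypothetical driver's interrupt handler would hand a received frame to netif_rx(); the mydev_* helpers stand in for device-specific RX logic and are invented for the example:

/* Illustrative sketch only -- assumes a hypothetical "mydev" device that has
 * already placed a complete Ethernet frame in its receive buffer.
 */
static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sk_buff *skb;
	unsigned int len = mydev_rx_frame_len(dev);	/* hypothetical helper */

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return IRQ_HANDLED;
	}
	mydev_copy_rx_frame(dev, skb_put(skb, len));	/* hypothetical helper */
	skb->protocol = eth_type_trans(skb, dev);

	netif_rx(skb);		/* queue for the protocol layers; irq-safe */
	return IRQ_HANDLED;
}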
1da177e4
LT
3544
3545int netif_rx_ni(struct sk_buff *skb)
3546{
3547 int err;
3548
ae78dbfa
BH
3549 trace_netif_rx_ni_entry(skb);
3550
1da177e4 3551 preempt_disable();
ae78dbfa 3552 err = netif_rx_internal(skb);
1da177e4
LT
3553 if (local_softirq_pending())
3554 do_softirq();
3555 preempt_enable();
3556
3557 return err;
3558}
1da177e4
LT
3559EXPORT_SYMBOL(netif_rx_ni);
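netif_rx_ni() is the process-context variant: it disables preemption around the enqueue and runs any softirq it raised before returning. A minimal sketch (not part of dev.c) of injecting a locally built frame from process context; the function name is invented:

/* Illustrative sketch only -- "skb" is assumed to be a complete Ethernet
 * frame owned by the caller, and we are in process context.
 */
static void mydev_inject_from_task(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx_ni(skb);	/* may run NET_RX_SOFTIRQ directly */
}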
3560
1da177e4
LT
3561static void net_tx_action(struct softirq_action *h)
3562{
903ceff7 3563 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
1da177e4
LT
3564
3565 if (sd->completion_queue) {
3566 struct sk_buff *clist;
3567
3568 local_irq_disable();
3569 clist = sd->completion_queue;
3570 sd->completion_queue = NULL;
3571 local_irq_enable();
3572
3573 while (clist) {
3574 struct sk_buff *skb = clist;
3575 clist = clist->next;
3576
547b792c 3577 WARN_ON(atomic_read(&skb->users));
e6247027
ED
3578 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3579 trace_consume_skb(skb);
3580 else
3581 trace_kfree_skb(skb, net_tx_action);
1da177e4
LT
3582 __kfree_skb(skb);
3583 }
3584 }
3585
3586 if (sd->output_queue) {
37437bb2 3587 struct Qdisc *head;
1da177e4
LT
3588
3589 local_irq_disable();
3590 head = sd->output_queue;
3591 sd->output_queue = NULL;
a9cbd588 3592 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
3593 local_irq_enable();
3594
3595 while (head) {
37437bb2
DM
3596 struct Qdisc *q = head;
3597 spinlock_t *root_lock;
3598
1da177e4
LT
3599 head = head->next_sched;
3600
5fb66229 3601 root_lock = qdisc_lock(q);
37437bb2 3602 if (spin_trylock(root_lock)) {
4e857c58 3603 smp_mb__before_atomic();
def82a1d
JP
3604 clear_bit(__QDISC_STATE_SCHED,
3605 &q->state);
37437bb2
DM
3606 qdisc_run(q);
3607 spin_unlock(root_lock);
1da177e4 3608 } else {
195648bb 3609 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 3610 &q->state)) {
195648bb 3611 __netif_reschedule(q);
e8a83e10 3612 } else {
4e857c58 3613 smp_mb__before_atomic();
e8a83e10
JP
3614 clear_bit(__QDISC_STATE_SCHED,
3615 &q->state);
3616 }
1da177e4
LT
3617 }
3618 }
3619 }
3620}
3621
ab95bfe0
JP
3622#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3623 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
da678292
MM
3624/* This hook is defined here for ATM LANE */
3625int (*br_fdb_test_addr_hook)(struct net_device *dev,
3626 unsigned char *addr) __read_mostly;
4fb019a0 3627EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 3628#endif
1da177e4 3629
1da177e4 3630#ifdef CONFIG_NET_CLS_ACT
f697c3e8
HX
3631static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3632 struct packet_type **pt_prev,
3633 int *ret, struct net_device *orig_dev)
3634{
d2788d34
DB
3635 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3636 struct tcf_result cl_res;
24824a09 3637
c9e99fd0
DB
3638 /* If there's at least one ingress present somewhere (so
3639 * we get here via enabled static key), remaining devices
3640 * that are not configured with an ingress qdisc will bail
d2788d34 3641 * out here.
c9e99fd0 3642 */
d2788d34 3643 if (!cl)
4577139b 3644 return skb;
f697c3e8
HX
3645 if (*pt_prev) {
3646 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3647 *pt_prev = NULL;
1da177e4
LT
3648 }
3649
3365495c 3650 qdisc_skb_cb(skb)->pkt_len = skb->len;
c9e99fd0 3651 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3365495c 3652 qdisc_bstats_update_cpu(cl->q, skb);
c9e99fd0 3653
d2788d34
DB
3654 switch (tc_classify(skb, cl, &cl_res)) {
3655 case TC_ACT_OK:
3656 case TC_ACT_RECLASSIFY:
3657 skb->tc_index = TC_H_MIN(cl_res.classid);
3658 break;
3659 case TC_ACT_SHOT:
3660 qdisc_qstats_drop_cpu(cl->q);
3661 case TC_ACT_STOLEN:
3662 case TC_ACT_QUEUED:
3663 kfree_skb(skb);
3664 return NULL;
3665 default:
3666 break;
f697c3e8
HX
3667 }
3668
f697c3e8 3669 return skb;
1da177e4 3670}
e687ad60
PN
3671#else
3672static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3673 struct packet_type **pt_prev,
3674 int *ret, struct net_device *orig_dev)
3675{
3676 return skb;
3677}
1da177e4
LT
3678#endif
3679
ab95bfe0
JP
3680/**
3681 * netdev_rx_handler_register - register receive handler
3682 * @dev: device to register a handler for
3683 * @rx_handler: receive handler to register
93e2c32b 3684 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0 3685 *
e227867f 3686 * Register a receive handler for a device. This handler will then be
ab95bfe0
JP
3687 * called from __netif_receive_skb. A negative errno code is returned
3688 * on a failure.
3689 *
3690 * The caller must hold the rtnl_mutex.
8a4eb573
JP
3691 *
3692 * For a general description of rx_handler, see enum rx_handler_result.
ab95bfe0
JP
3693 */
3694int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
3695 rx_handler_func_t *rx_handler,
3696 void *rx_handler_data)
ab95bfe0
JP
3697{
3698 ASSERT_RTNL();
3699
3700 if (dev->rx_handler)
3701 return -EBUSY;
3702
00cfec37 3703 /* Note: rx_handler_data must be set before rx_handler */
93e2c32b 3704 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
ab95bfe0
JP
3705 rcu_assign_pointer(dev->rx_handler, rx_handler);
3706
3707 return 0;
3708}
3709EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
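As the kernel-doc above describes, exactly one handler can be attached per device, under RTNL, and it is then invoked from __netif_receive_skb. A minimal sketch (not part of dev.c) of a bridge-like user installing one; struct mylayer_port and the mylayer_* helpers are invented:

/* Illustrative sketch only. */
static rx_handler_result_t mylayer_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct mylayer_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (!mylayer_wants(port, skb))		/* hypothetical predicate */
		return RX_HANDLER_PASS;		/* continue normal delivery */

	mylayer_queue(port, skb);		/* hypothetical consumer */
	return RX_HANDLER_CONSUMED;		/* skb is now owned by mylayer */
}

static int mylayer_add_port(struct net_device *dev, struct mylayer_port *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, mylayer_handle_frame, port);
	rtnl_unlock();
	return err;
}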
3710
3711/**
3712 * netdev_rx_handler_unregister - unregister receive handler
3713 * @dev: device to unregister a handler from
3714 *
166ec369 3715 * Unregister a receive handler from a device.
ab95bfe0
JP
3716 *
3717 * The caller must hold the rtnl_mutex.
3718 */
3719void netdev_rx_handler_unregister(struct net_device *dev)
3720{
3721
3722 ASSERT_RTNL();
a9b3cd7f 3723 RCU_INIT_POINTER(dev->rx_handler, NULL);
00cfec37
ED
3724 /* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
3725 * section is guaranteed to see a non-NULL rx_handler_data
3726 * as well.
3727 */
3728 synchronize_net();
a9b3cd7f 3729 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
ab95bfe0
JP
3730}
3731EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3732
b4b9e355
MG
3733/*
3734 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3735 * the special handling of PFMEMALLOC skbs.
3736 */
3737static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3738{
3739 switch (skb->protocol) {
2b8837ae
JP
3740 case htons(ETH_P_ARP):
3741 case htons(ETH_P_IP):
3742 case htons(ETH_P_IPV6):
3743 case htons(ETH_P_8021Q):
3744 case htons(ETH_P_8021AD):
b4b9e355
MG
3745 return true;
3746 default:
3747 return false;
3748 }
3749}
3750
e687ad60
PN
3751#ifdef CONFIG_NETFILTER_INGRESS
3752static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
3753 int *ret, struct net_device *orig_dev)
3754{
3755 if (nf_hook_ingress_active(skb)) {
3756 if (*pt_prev) {
3757 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3758 *pt_prev = NULL;
3759 }
3760
3761 return nf_hook_ingress(skb);
3762 }
3763 return 0;
3764}
3765#else
3766static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
3767 int *ret, struct net_device *orig_dev)
3768{
3769 return 0;
3770}
3771#endif
3772
9754e293 3773static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
1da177e4
LT
3774{
3775 struct packet_type *ptype, *pt_prev;
ab95bfe0 3776 rx_handler_func_t *rx_handler;
f2ccd8fa 3777 struct net_device *orig_dev;
8a4eb573 3778 bool deliver_exact = false;
1da177e4 3779 int ret = NET_RX_DROP;
252e3346 3780 __be16 type;
1da177e4 3781
588f0330 3782 net_timestamp_check(!netdev_tstamp_prequeue, skb);
81bbb3d4 3783
cf66ba58 3784 trace_netif_receive_skb(skb);
9b22ea56 3785
cc9bd5ce 3786 orig_dev = skb->dev;
8f903c70 3787
c1d2bbe1 3788 skb_reset_network_header(skb);
fda55eca
ED
3789 if (!skb_transport_header_was_set(skb))
3790 skb_reset_transport_header(skb);
0b5c9db1 3791 skb_reset_mac_len(skb);
1da177e4
LT
3792
3793 pt_prev = NULL;
3794
3795 rcu_read_lock();
3796
63d8ea7f 3797another_round:
b6858177 3798 skb->skb_iif = skb->dev->ifindex;
63d8ea7f
DM
3799
3800 __this_cpu_inc(softnet_data.processed);
3801
8ad227ff
PM
3802 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3803 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
0d5501c1 3804 skb = skb_vlan_untag(skb);
bcc6d479 3805 if (unlikely(!skb))
b4b9e355 3806 goto unlock;
bcc6d479
JP
3807 }
3808
1da177e4
LT
3809#ifdef CONFIG_NET_CLS_ACT
3810 if (skb->tc_verd & TC_NCLS) {
3811 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3812 goto ncls;
3813 }
3814#endif
3815
9754e293 3816 if (pfmemalloc)
b4b9e355
MG
3817 goto skip_taps;
3818
1da177e4 3819 list_for_each_entry_rcu(ptype, &ptype_all, list) {
7866a621
SN
3820 if (pt_prev)
3821 ret = deliver_skb(skb, pt_prev, orig_dev);
3822 pt_prev = ptype;
3823 }
3824
3825 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
3826 if (pt_prev)
3827 ret = deliver_skb(skb, pt_prev, orig_dev);
3828 pt_prev = ptype;
1da177e4
LT
3829 }
3830
b4b9e355 3831skip_taps:
1cf51900 3832#ifdef CONFIG_NET_INGRESS
4577139b
DB
3833 if (static_key_false(&ingress_needed)) {
3834 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3835 if (!skb)
3836 goto unlock;
e687ad60
PN
3837
3838 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
3839 goto unlock;
4577139b 3840 }
1cf51900
PN
3841#endif
3842#ifdef CONFIG_NET_CLS_ACT
4577139b 3843 skb->tc_verd = 0;
1da177e4
LT
3844ncls:
3845#endif
9754e293 3846 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
b4b9e355
MG
3847 goto drop;
3848
df8a39de 3849 if (skb_vlan_tag_present(skb)) {
2425717b
JF
3850 if (pt_prev) {
3851 ret = deliver_skb(skb, pt_prev, orig_dev);
3852 pt_prev = NULL;
3853 }
48cc32d3 3854 if (vlan_do_receive(&skb))
2425717b
JF
3855 goto another_round;
3856 else if (unlikely(!skb))
b4b9e355 3857 goto unlock;
2425717b
JF
3858 }
3859
48cc32d3 3860 rx_handler = rcu_dereference(skb->dev->rx_handler);
ab95bfe0
JP
3861 if (rx_handler) {
3862 if (pt_prev) {
3863 ret = deliver_skb(skb, pt_prev, orig_dev);
3864 pt_prev = NULL;
3865 }
8a4eb573
JP
3866 switch (rx_handler(&skb)) {
3867 case RX_HANDLER_CONSUMED:
3bc1b1ad 3868 ret = NET_RX_SUCCESS;
b4b9e355 3869 goto unlock;
8a4eb573 3870 case RX_HANDLER_ANOTHER:
63d8ea7f 3871 goto another_round;
8a4eb573
JP
3872 case RX_HANDLER_EXACT:
3873 deliver_exact = true;
3874 case RX_HANDLER_PASS:
3875 break;
3876 default:
3877 BUG();
3878 }
ab95bfe0 3879 }
1da177e4 3880
df8a39de
JP
3881 if (unlikely(skb_vlan_tag_present(skb))) {
3882 if (skb_vlan_tag_get_id(skb))
d4b812de
ED
3883 skb->pkt_type = PACKET_OTHERHOST;
3884 /* Note: we might in the future use prio bits
3885 * and set skb->priority like in vlan_do_receive()
3886 * For the time being, just ignore Priority Code Point
3887 */
3888 skb->vlan_tci = 0;
3889 }
48cc32d3 3890
7866a621
SN
3891 type = skb->protocol;
3892
63d8ea7f 3893 /* deliver only exact match when indicated */
7866a621
SN
3894 if (likely(!deliver_exact)) {
3895 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3896 &ptype_base[ntohs(type) &
3897 PTYPE_HASH_MASK]);
3898 }
1f3c8804 3899
7866a621
SN
3900 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3901 &orig_dev->ptype_specific);
3902
3903 if (unlikely(skb->dev != orig_dev)) {
3904 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3905 &skb->dev->ptype_specific);
1da177e4
LT
3906 }
3907
3908 if (pt_prev) {
1080e512 3909 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
0e698bf6 3910 goto drop;
1080e512
MT
3911 else
3912 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4 3913 } else {
b4b9e355 3914drop:
caf586e5 3915 atomic_long_inc(&skb->dev->rx_dropped);
1da177e4
LT
3916 kfree_skb(skb);
3917 /* Jamal, now you will not be able to escape explaining
3918 * to me how you were going to use this. :-)
3919 */
3920 ret = NET_RX_DROP;
3921 }
3922
b4b9e355 3923unlock:
1da177e4 3924 rcu_read_unlock();
9754e293
DM
3925 return ret;
3926}
3927
3928static int __netif_receive_skb(struct sk_buff *skb)
3929{
3930 int ret;
3931
3932 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3933 unsigned long pflags = current->flags;
3934
3935 /*
3936 * PFMEMALLOC skbs are special, they should
3937 * - be delivered to SOCK_MEMALLOC sockets only
3938 * - stay away from userspace
3939 * - have bounded memory usage
3940 *
3941 * Use PF_MEMALLOC as this saves us from propagating the allocation
3942 * context down to all allocation sites.
3943 */
3944 current->flags |= PF_MEMALLOC;
3945 ret = __netif_receive_skb_core(skb, true);
3946 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3947 } else
3948 ret = __netif_receive_skb_core(skb, false);
3949
1da177e4
LT
3950 return ret;
3951}
0a9627f2 3952
ae78dbfa 3953static int netif_receive_skb_internal(struct sk_buff *skb)
0a9627f2 3954{
588f0330 3955 net_timestamp_check(netdev_tstamp_prequeue, skb);
3b098e2d 3956
c1f19b51
RC
3957 if (skb_defer_rx_timestamp(skb))
3958 return NET_RX_SUCCESS;
3959
df334545 3960#ifdef CONFIG_RPS
c5905afb 3961 if (static_key_false(&rps_needed)) {
3b098e2d
ED
3962 struct rps_dev_flow voidflow, *rflow = &voidflow;
3963 int cpu, ret;
fec5e652 3964
3b098e2d
ED
3965 rcu_read_lock();
3966
3967 cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 3968
3b098e2d
ED
3969 if (cpu >= 0) {
3970 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3971 rcu_read_unlock();
adc9300e 3972 return ret;
3b098e2d 3973 }
adc9300e 3974 rcu_read_unlock();
fec5e652 3975 }
1e94d72f 3976#endif
adc9300e 3977 return __netif_receive_skb(skb);
0a9627f2 3978}
ae78dbfa
BH
3979
3980/**
3981 * netif_receive_skb - process receive buffer from network
3982 * @skb: buffer to process
3983 *
3984 * netif_receive_skb() is the main receive data processing function.
3985 * It always succeeds. The buffer may be dropped during processing
3986 * for congestion control or by the protocol layers.
3987 *
3988 * This function may only be called from softirq context and interrupts
3989 * should be enabled.
3990 *
3991 * Return values (usually ignored):
3992 * NET_RX_SUCCESS: no congestion
3993 * NET_RX_DROP: packet was dropped
3994 */
7026b1dd 3995int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
ae78dbfa
BH
3996{
3997 trace_netif_receive_skb_entry(skb);
3998
3999 return netif_receive_skb_internal(skb);
4000}
7026b1dd 4001EXPORT_SYMBOL(netif_receive_skb_sk);
1da177e4 4002
88751275
ED
4003/* Network device is going away, flush any packets still pending
4004 * Called with irqs disabled.
4005 */
152102c7 4006static void flush_backlog(void *arg)
6e583ce5 4007{
152102c7 4008 struct net_device *dev = arg;
903ceff7 4009 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6e583ce5
SH
4010 struct sk_buff *skb, *tmp;
4011
e36fa2f7 4012 rps_lock(sd);
6e7676c1 4013 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
6e583ce5 4014 if (skb->dev == dev) {
e36fa2f7 4015 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 4016 kfree_skb(skb);
76cc8b13 4017 input_queue_head_incr(sd);
6e583ce5 4018 }
6e7676c1 4019 }
e36fa2f7 4020 rps_unlock(sd);
6e7676c1
CG
4021
4022 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4023 if (skb->dev == dev) {
4024 __skb_unlink(skb, &sd->process_queue);
4025 kfree_skb(skb);
76cc8b13 4026 input_queue_head_incr(sd);
6e7676c1
CG
4027 }
4028 }
6e583ce5
SH
4029}
4030
d565b0a1
HX
4031static int napi_gro_complete(struct sk_buff *skb)
4032{
22061d80 4033 struct packet_offload *ptype;
d565b0a1 4034 __be16 type = skb->protocol;
22061d80 4035 struct list_head *head = &offload_base;
d565b0a1
HX
4036 int err = -ENOENT;
4037
c3c7c254
ED
4038 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4039
fc59f9a3
HX
4040 if (NAPI_GRO_CB(skb)->count == 1) {
4041 skb_shinfo(skb)->gso_size = 0;
d565b0a1 4042 goto out;
fc59f9a3 4043 }
d565b0a1
HX
4044
4045 rcu_read_lock();
4046 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4047 if (ptype->type != type || !ptype->callbacks.gro_complete)
d565b0a1
HX
4048 continue;
4049
299603e8 4050 err = ptype->callbacks.gro_complete(skb, 0);
d565b0a1
HX
4051 break;
4052 }
4053 rcu_read_unlock();
4054
4055 if (err) {
4056 WARN_ON(&ptype->list == head);
4057 kfree_skb(skb);
4058 return NET_RX_SUCCESS;
4059 }
4060
4061out:
ae78dbfa 4062 return netif_receive_skb_internal(skb);
d565b0a1
HX
4063}
4064
2e71a6f8
ED
4065/* napi->gro_list contains packets ordered by age.
4066 * The youngest packets are at the head of it.
4067 * Complete skbs in reverse order to reduce latencies.
4068 */
4069void napi_gro_flush(struct napi_struct *napi, bool flush_old)
d565b0a1 4070{
2e71a6f8 4071 struct sk_buff *skb, *prev = NULL;
d565b0a1 4072
2e71a6f8
ED
4073 /* scan list and build reverse chain */
4074 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4075 skb->prev = prev;
4076 prev = skb;
4077 }
4078
4079 for (skb = prev; skb; skb = prev) {
d565b0a1 4080 skb->next = NULL;
2e71a6f8
ED
4081
4082 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4083 return;
4084
4085 prev = skb->prev;
d565b0a1 4086 napi_gro_complete(skb);
2e71a6f8 4087 napi->gro_count--;
d565b0a1
HX
4088 }
4089
4090 napi->gro_list = NULL;
4091}
86cac58b 4092EXPORT_SYMBOL(napi_gro_flush);
d565b0a1 4093
89c5fa33
ED
4094static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4095{
4096 struct sk_buff *p;
4097 unsigned int maclen = skb->dev->hard_header_len;
0b4cec8c 4098 u32 hash = skb_get_hash_raw(skb);
89c5fa33
ED
4099
4100 for (p = napi->gro_list; p; p = p->next) {
4101 unsigned long diffs;
4102
0b4cec8c
TH
4103 NAPI_GRO_CB(p)->flush = 0;
4104
4105 if (hash != skb_get_hash_raw(p)) {
4106 NAPI_GRO_CB(p)->same_flow = 0;
4107 continue;
4108 }
4109
89c5fa33
ED
4110 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4111 diffs |= p->vlan_tci ^ skb->vlan_tci;
4112 if (maclen == ETH_HLEN)
4113 diffs |= compare_ether_header(skb_mac_header(p),
a50e233c 4114 skb_mac_header(skb));
89c5fa33
ED
4115 else if (!diffs)
4116 diffs = memcmp(skb_mac_header(p),
a50e233c 4117 skb_mac_header(skb),
89c5fa33
ED
4118 maclen);
4119 NAPI_GRO_CB(p)->same_flow = !diffs;
89c5fa33
ED
4120 }
4121}
4122
299603e8
JC
4123static void skb_gro_reset_offset(struct sk_buff *skb)
4124{
4125 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4126 const skb_frag_t *frag0 = &pinfo->frags[0];
4127
4128 NAPI_GRO_CB(skb)->data_offset = 0;
4129 NAPI_GRO_CB(skb)->frag0 = NULL;
4130 NAPI_GRO_CB(skb)->frag0_len = 0;
4131
4132 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4133 pinfo->nr_frags &&
4134 !PageHighMem(skb_frag_page(frag0))) {
4135 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4136 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
89c5fa33
ED
4137 }
4138}
4139
a50e233c
ED
4140static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4141{
4142 struct skb_shared_info *pinfo = skb_shinfo(skb);
4143
4144 BUG_ON(skb->end - skb->tail < grow);
4145
4146 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4147
4148 skb->data_len -= grow;
4149 skb->tail += grow;
4150
4151 pinfo->frags[0].page_offset += grow;
4152 skb_frag_size_sub(&pinfo->frags[0], grow);
4153
4154 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4155 skb_frag_unref(skb, 0);
4156 memmove(pinfo->frags, pinfo->frags + 1,
4157 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4158 }
4159}
4160
bb728820 4161static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1
HX
4162{
4163 struct sk_buff **pp = NULL;
22061d80 4164 struct packet_offload *ptype;
d565b0a1 4165 __be16 type = skb->protocol;
22061d80 4166 struct list_head *head = &offload_base;
0da2afd5 4167 int same_flow;
5b252f0c 4168 enum gro_result ret;
a50e233c 4169 int grow;
d565b0a1 4170
9c62a68d 4171 if (!(skb->dev->features & NETIF_F_GRO))
d565b0a1
HX
4172 goto normal;
4173
5a212329 4174 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
f17f5c91
HX
4175 goto normal;
4176
89c5fa33
ED
4177 gro_list_prepare(napi, skb);
4178
d565b0a1
HX
4179 rcu_read_lock();
4180 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4181 if (ptype->type != type || !ptype->callbacks.gro_receive)
d565b0a1
HX
4182 continue;
4183
86911732 4184 skb_set_network_header(skb, skb_gro_offset(skb));
efd9450e 4185 skb_reset_mac_len(skb);
d565b0a1
HX
4186 NAPI_GRO_CB(skb)->same_flow = 0;
4187 NAPI_GRO_CB(skb)->flush = 0;
5d38a079 4188 NAPI_GRO_CB(skb)->free = 0;
b582ef09 4189 NAPI_GRO_CB(skb)->udp_mark = 0;
15e2396d 4190 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
d565b0a1 4191
662880f4
TH
4192 /* Setup for GRO checksum validation */
4193 switch (skb->ip_summed) {
4194 case CHECKSUM_COMPLETE:
4195 NAPI_GRO_CB(skb)->csum = skb->csum;
4196 NAPI_GRO_CB(skb)->csum_valid = 1;
4197 NAPI_GRO_CB(skb)->csum_cnt = 0;
4198 break;
4199 case CHECKSUM_UNNECESSARY:
4200 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4201 NAPI_GRO_CB(skb)->csum_valid = 0;
4202 break;
4203 default:
4204 NAPI_GRO_CB(skb)->csum_cnt = 0;
4205 NAPI_GRO_CB(skb)->csum_valid = 0;
4206 }
d565b0a1 4207
f191a1d1 4208 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
d565b0a1
HX
4209 break;
4210 }
4211 rcu_read_unlock();
4212
4213 if (&ptype->list == head)
4214 goto normal;
4215
0da2afd5 4216 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 4217 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 4218
d565b0a1
HX
4219 if (pp) {
4220 struct sk_buff *nskb = *pp;
4221
4222 *pp = nskb->next;
4223 nskb->next = NULL;
4224 napi_gro_complete(nskb);
4ae5544f 4225 napi->gro_count--;
d565b0a1
HX
4226 }
4227
0da2afd5 4228 if (same_flow)
d565b0a1
HX
4229 goto ok;
4230
600adc18 4231 if (NAPI_GRO_CB(skb)->flush)
d565b0a1 4232 goto normal;
d565b0a1 4233
600adc18
ED
4234 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4235 struct sk_buff *nskb = napi->gro_list;
4236
4237 /* locate the end of the list to select the 'oldest' flow */
4238 while (nskb->next) {
4239 pp = &nskb->next;
4240 nskb = *pp;
4241 }
4242 *pp = NULL;
4243 nskb->next = NULL;
4244 napi_gro_complete(nskb);
4245 } else {
4246 napi->gro_count++;
4247 }
d565b0a1 4248 NAPI_GRO_CB(skb)->count = 1;
2e71a6f8 4249 NAPI_GRO_CB(skb)->age = jiffies;
29e98242 4250 NAPI_GRO_CB(skb)->last = skb;
86911732 4251 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
d565b0a1
HX
4252 skb->next = napi->gro_list;
4253 napi->gro_list = skb;
5d0d9be8 4254 ret = GRO_HELD;
d565b0a1 4255
ad0f9904 4256pull:
a50e233c
ED
4257 grow = skb_gro_offset(skb) - skb_headlen(skb);
4258 if (grow > 0)
4259 gro_pull_from_frag0(skb, grow);
d565b0a1 4260ok:
5d0d9be8 4261 return ret;
d565b0a1
HX
4262
4263normal:
ad0f9904
HX
4264 ret = GRO_NORMAL;
4265 goto pull;
5d38a079 4266}
96e93eab 4267
bf5a755f
JC
4268struct packet_offload *gro_find_receive_by_type(__be16 type)
4269{
4270 struct list_head *offload_head = &offload_base;
4271 struct packet_offload *ptype;
4272
4273 list_for_each_entry_rcu(ptype, offload_head, list) {
4274 if (ptype->type != type || !ptype->callbacks.gro_receive)
4275 continue;
4276 return ptype;
4277 }
4278 return NULL;
4279}
e27a2f83 4280EXPORT_SYMBOL(gro_find_receive_by_type);
bf5a755f
JC
4281
4282struct packet_offload *gro_find_complete_by_type(__be16 type)
4283{
4284 struct list_head *offload_head = &offload_base;
4285 struct packet_offload *ptype;
4286
4287 list_for_each_entry_rcu(ptype, offload_head, list) {
4288 if (ptype->type != type || !ptype->callbacks.gro_complete)
4289 continue;
4290 return ptype;
4291 }
4292 return NULL;
4293}
e27a2f83 4294EXPORT_SYMBOL(gro_find_complete_by_type);
5d38a079 4295
bb728820 4296static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 4297{
5d0d9be8
HX
4298 switch (ret) {
4299 case GRO_NORMAL:
ae78dbfa 4300 if (netif_receive_skb_internal(skb))
c7c4b3b6
BH
4301 ret = GRO_DROP;
4302 break;
5d38a079 4303
5d0d9be8 4304 case GRO_DROP:
5d38a079
HX
4305 kfree_skb(skb);
4306 break;
5b252f0c 4307
daa86548 4308 case GRO_MERGED_FREE:
d7e8883c
ED
4309 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4310 kmem_cache_free(skbuff_head_cache, skb);
4311 else
4312 __kfree_skb(skb);
daa86548
ED
4313 break;
4314
5b252f0c
BH
4315 case GRO_HELD:
4316 case GRO_MERGED:
4317 break;
5d38a079
HX
4318 }
4319
c7c4b3b6 4320 return ret;
5d0d9be8 4321}
5d0d9be8 4322
c7c4b3b6 4323gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 4324{
ae78dbfa 4325 trace_napi_gro_receive_entry(skb);
86911732 4326
a50e233c
ED
4327 skb_gro_reset_offset(skb);
4328
89c5fa33 4329 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
d565b0a1
HX
4330}
4331EXPORT_SYMBOL(napi_gro_receive);
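napi_gro_receive() is what NAPI drivers normally call from their poll routine instead of netif_receive_skb(), so that dev_gro_receive() gets a chance to merge the frame first. A minimal sketch (not part of dev.c) of such a poll routine; struct mydev_queue and mydev_next_rx_skb() are invented:

/* Illustrative sketch only. */
static int mydev_poll(struct napi_struct *napi, int budget)
{
	struct mydev_queue *q = container_of(napi, struct mydev_queue, napi);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = mydev_next_rx_skb(q);	/* hypothetical */

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, q->netdev);
		napi_gro_receive(napi, skb);	/* merge into gro_list or deliver */
		done++;
	}

	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}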
4332
d0c2b0d2 4333static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
96e93eab 4334{
93a35f59
ED
4335 if (unlikely(skb->pfmemalloc)) {
4336 consume_skb(skb);
4337 return;
4338 }
96e93eab 4339 __skb_pull(skb, skb_headlen(skb));
2a2a459e
ED
4340 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4341 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3701e513 4342 skb->vlan_tci = 0;
66c46d74 4343 skb->dev = napi->dev;
6d152e23 4344 skb->skb_iif = 0;
c3caf119
JC
4345 skb->encapsulation = 0;
4346 skb_shinfo(skb)->gso_type = 0;
e33d0ba8 4347 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
96e93eab
HX
4348
4349 napi->skb = skb;
4350}
96e93eab 4351
76620aaf 4352struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 4353{
5d38a079 4354 struct sk_buff *skb = napi->skb;
5d38a079
HX
4355
4356 if (!skb) {
fd11a83d 4357 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
84b9cd63 4358 napi->skb = skb;
80595d59 4359 }
96e93eab
HX
4360 return skb;
4361}
76620aaf 4362EXPORT_SYMBOL(napi_get_frags);
96e93eab 4363
a50e233c
ED
4364static gro_result_t napi_frags_finish(struct napi_struct *napi,
4365 struct sk_buff *skb,
4366 gro_result_t ret)
96e93eab 4367{
5d0d9be8
HX
4368 switch (ret) {
4369 case GRO_NORMAL:
a50e233c
ED
4370 case GRO_HELD:
4371 __skb_push(skb, ETH_HLEN);
4372 skb->protocol = eth_type_trans(skb, skb->dev);
4373 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
c7c4b3b6 4374 ret = GRO_DROP;
86911732 4375 break;
5d38a079 4376
5d0d9be8 4377 case GRO_DROP:
5d0d9be8
HX
4378 case GRO_MERGED_FREE:
4379 napi_reuse_skb(napi, skb);
4380 break;
5b252f0c
BH
4381
4382 case GRO_MERGED:
4383 break;
5d0d9be8 4384 }
5d38a079 4385
c7c4b3b6 4386 return ret;
5d38a079 4387}
5d0d9be8 4388
a50e233c
ED
4389/* Upper GRO stack assumes network header starts at gro_offset=0
4390 * Drivers could call both napi_gro_frags() and napi_gro_receive()
4391 * We copy ethernet header into skb->data to have a common layout.
4392 */
4adb9c4a 4393static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
76620aaf
HX
4394{
4395 struct sk_buff *skb = napi->skb;
a50e233c
ED
4396 const struct ethhdr *eth;
4397 unsigned int hlen = sizeof(*eth);
76620aaf
HX
4398
4399 napi->skb = NULL;
4400
a50e233c
ED
4401 skb_reset_mac_header(skb);
4402 skb_gro_reset_offset(skb);
4403
4404 eth = skb_gro_header_fast(skb, 0);
4405 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4406 eth = skb_gro_header_slow(skb, hlen, 0);
4407 if (unlikely(!eth)) {
4408 napi_reuse_skb(napi, skb);
4409 return NULL;
4410 }
4411 } else {
4412 gro_pull_from_frag0(skb, hlen);
4413 NAPI_GRO_CB(skb)->frag0 += hlen;
4414 NAPI_GRO_CB(skb)->frag0_len -= hlen;
76620aaf 4415 }
a50e233c
ED
4416 __skb_pull(skb, hlen);
4417
4418 /*
4419 * This works because the only protocols we care about don't require
4420 * special handling.
4421 * We'll fix it up properly in napi_frags_finish()
4422 */
4423 skb->protocol = eth->h_proto;
76620aaf 4424
76620aaf
HX
4425 return skb;
4426}
76620aaf 4427
c7c4b3b6 4428gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 4429{
76620aaf 4430 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
4431
4432 if (!skb)
c7c4b3b6 4433 return GRO_DROP;
5d0d9be8 4434
ae78dbfa
BH
4435 trace_napi_gro_frags_entry(skb);
4436
89c5fa33 4437 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5d0d9be8 4438}
5d38a079
HX
4439EXPORT_SYMBOL(napi_gro_frags);
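napi_gro_frags() is the companion to napi_get_frags() above for drivers that receive directly into pages: the driver attaches the page as a fragment and lets napi_frags_skb() pull the Ethernet header. A minimal sketch (not part of dev.c); page/offset/len are assumed to describe the received frame:

/* Illustrative sketch only -- no eth_type_trans() here: napi_gro_frags()
 * derives skb->protocol itself from the copied Ethernet header.
 */
static void mydev_rx_page(struct napi_struct *napi, struct page *page,
			  unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(page);		/* drop: no skb available */
		return;
	}
	skb_add_rx_frag(skb, 0, page, offset, len, PAGE_SIZE);
	napi_gro_frags(napi);
}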
4440
573e8fca
TH
4441/* Compute the checksum from gro_offset and return the folded value
4442 * after adding in any pseudo checksum.
4443 */
4444__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4445{
4446 __wsum wsum;
4447 __sum16 sum;
4448
4449 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4450
4451 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4452 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4453 if (likely(!sum)) {
4454 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4455 !skb->csum_complete_sw)
4456 netdev_rx_csum_fault(skb->dev);
4457 }
4458
4459 NAPI_GRO_CB(skb)->csum = wsum;
4460 NAPI_GRO_CB(skb)->csum_valid = 1;
4461
4462 return sum;
4463}
4464EXPORT_SYMBOL(__skb_gro_checksum_complete);
4465
e326bed2 4466/*
855abcf0 4467 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
e326bed2
ED
4468 * Note: called with local irq disabled, but exits with local irq enabled.
4469 */
4470static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4471{
4472#ifdef CONFIG_RPS
4473 struct softnet_data *remsd = sd->rps_ipi_list;
4474
4475 if (remsd) {
4476 sd->rps_ipi_list = NULL;
4477
4478 local_irq_enable();
4479
4480 /* Send pending IPI's to kick RPS processing on remote cpus. */
4481 while (remsd) {
4482 struct softnet_data *next = remsd->rps_ipi_next;
4483
4484 if (cpu_online(remsd->cpu))
c46fff2a 4485 smp_call_function_single_async(remsd->cpu,
fce8ad15 4486 &remsd->csd);
e326bed2
ED
4487 remsd = next;
4488 }
4489 } else
4490#endif
4491 local_irq_enable();
4492}
4493
d75b1ade
ED
4494static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4495{
4496#ifdef CONFIG_RPS
4497 return sd->rps_ipi_list != NULL;
4498#else
4499 return false;
4500#endif
4501}
4502
bea3348e 4503static int process_backlog(struct napi_struct *napi, int quota)
1da177e4
LT
4504{
4505 int work = 0;
eecfd7c4 4506 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
1da177e4 4507
e326bed2
ED
4508 /* Check if we have pending IPIs; it's better to send them now
4509 * rather than waiting for net_rx_action() to end.
4510 */
d75b1ade 4511 if (sd_has_rps_ipi_waiting(sd)) {
e326bed2
ED
4512 local_irq_disable();
4513 net_rps_action_and_irq_enable(sd);
4514 }
d75b1ade 4515
bea3348e 4516 napi->weight = weight_p;
6e7676c1 4517 local_irq_disable();
11ef7a89 4518 while (1) {
1da177e4 4519 struct sk_buff *skb;
6e7676c1
CG
4520
4521 while ((skb = __skb_dequeue(&sd->process_queue))) {
4522 local_irq_enable();
4523 __netif_receive_skb(skb);
6e7676c1 4524 local_irq_disable();
76cc8b13
TH
4525 input_queue_head_incr(sd);
4526 if (++work >= quota) {
4527 local_irq_enable();
4528 return work;
4529 }
6e7676c1 4530 }
1da177e4 4531
e36fa2f7 4532 rps_lock(sd);
11ef7a89 4533 if (skb_queue_empty(&sd->input_pkt_queue)) {
eecfd7c4
ED
4534 /*
4535 * Inline a custom version of __napi_complete().
4536 * Only the current cpu owns and manipulates this napi,
11ef7a89
TH
4537 * and NAPI_STATE_SCHED is the only possible flag set
4538 * on backlog.
4539 * We can use a plain write instead of clear_bit(),
eecfd7c4
ED
4540 * and we don't need an smp_mb() memory barrier.
4541 */
eecfd7c4 4542 napi->state = 0;
11ef7a89 4543 rps_unlock(sd);
eecfd7c4 4544
11ef7a89 4545 break;
bea3348e 4546 }
11ef7a89
TH
4547
4548 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4549 &sd->process_queue);
e36fa2f7 4550 rps_unlock(sd);
6e7676c1
CG
4551 }
4552 local_irq_enable();
1da177e4 4553
bea3348e
SH
4554 return work;
4555}
1da177e4 4556
bea3348e
SH
4557/**
4558 * __napi_schedule - schedule for receive
c4ea43c5 4559 * @n: entry to schedule
bea3348e 4560 *
bc9ad166
ED
4561 * The entry's receive function will be scheduled to run.
4562 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
bea3348e 4563 */
b5606c2d 4564void __napi_schedule(struct napi_struct *n)
bea3348e
SH
4565{
4566 unsigned long flags;
1da177e4 4567
bea3348e 4568 local_irq_save(flags);
903ceff7 4569 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
bea3348e 4570 local_irq_restore(flags);
1da177e4 4571}
bea3348e
SH
4572EXPORT_SYMBOL(__napi_schedule);
4573
bc9ad166
ED
4574/**
4575 * __napi_schedule_irqoff - schedule for receive
4576 * @n: entry to schedule
4577 *
4578 * Variant of __napi_schedule() assuming hard irqs are masked
4579 */
4580void __napi_schedule_irqoff(struct napi_struct *n)
4581{
4582 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4583}
4584EXPORT_SYMBOL(__napi_schedule_irqoff);
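__napi_schedule_irqoff() (and the napi_schedule_irqoff() inline wrapper around it in netdevice.h) exists for callers that already run with hard interrupts masked, such as a device's interrupt handler. A minimal sketch (not part of dev.c); struct mydev_queue and mydev_mask_queue_irq() are invented:

/* Illustrative sketch only. */
static irqreturn_t mydev_msix_handler(int irq, void *data)
{
	struct mydev_queue *q = data;

	mydev_mask_queue_irq(q);	/* hypothetical: silence further irqs */
	napi_schedule_irqoff(&q->napi);	/* hard irqs are already off here */
	return IRQ_HANDLED;
}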
4585
d565b0a1
HX
4586void __napi_complete(struct napi_struct *n)
4587{
4588 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
d565b0a1 4589
d75b1ade 4590 list_del_init(&n->poll_list);
4e857c58 4591 smp_mb__before_atomic();
d565b0a1
HX
4592 clear_bit(NAPI_STATE_SCHED, &n->state);
4593}
4594EXPORT_SYMBOL(__napi_complete);
4595
3b47d303 4596void napi_complete_done(struct napi_struct *n, int work_done)
d565b0a1
HX
4597{
4598 unsigned long flags;
4599
4600 /*
4601 * don't let napi dequeue from the cpu poll list
4602 * just in case its running on a different cpu
4603 */
4604 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4605 return;
4606
3b47d303
ED
4607 if (n->gro_list) {
4608 unsigned long timeout = 0;
d75b1ade 4609
3b47d303
ED
4610 if (work_done)
4611 timeout = n->dev->gro_flush_timeout;
4612
4613 if (timeout)
4614 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4615 HRTIMER_MODE_REL_PINNED);
4616 else
4617 napi_gro_flush(n, false);
4618 }
d75b1ade
ED
4619 if (likely(list_empty(&n->poll_list))) {
4620 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4621 } else {
4622 /* If n->poll_list is not empty, we need to mask irqs */
4623 local_irq_save(flags);
4624 __napi_complete(n);
4625 local_irq_restore(flags);
4626 }
d565b0a1 4627}
3b47d303 4628EXPORT_SYMBOL(napi_complete_done);
d565b0a1 4629
af12fa6e
ET
4630/* must be called under rcu_read_lock(), as we dont take a reference */
4631struct napi_struct *napi_by_id(unsigned int napi_id)
4632{
4633 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4634 struct napi_struct *napi;
4635
4636 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4637 if (napi->napi_id == napi_id)
4638 return napi;
4639
4640 return NULL;
4641}
4642EXPORT_SYMBOL_GPL(napi_by_id);
4643
4644void napi_hash_add(struct napi_struct *napi)
4645{
4646 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4647
4648 spin_lock(&napi_hash_lock);
4649
4650 /* 0 is not a valid id; we also skip an id that is already taken.
4651 * We expect both events to be extremely rare.
4652 */
4653 napi->napi_id = 0;
4654 while (!napi->napi_id) {
4655 napi->napi_id = ++napi_gen_id;
4656 if (napi_by_id(napi->napi_id))
4657 napi->napi_id = 0;
4658 }
4659
4660 hlist_add_head_rcu(&napi->napi_hash_node,
4661 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4662
4663 spin_unlock(&napi_hash_lock);
4664 }
4665}
4666EXPORT_SYMBOL_GPL(napi_hash_add);
4667
4668/* Warning : caller is responsible to make sure rcu grace period
4669 * is respected before freeing memory containing @napi
4670 */
4671void napi_hash_del(struct napi_struct *napi)
4672{
4673 spin_lock(&napi_hash_lock);
4674
4675 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4676 hlist_del_rcu(&napi->napi_hash_node);
4677
4678 spin_unlock(&napi_hash_lock);
4679}
4680EXPORT_SYMBOL_GPL(napi_hash_del);
4681
3b47d303
ED
4682static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4683{
4684 struct napi_struct *napi;
4685
4686 napi = container_of(timer, struct napi_struct, timer);
4687 if (napi->gro_list)
4688 napi_schedule(napi);
4689
4690 return HRTIMER_NORESTART;
4691}
4692
d565b0a1
HX
4693void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4694 int (*poll)(struct napi_struct *, int), int weight)
4695{
4696 INIT_LIST_HEAD(&napi->poll_list);
3b47d303
ED
4697 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4698 napi->timer.function = napi_watchdog;
4ae5544f 4699 napi->gro_count = 0;
d565b0a1 4700 napi->gro_list = NULL;
5d38a079 4701 napi->skb = NULL;
d565b0a1 4702 napi->poll = poll;
82dc3c63
ED
4703 if (weight > NAPI_POLL_WEIGHT)
4704 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4705 weight, dev->name);
d565b0a1
HX
4706 napi->weight = weight;
4707 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 4708 napi->dev = dev;
5d38a079 4709#ifdef CONFIG_NETPOLL
d565b0a1
HX
4710 spin_lock_init(&napi->poll_lock);
4711 napi->poll_owner = -1;
4712#endif
4713 set_bit(NAPI_STATE_SCHED, &napi->state);
4714}
4715EXPORT_SYMBOL(netif_napi_add);
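Typical driver lifecycle around the registration above: add the NAPI context once at probe time, enable it when the interface is opened, and disable/delete it on teardown. A minimal sketch (not part of dev.c); struct mydev_priv, mydev_poll() (as sketched earlier) and mydev_enable_rx_irq() are invented:

/* Illustrative sketch only. */
static void mydev_setup_napi(struct mydev_priv *priv)
{
	netif_napi_add(priv->netdev, &priv->napi, mydev_poll, NAPI_POLL_WEIGHT);
}

static int mydev_open(struct net_device *netdev)
{
	struct mydev_priv *priv = netdev_priv(netdev);

	napi_enable(&priv->napi);
	mydev_enable_rx_irq(priv);	/* hypothetical */
	return 0;
}

static void mydev_teardown_napi(struct mydev_priv *priv)
{
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
}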
4716
3b47d303
ED
4717void napi_disable(struct napi_struct *n)
4718{
4719 might_sleep();
4720 set_bit(NAPI_STATE_DISABLE, &n->state);
4721
4722 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4723 msleep(1);
4724
4725 hrtimer_cancel(&n->timer);
4726
4727 clear_bit(NAPI_STATE_DISABLE, &n->state);
4728}
4729EXPORT_SYMBOL(napi_disable);
4730
d565b0a1
HX
4731void netif_napi_del(struct napi_struct *napi)
4732{
d7b06636 4733 list_del_init(&napi->dev_list);
76620aaf 4734 napi_free_frags(napi);
d565b0a1 4735
289dccbe 4736 kfree_skb_list(napi->gro_list);
d565b0a1 4737 napi->gro_list = NULL;
4ae5544f 4738 napi->gro_count = 0;
d565b0a1
HX
4739}
4740EXPORT_SYMBOL(netif_napi_del);
4741
726ce70e
HX
4742static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4743{
4744 void *have;
4745 int work, weight;
4746
4747 list_del_init(&n->poll_list);
4748
4749 have = netpoll_poll_lock(n);
4750
4751 weight = n->weight;
4752
4753 /* This NAPI_STATE_SCHED test is for avoiding a race
4754 * with netpoll's poll_napi(). Only the entity which
4755 * obtains the lock and sees NAPI_STATE_SCHED set will
4756 * actually make the ->poll() call. Therefore we avoid
4757 * accidentally calling ->poll() when NAPI is not scheduled.
4758 */
4759 work = 0;
4760 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4761 work = n->poll(n, weight);
4762 trace_napi_poll(n);
4763 }
4764
4765 WARN_ON_ONCE(work > weight);
4766
4767 if (likely(work < weight))
4768 goto out_unlock;
4769
4770 /* Drivers must not modify the NAPI state if they
4771 * consume the entire weight. In such cases this code
4772 * still "owns" the NAPI instance and therefore can
4773 * move the instance around on the list at-will.
4774 */
4775 if (unlikely(napi_disable_pending(n))) {
4776 napi_complete(n);
4777 goto out_unlock;
4778 }
4779
4780 if (n->gro_list) {
4781 /* flush too old packets
4782 * If HZ < 1000, flush all packets.
4783 */
4784 napi_gro_flush(n, HZ >= 1000);
4785 }
4786
001ce546
HX
4787 /* Some drivers may have called napi_schedule
4788 * prior to exhausting their budget.
4789 */
4790 if (unlikely(!list_empty(&n->poll_list))) {
4791 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4792 n->dev ? n->dev->name : "backlog");
4793 goto out_unlock;
4794 }
4795
726ce70e
HX
4796 list_add_tail(&n->poll_list, repoll);
4797
4798out_unlock:
4799 netpoll_poll_unlock(have);
4800
4801 return work;
4802}
4803
1da177e4
LT
4804static void net_rx_action(struct softirq_action *h)
4805{
903ceff7 4806 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
24f8b238 4807 unsigned long time_limit = jiffies + 2;
51b0bded 4808 int budget = netdev_budget;
d75b1ade
ED
4809 LIST_HEAD(list);
4810 LIST_HEAD(repoll);
53fb95d3 4811
1da177e4 4812 local_irq_disable();
d75b1ade
ED
4813 list_splice_init(&sd->poll_list, &list);
4814 local_irq_enable();
1da177e4 4815
ceb8d5bf 4816 for (;;) {
bea3348e 4817 struct napi_struct *n;
1da177e4 4818
ceb8d5bf
HX
4819 if (list_empty(&list)) {
4820 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4821 return;
4822 break;
4823 }
4824
6bd373eb
HX
4825 n = list_first_entry(&list, struct napi_struct, poll_list);
4826 budget -= napi_poll(n, &repoll);
4827
d75b1ade 4828 /* If softirq window is exhausted then punt.
24f8b238
SH
4829 * Allow this to run for 2 jiffies, which allows
4830 * an average latency of 1.5/HZ.
bea3348e 4831 */
ceb8d5bf
HX
4832 if (unlikely(budget <= 0 ||
4833 time_after_eq(jiffies, time_limit))) {
4834 sd->time_squeeze++;
4835 break;
4836 }
1da177e4 4837 }
d75b1ade 4838
d75b1ade
ED
4839 local_irq_disable();
4840
4841 list_splice_tail_init(&sd->poll_list, &list);
4842 list_splice_tail(&repoll, &list);
4843 list_splice(&list, &sd->poll_list);
4844 if (!list_empty(&sd->poll_list))
4845 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4846
e326bed2 4847 net_rps_action_and_irq_enable(sd);
1da177e4
LT
4848}
4849
aa9d8560 4850struct netdev_adjacent {
9ff162a8 4851 struct net_device *dev;
5d261913
VF
4852
4853 /* upper master flag, there can only be one master device per list */
9ff162a8 4854 bool master;
5d261913 4855
5d261913
VF
4856 /* counter for the number of times this device was added to us */
4857 u16 ref_nr;
4858
402dae96
VF
4859 /* private field for the users */
4860 void *private;
4861
9ff162a8
JP
4862 struct list_head list;
4863 struct rcu_head rcu;
9ff162a8
JP
4864};
4865
5d261913
VF
4866static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4867 struct net_device *adj_dev,
2f268f12 4868 struct list_head *adj_list)
9ff162a8 4869{
5d261913 4870 struct netdev_adjacent *adj;
5d261913 4871
2f268f12 4872 list_for_each_entry(adj, adj_list, list) {
5d261913
VF
4873 if (adj->dev == adj_dev)
4874 return adj;
9ff162a8
JP
4875 }
4876 return NULL;
4877}
4878
4879/**
4880 * netdev_has_upper_dev - Check if device is linked to an upper device
4881 * @dev: device
4882 * @upper_dev: upper device to check
4883 *
4884 * Find out if a device is linked to specified upper device and return true
4885 * in case it is. Note that this checks only immediate upper device,
4886 * not through a complete stack of devices. The caller must hold the RTNL lock.
4887 */
4888bool netdev_has_upper_dev(struct net_device *dev,
4889 struct net_device *upper_dev)
4890{
4891 ASSERT_RTNL();
4892
2f268f12 4893 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
9ff162a8
JP
4894}
4895EXPORT_SYMBOL(netdev_has_upper_dev);
4896
4897/**
4898 * netdev_has_any_upper_dev - Check if device is linked to some device
4899 * @dev: device
4900 *
4901 * Find out if a device is linked to an upper device and return true in case
4902 * it is. The caller must hold the RTNL lock.
4903 */
1d143d9f 4904static bool netdev_has_any_upper_dev(struct net_device *dev)
9ff162a8
JP
4905{
4906 ASSERT_RTNL();
4907
2f268f12 4908 return !list_empty(&dev->all_adj_list.upper);
9ff162a8 4909}
9ff162a8
JP
4910
4911/**
4912 * netdev_master_upper_dev_get - Get master upper device
4913 * @dev: device
4914 *
4915 * Find a master upper device and return pointer to it or NULL in case
4916 * it's not there. The caller must hold the RTNL lock.
4917 */
4918struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4919{
aa9d8560 4920 struct netdev_adjacent *upper;
9ff162a8
JP
4921
4922 ASSERT_RTNL();
4923
2f268f12 4924 if (list_empty(&dev->adj_list.upper))
9ff162a8
JP
4925 return NULL;
4926
2f268f12 4927 upper = list_first_entry(&dev->adj_list.upper,
aa9d8560 4928 struct netdev_adjacent, list);
9ff162a8
JP
4929 if (likely(upper->master))
4930 return upper->dev;
4931 return NULL;
4932}
4933EXPORT_SYMBOL(netdev_master_upper_dev_get);
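netdev_master_upper_dev_get() (and its _rcu variant further below) is how code outside the adjacency machinery asks which bond/team/bridge a device is enslaved to. A minimal sketch (not part of dev.c) of a hypothetical diagnostic helper:

/* Illustrative sketch only. */
static void mydev_show_master(struct net_device *slave)
{
	struct net_device *master;

	rtnl_lock();		/* the kernel-doc requires RTNL for this variant */
	master = netdev_master_upper_dev_get(slave);
	if (master)
		netdev_info(slave, "enslaved to %s\n", master->name);
	rtnl_unlock();
}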
4934
b6ccba4c
VF
4935void *netdev_adjacent_get_private(struct list_head *adj_list)
4936{
4937 struct netdev_adjacent *adj;
4938
4939 adj = list_entry(adj_list, struct netdev_adjacent, list);
4940
4941 return adj->private;
4942}
4943EXPORT_SYMBOL(netdev_adjacent_get_private);
4944
44a40855
VY
4945/**
4946 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4947 * @dev: device
4948 * @iter: list_head ** of the current position
4949 *
4950 * Gets the next device from the dev's upper list, starting from iter
4951 * position. The caller must hold RCU read lock.
4952 */
4953struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4954 struct list_head **iter)
4955{
4956 struct netdev_adjacent *upper;
4957
4958 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4959
4960 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4961
4962 if (&upper->list == &dev->adj_list.upper)
4963 return NULL;
4964
4965 *iter = &upper->list;
4966
4967 return upper->dev;
4968}
4969EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
4970
31088a11
VF
4971/**
4972 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
48311f46
VF
4973 * @dev: device
4974 * @iter: list_head ** of the current position
4975 *
4976 * Gets the next device from the dev's upper list, starting from iter
4977 * position. The caller must hold RCU read lock.
4978 */
2f268f12
VF
4979struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4980 struct list_head **iter)
48311f46
VF
4981{
4982 struct netdev_adjacent *upper;
4983
85328240 4984 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
48311f46
VF
4985
4986 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4987
2f268f12 4988 if (&upper->list == &dev->all_adj_list.upper)
48311f46
VF
4989 return NULL;
4990
4991 *iter = &upper->list;
4992
4993 return upper->dev;
4994}
2f268f12 4995EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
48311f46 4996
31088a11
VF
4997/**
4998 * netdev_lower_get_next_private - Get the next ->private from the
4999 * lower neighbour list
5000 * @dev: device
5001 * @iter: list_head ** of the current position
5002 *
5003 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5004 * list, starting from iter position. The caller must either hold the
5005 * RTNL lock or its own locking that guarantees that the neighbour lower
5006 * list will remain unchanged.
5007 */
5008void *netdev_lower_get_next_private(struct net_device *dev,
5009 struct list_head **iter)
5010{
5011 struct netdev_adjacent *lower;
5012
5013 lower = list_entry(*iter, struct netdev_adjacent, list);
5014
5015 if (&lower->list == &dev->adj_list.lower)
5016 return NULL;
5017
6859e7df 5018 *iter = lower->list.next;
31088a11
VF
5019
5020 return lower->private;
5021}
5022EXPORT_SYMBOL(netdev_lower_get_next_private);
5023
5024/**
5025 * netdev_lower_get_next_private_rcu - Get the next ->private from the
5026 * lower neighbour list, RCU
5027 * variant
5028 * @dev: device
5029 * @iter: list_head ** of the current position
5030 *
5031 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5032 * list, starting from iter position. The caller must hold RCU read lock.
5033 */
5034void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5035 struct list_head **iter)
5036{
5037 struct netdev_adjacent *lower;
5038
5039 WARN_ON_ONCE(!rcu_read_lock_held());
5040
5041 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5042
5043 if (&lower->list == &dev->adj_list.lower)
5044 return NULL;
5045
6859e7df 5046 *iter = &lower->list;
31088a11
VF
5047
5048 return lower->private;
5049}
5050EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5051
4085ebe8
VY
5052/**
5053 * netdev_lower_get_next - Get the next device from the lower neighbour
5054 * list
5055 * @dev: device
5056 * @iter: list_head ** of the current position
5057 *
5058 * Gets the next netdev_adjacent from the dev's lower neighbour
5059 * list, starting from iter position. The caller must hold the RTNL lock or
5060 * its own locking that guarantees that the neighbour lower
5061 * list will remain unchanged.
5062 */
5063void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5064{
5065 struct netdev_adjacent *lower;
5066
5067 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
5068
5069 if (&lower->list == &dev->adj_list.lower)
5070 return NULL;
5071
5072 *iter = &lower->list;
5073
5074 return lower->dev;
5075}
5076EXPORT_SYMBOL(netdev_lower_get_next);
5077
e001bfad 5078/**
5079 * netdev_lower_get_first_private_rcu - Get the first ->private from the
5080 * lower neighbour list, RCU
5081 * variant
5082 * @dev: device
5083 *
5084 * Gets the first netdev_adjacent->private from the dev's lower neighbour
5085 * list. The caller must hold RCU read lock.
5086 */
5087void *netdev_lower_get_first_private_rcu(struct net_device *dev)
5088{
5089 struct netdev_adjacent *lower;
5090
5091 lower = list_first_or_null_rcu(&dev->adj_list.lower,
5092 struct netdev_adjacent, list);
5093 if (lower)
5094 return lower->private;
5095 return NULL;
5096}
5097EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
5098
9ff162a8
JP
5099/**
5100 * netdev_master_upper_dev_get_rcu - Get master upper device
5101 * @dev: device
5102 *
5103 * Find a master upper device and return pointer to it or NULL in case
5104 * it's not there. The caller must hold the RCU read lock.
5105 */
5106struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
5107{
aa9d8560 5108 struct netdev_adjacent *upper;
9ff162a8 5109
2f268f12 5110 upper = list_first_or_null_rcu(&dev->adj_list.upper,
aa9d8560 5111 struct netdev_adjacent, list);
9ff162a8
JP
5112 if (upper && likely(upper->master))
5113 return upper->dev;
5114 return NULL;
5115}
5116EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
5117
0a59f3a9 5118static int netdev_adjacent_sysfs_add(struct net_device *dev,
3ee32707
VF
5119 struct net_device *adj_dev,
5120 struct list_head *dev_list)
5121{
5122 char linkname[IFNAMSIZ+7];
5123 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5124 "upper_%s" : "lower_%s", adj_dev->name);
5125 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
5126 linkname);
5127}
0a59f3a9 5128static void netdev_adjacent_sysfs_del(struct net_device *dev,
3ee32707
VF
5129 char *name,
5130 struct list_head *dev_list)
5131{
5132 char linkname[IFNAMSIZ+7];
5133 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5134 "upper_%s" : "lower_%s", name);
5135 sysfs_remove_link(&(dev->dev.kobj), linkname);
5136}
5137
7ce64c79
AF
5138static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5139 struct net_device *adj_dev,
5140 struct list_head *dev_list)
5141{
5142 return (dev_list == &dev->adj_list.upper ||
5143 dev_list == &dev->adj_list.lower) &&
5144 net_eq(dev_net(dev), dev_net(adj_dev));
5145}
3ee32707 5146
5d261913
VF
5147static int __netdev_adjacent_dev_insert(struct net_device *dev,
5148 struct net_device *adj_dev,
7863c054 5149 struct list_head *dev_list,
402dae96 5150 void *private, bool master)
5d261913
VF
5151{
5152 struct netdev_adjacent *adj;
842d67a7 5153 int ret;
5d261913 5154
7863c054 5155 adj = __netdev_find_adj(dev, adj_dev, dev_list);
5d261913
VF
5156
5157 if (adj) {
5d261913
VF
5158 adj->ref_nr++;
5159 return 0;
5160 }
5161
5162 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5163 if (!adj)
5164 return -ENOMEM;
5165
5166 adj->dev = adj_dev;
5167 adj->master = master;
5d261913 5168 adj->ref_nr = 1;
402dae96 5169 adj->private = private;
5d261913 5170 dev_hold(adj_dev);
2f268f12
VF
5171
5172 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5173 adj_dev->name, dev->name, adj_dev->name);
5d261913 5174
7ce64c79 5175 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
3ee32707 5176 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5831d66e
VF
5177 if (ret)
5178 goto free_adj;
5179 }
5180
7863c054 5181 /* Ensure that master link is always the first item in list. */
842d67a7
VF
5182 if (master) {
5183 ret = sysfs_create_link(&(dev->dev.kobj),
5184 &(adj_dev->dev.kobj), "master");
5185 if (ret)
5831d66e 5186 goto remove_symlinks;
842d67a7 5187
7863c054 5188 list_add_rcu(&adj->list, dev_list);
842d67a7 5189 } else {
7863c054 5190 list_add_tail_rcu(&adj->list, dev_list);
842d67a7 5191 }
5d261913
VF
5192
5193 return 0;
842d67a7 5194
5831d66e 5195remove_symlinks:
7ce64c79 5196 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 5197 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
842d67a7
VF
5198free_adj:
5199 kfree(adj);
974daef7 5200 dev_put(adj_dev);
842d67a7
VF
5201
5202 return ret;
5d261913
VF
5203}
5204
1d143d9f 5205static void __netdev_adjacent_dev_remove(struct net_device *dev,
5206 struct net_device *adj_dev,
5207 struct list_head *dev_list)
5d261913
VF
5208{
5209 struct netdev_adjacent *adj;
5210
7863c054 5211 adj = __netdev_find_adj(dev, adj_dev, dev_list);
5d261913 5212
2f268f12
VF
5213 if (!adj) {
5214 pr_err("tried to remove device %s from %s\n",
5215 dev->name, adj_dev->name);
5d261913 5216 BUG();
2f268f12 5217 }
5d261913
VF
5218
5219 if (adj->ref_nr > 1) {
2f268f12
VF
5220 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5221 adj->ref_nr-1);
5d261913
VF
5222 adj->ref_nr--;
5223 return;
5224 }
5225
842d67a7
VF
5226 if (adj->master)
5227 sysfs_remove_link(&(dev->dev.kobj), "master");
5228
7ce64c79 5229 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 5230 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5831d66e 5231
5d261913 5232 list_del_rcu(&adj->list);
2f268f12
VF
5233 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5234 adj_dev->name, dev->name, adj_dev->name);
5d261913
VF
5235 dev_put(adj_dev);
5236 kfree_rcu(adj, rcu);
5237}
5238
1d143d9f 5239static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5240 struct net_device *upper_dev,
5241 struct list_head *up_list,
5242 struct list_head *down_list,
5243 void *private, bool master)
5d261913
VF
5244{
5245 int ret;
5246
402dae96
VF
5247 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5248 master);
5d261913
VF
5249 if (ret)
5250 return ret;
5251
402dae96
VF
5252 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5253 false);
5d261913 5254 if (ret) {
2f268f12 5255 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5d261913
VF
5256 return ret;
5257 }
5258
5259 return 0;
5260}
5261
1d143d9f 5262static int __netdev_adjacent_dev_link(struct net_device *dev,
5263 struct net_device *upper_dev)
5d261913 5264{
2f268f12
VF
5265 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5266 &dev->all_adj_list.upper,
5267 &upper_dev->all_adj_list.lower,
402dae96 5268 NULL, false);
5d261913
VF
5269}
5270
1d143d9f 5271static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5272 struct net_device *upper_dev,
5273 struct list_head *up_list,
5274 struct list_head *down_list)
5d261913 5275{
2f268f12
VF
5276 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5277 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5d261913
VF
5278}
5279
1d143d9f 5280static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5281 struct net_device *upper_dev)
5d261913 5282{
2f268f12
VF
5283 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5284 &dev->all_adj_list.upper,
5285 &upper_dev->all_adj_list.lower);
5286}
5287
1d143d9f 5288static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5289 struct net_device *upper_dev,
5290 void *private, bool master)
2f268f12
VF
5291{
5292 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5293
5294 if (ret)
5295 return ret;
5296
5297 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5298 &dev->adj_list.upper,
5299 &upper_dev->adj_list.lower,
402dae96 5300 private, master);
2f268f12
VF
5301 if (ret) {
5302 __netdev_adjacent_dev_unlink(dev, upper_dev);
5303 return ret;
5304 }
5305
5306 return 0;
5d261913
VF
5307}
5308
1d143d9f 5309static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5310 struct net_device *upper_dev)
2f268f12
VF
5311{
5312 __netdev_adjacent_dev_unlink(dev, upper_dev);
5313 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5314 &dev->adj_list.upper,
5315 &upper_dev->adj_list.lower);
5316}
5d261913 5317
9ff162a8 5318static int __netdev_upper_dev_link(struct net_device *dev,
402dae96
VF
5319 struct net_device *upper_dev, bool master,
5320 void *private)
9ff162a8 5321{
5d261913
VF
5322 struct netdev_adjacent *i, *j, *to_i, *to_j;
5323 int ret = 0;
9ff162a8
JP
5324
5325 ASSERT_RTNL();
5326
5327 if (dev == upper_dev)
5328 return -EBUSY;
5329
5330 /* To prevent loops, check if dev is not upper device to upper_dev. */
2f268f12 5331 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
9ff162a8
JP
5332 return -EBUSY;
5333
d66bf7dd 5334 if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
9ff162a8
JP
5335 return -EEXIST;
5336
5337 if (master && netdev_master_upper_dev_get(dev))
5338 return -EBUSY;
5339
402dae96
VF
5340 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5341 master);
5d261913
VF
5342 if (ret)
5343 return ret;
9ff162a8 5344
5d261913 5345 /* Now that we linked these devs, make all the upper_dev's
2f268f12 5346 * all_adj_list.upper visible to every dev's all_adj_list.lower and
5d261913
VF
5347 * vice versa, and don't forget the device itself. All of these
5348 * links are non-neighbours.
5349 */
2f268f12
VF
5350 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5351 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5352 pr_debug("Interlinking %s with %s, non-neighbour\n",
5353 i->dev->name, j->dev->name);
5d261913
VF
5354 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5355 if (ret)
5356 goto rollback_mesh;
5357 }
5358 }
5359
5360 /* add dev to every upper_dev's upper device */
2f268f12
VF
5361 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5362 pr_debug("linking %s's upper device %s with %s\n",
5363 upper_dev->name, i->dev->name, dev->name);
5d261913
VF
5364 ret = __netdev_adjacent_dev_link(dev, i->dev);
5365 if (ret)
5366 goto rollback_upper_mesh;
5367 }
5368
5369 /* add upper_dev to every dev's lower device */
2f268f12
VF
5370 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5371 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5372 i->dev->name, upper_dev->name);
5d261913
VF
5373 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5374 if (ret)
5375 goto rollback_lower_mesh;
5376 }
9ff162a8 5377
42e52bf9 5378 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
9ff162a8 5379 return 0;
5d261913
VF
5380
5381rollback_lower_mesh:
5382 to_i = i;
2f268f12 5383 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5d261913
VF
5384 if (i == to_i)
5385 break;
5386 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5387 }
5388
5389 i = NULL;
5390
5391rollback_upper_mesh:
5392 to_i = i;
2f268f12 5393 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5d261913
VF
5394 if (i == to_i)
5395 break;
5396 __netdev_adjacent_dev_unlink(dev, i->dev);
5397 }
5398
5399 i = j = NULL;
5400
5401rollback_mesh:
5402 to_i = i;
5403 to_j = j;
2f268f12
VF
5404 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5405 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5d261913
VF
5406 if (i == to_i && j == to_j)
5407 break;
5408 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5409 }
5410 if (i == to_i)
5411 break;
5412 }
5413
2f268f12 5414 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
5415
5416 return ret;
9ff162a8
JP
5417}
5418
5419/**
5420 * netdev_upper_dev_link - Add a link to the upper device
5421 * @dev: device
5422 * @upper_dev: new upper device
5423 *
5424 * Adds a link to a device which is upper to this one. The caller must hold
5425 * the RTNL lock. On a failure a negative errno code is returned.
5426 * On success the reference counts are adjusted and the function
5427 * returns zero.
5428 */
5429int netdev_upper_dev_link(struct net_device *dev,
5430 struct net_device *upper_dev)
5431{
402dae96 5432 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
9ff162a8
JP
5433}
5434EXPORT_SYMBOL(netdev_upper_dev_link);
5435
5436/**
5437 * netdev_master_upper_dev_link - Add a master link to the upper device
5438 * @dev: device
5439 * @upper_dev: new upper device
5440 *
5441 * Adds a link to a device which is upper to this one. In this case, only
5442 * one master upper device can be linked, although other non-master devices
5443 * might be linked as well. The caller must hold the RTNL lock.
5444 * On a failure a negative errno code is returned. On success the reference
5445 * counts are adjusted and the function returns zero.
5446 */
5447int netdev_master_upper_dev_link(struct net_device *dev,
5448 struct net_device *upper_dev)
5449{
402dae96 5450 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
9ff162a8
JP
5451}
5452EXPORT_SYMBOL(netdev_master_upper_dev_link);
5453
402dae96
VF
5454int netdev_master_upper_dev_link_private(struct net_device *dev,
5455 struct net_device *upper_dev,
5456 void *private)
5457{
5458 return __netdev_upper_dev_link(dev, upper_dev, true, private);
5459}
5460EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5461
9ff162a8
JP
5462/**
5463 * netdev_upper_dev_unlink - Removes a link to upper device
5464 * @dev: device
5465 * @upper_dev: upper device to unlink
5466 *
5467 * Removes a link to a device which is upper to this one. The caller must hold
5468 * the RTNL lock.
5469 */
5470void netdev_upper_dev_unlink(struct net_device *dev,
5471 struct net_device *upper_dev)
5472{
5d261913 5473 struct netdev_adjacent *i, *j;
9ff162a8
JP
5474 ASSERT_RTNL();
5475
2f268f12 5476 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
5477
5478 /* Here is the tricky part. We must remove all dev's lower
5479 * devices from all upper_dev's upper devices and vice
5480 * versa, to maintain the graph relationship.
5481 */
2f268f12
VF
5482 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5483 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5d261913
VF
5484 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5485
5486 /* also remove the device itself from the lower/upper device
5487 * list
5488 */
2f268f12 5489 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5d261913
VF
5490 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5491
2f268f12 5492 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5d261913
VF
5493 __netdev_adjacent_dev_unlink(dev, i->dev);
5494
42e52bf9 5495 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
9ff162a8
JP
5496}
5497EXPORT_SYMBOL(netdev_upper_dev_unlink);
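/*
 * Illustrative usage sketch (not part of dev.c): a hypothetical aggregating
 * driver enslaving and releasing a lower device with the upper-device API
 * above.  All "example_" names are made up for illustration only.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_enslave(struct net_device *master, struct net_device *slave)
{
        int err;

        rtnl_lock();
        /* record master as the single master upper device of slave */
        err = netdev_master_upper_dev_link(slave, master);
        rtnl_unlock();
        return err;
}

static void example_release(struct net_device *master, struct net_device *slave)
{
        rtnl_lock();
        netdev_upper_dev_unlink(slave, master);
        rtnl_unlock();
}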
5498
61bd3857
MS
5499/**
5500 * netdev_bonding_info_change - Dispatch event about slave change
5501 * @dev: device
4a26e453 5502 * @bonding_info: info to dispatch
61bd3857
MS
5503 *
5504 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5505 * The caller must hold the RTNL lock.
5506 */
5507void netdev_bonding_info_change(struct net_device *dev,
5508 struct netdev_bonding_info *bonding_info)
5509{
5510 struct netdev_notifier_bonding_info info;
5511
5512 memcpy(&info.bonding_info, bonding_info,
5513 sizeof(struct netdev_bonding_info));
5514 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
5515 &info.info);
5516}
5517EXPORT_SYMBOL(netdev_bonding_info_change);
5518
2ce1ee17 5519static void netdev_adjacent_add_links(struct net_device *dev)
4c75431a
AF
5520{
5521 struct netdev_adjacent *iter;
5522
5523 struct net *net = dev_net(dev);
5524
5525 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5526 if (!net_eq(net,dev_net(iter->dev)))
5527 continue;
5528 netdev_adjacent_sysfs_add(iter->dev, dev,
5529 &iter->dev->adj_list.lower);
5530 netdev_adjacent_sysfs_add(dev, iter->dev,
5531 &dev->adj_list.upper);
5532 }
5533
5534 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5535 if (!net_eq(net,dev_net(iter->dev)))
5536 continue;
5537 netdev_adjacent_sysfs_add(iter->dev, dev,
5538 &iter->dev->adj_list.upper);
5539 netdev_adjacent_sysfs_add(dev, iter->dev,
5540 &dev->adj_list.lower);
5541 }
5542}
5543
2ce1ee17 5544static void netdev_adjacent_del_links(struct net_device *dev)
4c75431a
AF
5545{
5546 struct netdev_adjacent *iter;
5547
5548 struct net *net = dev_net(dev);
5549
5550 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5551 if (!net_eq(net,dev_net(iter->dev)))
5552 continue;
5553 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5554 &iter->dev->adj_list.lower);
5555 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5556 &dev->adj_list.upper);
5557 }
5558
5559 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5560 if (!net_eq(net,dev_net(iter->dev)))
5561 continue;
5562 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5563 &iter->dev->adj_list.upper);
5564 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5565 &dev->adj_list.lower);
5566 }
5567}
5568
5bb025fa 5569void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
402dae96 5570{
5bb025fa 5571 struct netdev_adjacent *iter;
402dae96 5572
4c75431a
AF
5573 struct net *net = dev_net(dev);
5574
5bb025fa 5575 list_for_each_entry(iter, &dev->adj_list.upper, list) {
4c75431a
AF
5576 if (!net_eq(net,dev_net(iter->dev)))
5577 continue;
5bb025fa
VF
5578 netdev_adjacent_sysfs_del(iter->dev, oldname,
5579 &iter->dev->adj_list.lower);
5580 netdev_adjacent_sysfs_add(iter->dev, dev,
5581 &iter->dev->adj_list.lower);
5582 }
402dae96 5583
5bb025fa 5584 list_for_each_entry(iter, &dev->adj_list.lower, list) {
4c75431a
AF
5585 if (!net_eq(net,dev_net(iter->dev)))
5586 continue;
5bb025fa
VF
5587 netdev_adjacent_sysfs_del(iter->dev, oldname,
5588 &iter->dev->adj_list.upper);
5589 netdev_adjacent_sysfs_add(iter->dev, dev,
5590 &iter->dev->adj_list.upper);
5591 }
402dae96 5592}
402dae96
VF
5593
5594void *netdev_lower_dev_get_private(struct net_device *dev,
5595 struct net_device *lower_dev)
5596{
5597 struct netdev_adjacent *lower;
5598
5599 if (!lower_dev)
5600 return NULL;
5601 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5602 if (!lower)
5603 return NULL;
5604
5605 return lower->private;
5606}
5607EXPORT_SYMBOL(netdev_lower_dev_get_private);
5608
4085ebe8
VY
5609
5610int dev_get_nest_level(struct net_device *dev,
5611 bool (*type_check)(struct net_device *dev))
5612{
5613 struct net_device *lower = NULL;
5614 struct list_head *iter;
5615 int max_nest = -1;
5616 int nest;
5617
5618 ASSERT_RTNL();
5619
5620 netdev_for_each_lower_dev(dev, lower, iter) {
5621 nest = dev_get_nest_level(lower, type_check);
5622 if (max_nest < nest)
5623 max_nest = nest;
5624 }
5625
5626 if (type_check(dev))
5627 max_nest++;
5628
5629 return max_nest;
5630}
5631EXPORT_SYMBOL(dev_get_nest_level);
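/*
 * Illustrative sketch (not part of dev.c): how a stacked-device driver might
 * use dev_get_nest_level() to pick a lockdep subclass.  The example_ops and
 * example_is_stacked_dev() predicate are hypothetical.
 */
#include <linux/netdevice.h>

static const struct net_device_ops example_ops;

static bool example_is_stacked_dev(struct net_device *dev)
{
        return dev->netdev_ops == &example_ops;
}

static int example_nest_level(struct net_device *dev)
{
        /* must be called under RTNL; counts how deep we are stacked */
        return dev_get_nest_level(dev, example_is_stacked_dev);
}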
5632
b6c40d68
PM
5633static void dev_change_rx_flags(struct net_device *dev, int flags)
5634{
d314774c
SH
5635 const struct net_device_ops *ops = dev->netdev_ops;
5636
d2615bf4 5637 if (ops->ndo_change_rx_flags)
d314774c 5638 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
5639}
5640
991fb3f7 5641static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
1da177e4 5642{
b536db93 5643 unsigned int old_flags = dev->flags;
d04a48b0
EB
5644 kuid_t uid;
5645 kgid_t gid;
1da177e4 5646
24023451
PM
5647 ASSERT_RTNL();
5648
dad9b335
WC
5649 dev->flags |= IFF_PROMISC;
5650 dev->promiscuity += inc;
5651 if (dev->promiscuity == 0) {
5652 /*
5653 * Avoid overflow.
5654 * If inc causes overflow, untouch promisc and return error.
5655 */
5656 if (inc < 0)
5657 dev->flags &= ~IFF_PROMISC;
5658 else {
5659 dev->promiscuity -= inc;
7b6cd1ce
JP
5660 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5661 dev->name);
dad9b335
WC
5662 return -EOVERFLOW;
5663 }
5664 }
52609c0b 5665 if (dev->flags != old_flags) {
7b6cd1ce
JP
5666 pr_info("device %s %s promiscuous mode\n",
5667 dev->name,
5668 dev->flags & IFF_PROMISC ? "entered" : "left");
8192b0c4
DH
5669 if (audit_enabled) {
5670 current_uid_gid(&uid, &gid);
7759db82
KHK
5671 audit_log(current->audit_context, GFP_ATOMIC,
5672 AUDIT_ANOM_PROMISCUOUS,
5673 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5674 dev->name, (dev->flags & IFF_PROMISC),
5675 (old_flags & IFF_PROMISC),
e1760bd5 5676 from_kuid(&init_user_ns, audit_get_loginuid(current)),
d04a48b0
EB
5677 from_kuid(&init_user_ns, uid),
5678 from_kgid(&init_user_ns, gid),
7759db82 5679 audit_get_sessionid(current));
8192b0c4 5680 }
24023451 5681
b6c40d68 5682 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 5683 }
991fb3f7
ND
5684 if (notify)
5685 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
dad9b335 5686 return 0;
1da177e4
LT
5687}
5688
4417da66
PM
5689/**
5690 * dev_set_promiscuity - update promiscuity count on a device
5691 * @dev: device
5692 * @inc: modifier
5693 *
5694 * Add or remove promiscuity from a device. While the count in the device
5695 * remains above zero the interface remains promiscuous. Once it hits zero
5696 * the device reverts back to normal filtering operation. A negative inc
5697 * value is used to drop promiscuity on the device.
dad9b335 5698 * Return 0 if successful or a negative errno code on error.
4417da66 5699 */
dad9b335 5700int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66 5701{
b536db93 5702 unsigned int old_flags = dev->flags;
dad9b335 5703 int err;
4417da66 5704
991fb3f7 5705 err = __dev_set_promiscuity(dev, inc, true);
4b5a698e 5706 if (err < 0)
dad9b335 5707 return err;
4417da66
PM
5708 if (dev->flags != old_flags)
5709 dev_set_rx_mode(dev);
dad9b335 5710 return err;
4417da66 5711}
d1b19dff 5712EXPORT_SYMBOL(dev_set_promiscuity);
4417da66 5713
991fb3f7 5714static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
1da177e4 5715{
991fb3f7 5716 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
1da177e4 5717
24023451
PM
5718 ASSERT_RTNL();
5719
1da177e4 5720 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
5721 dev->allmulti += inc;
5722 if (dev->allmulti == 0) {
5723 /*
5724 * Avoid overflow.
5725 * If inc causes overflow, untouch allmulti and return error.
5726 */
5727 if (inc < 0)
5728 dev->flags &= ~IFF_ALLMULTI;
5729 else {
5730 dev->allmulti -= inc;
7b6cd1ce
JP
5731 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5732 dev->name);
dad9b335
WC
5733 return -EOVERFLOW;
5734 }
5735 }
24023451 5736 if (dev->flags ^ old_flags) {
b6c40d68 5737 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 5738 dev_set_rx_mode(dev);
991fb3f7
ND
5739 if (notify)
5740 __dev_notify_flags(dev, old_flags,
5741 dev->gflags ^ old_gflags);
24023451 5742 }
dad9b335 5743 return 0;
4417da66 5744}
991fb3f7
ND
5745
5746/**
5747 * dev_set_allmulti - update allmulti count on a device
5748 * @dev: device
5749 * @inc: modifier
5750 *
5751 * Add or remove reception of all multicast frames to a device. While the
5752 * count in the device remains above zero the interface keeps listening
5753 * to all multicast frames. Once it hits zero the device reverts back to normal
5754 * filtering operation. A negative @inc value is used to drop the counter
5755 * when releasing a resource needing all multicasts.
5756 * Return 0 if successful or a negative errno code on error.
5757 */
5758
5759int dev_set_allmulti(struct net_device *dev, int inc)
5760{
5761 return __dev_set_allmulti(dev, inc, true);
5762}
d1b19dff 5763EXPORT_SYMBOL(dev_set_allmulti);
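/*
 * Illustrative sketch (not part of dev.c): the promiscuity and allmulti
 * counts are reference counts, so every +1 must eventually be paired with
 * a -1.  A hypothetical packet-tap feature could take and drop them like
 * this; the function name is made up.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_tap_start(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_set_promiscuity(dev, 1);
        if (err)
                goto out;
        err = dev_set_allmulti(dev, 1);
        if (err)
                dev_set_promiscuity(dev, -1);   /* undo on failure */
out:
        rtnl_unlock();
        return err;
}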
4417da66
PM
5764
5765/*
5766 * Upload unicast and multicast address lists to device and
5767 * configure RX filtering. When the device doesn't support unicast
53ccaae1 5768 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
5769 * are present.
5770 */
5771void __dev_set_rx_mode(struct net_device *dev)
5772{
d314774c
SH
5773 const struct net_device_ops *ops = dev->netdev_ops;
5774
4417da66
PM
5775 /* dev_open will call this function so the list will stay sane. */
5776 if (!(dev->flags&IFF_UP))
5777 return;
5778
5779 if (!netif_device_present(dev))
40b77c94 5780 return;
4417da66 5781
01789349 5782 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4417da66
PM
5783 /* Unicast addresses changes may only happen under the rtnl,
5784 * therefore calling __dev_set_promiscuity here is safe.
5785 */
32e7bfc4 5786 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
991fb3f7 5787 __dev_set_promiscuity(dev, 1, false);
2d348d1f 5788 dev->uc_promisc = true;
32e7bfc4 5789 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
991fb3f7 5790 __dev_set_promiscuity(dev, -1, false);
2d348d1f 5791 dev->uc_promisc = false;
4417da66 5792 }
4417da66 5793 }
01789349
JP
5794
5795 if (ops->ndo_set_rx_mode)
5796 ops->ndo_set_rx_mode(dev);
4417da66
PM
5797}
5798
5799void dev_set_rx_mode(struct net_device *dev)
5800{
b9e40857 5801 netif_addr_lock_bh(dev);
4417da66 5802 __dev_set_rx_mode(dev);
b9e40857 5803 netif_addr_unlock_bh(dev);
1da177e4
LT
5804}
5805
f0db275a
SH
5806/**
5807 * dev_get_flags - get flags reported to userspace
5808 * @dev: device
5809 *
5810 * Get the combination of flag bits exported through APIs to userspace.
5811 */
95c96174 5812unsigned int dev_get_flags(const struct net_device *dev)
1da177e4 5813{
95c96174 5814 unsigned int flags;
1da177e4
LT
5815
5816 flags = (dev->flags & ~(IFF_PROMISC |
5817 IFF_ALLMULTI |
b00055aa
SR
5818 IFF_RUNNING |
5819 IFF_LOWER_UP |
5820 IFF_DORMANT)) |
1da177e4
LT
5821 (dev->gflags & (IFF_PROMISC |
5822 IFF_ALLMULTI));
5823
b00055aa
SR
5824 if (netif_running(dev)) {
5825 if (netif_oper_up(dev))
5826 flags |= IFF_RUNNING;
5827 if (netif_carrier_ok(dev))
5828 flags |= IFF_LOWER_UP;
5829 if (netif_dormant(dev))
5830 flags |= IFF_DORMANT;
5831 }
1da177e4
LT
5832
5833 return flags;
5834}
d1b19dff 5835EXPORT_SYMBOL(dev_get_flags);
1da177e4 5836
bd380811 5837int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 5838{
b536db93 5839 unsigned int old_flags = dev->flags;
bd380811 5840 int ret;
1da177e4 5841
24023451
PM
5842 ASSERT_RTNL();
5843
1da177e4
LT
5844 /*
5845 * Set the flags on our device.
5846 */
5847
5848 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5849 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5850 IFF_AUTOMEDIA)) |
5851 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5852 IFF_ALLMULTI));
5853
5854 /*
5855 * Load in the correct multicast list now the flags have changed.
5856 */
5857
b6c40d68
PM
5858 if ((old_flags ^ flags) & IFF_MULTICAST)
5859 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 5860
4417da66 5861 dev_set_rx_mode(dev);
1da177e4
LT
5862
5863 /*
5864 * Have we downed the interface? We handle IFF_UP ourselves
5865 * according to user attempts to set it, rather than blindly
5866 * setting it.
5867 */
5868
5869 ret = 0;
d215d10f 5870 if ((old_flags ^ flags) & IFF_UP)
bd380811 5871 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4 5872
1da177e4 5873 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff 5874 int inc = (flags & IFF_PROMISC) ? 1 : -1;
991fb3f7 5875 unsigned int old_flags = dev->flags;
d1b19dff 5876
1da177e4 5877 dev->gflags ^= IFF_PROMISC;
991fb3f7
ND
5878
5879 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5880 if (dev->flags != old_flags)
5881 dev_set_rx_mode(dev);
1da177e4
LT
5882 }
5883
5884 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5885 is important. Some (broken) drivers set IFF_PROMISC when
5886 IFF_ALLMULTI is requested, without asking us and without reporting it.
5887 */
5888 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
5889 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5890
1da177e4 5891 dev->gflags ^= IFF_ALLMULTI;
991fb3f7 5892 __dev_set_allmulti(dev, inc, false);
1da177e4
LT
5893 }
5894
bd380811
PM
5895 return ret;
5896}
5897
a528c219
ND
5898void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5899 unsigned int gchanges)
bd380811
PM
5900{
5901 unsigned int changes = dev->flags ^ old_flags;
5902
a528c219 5903 if (gchanges)
7f294054 5904 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
a528c219 5905
bd380811
PM
5906 if (changes & IFF_UP) {
5907 if (dev->flags & IFF_UP)
5908 call_netdevice_notifiers(NETDEV_UP, dev);
5909 else
5910 call_netdevice_notifiers(NETDEV_DOWN, dev);
5911 }
5912
5913 if (dev->flags & IFF_UP &&
be9efd36
JP
5914 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5915 struct netdev_notifier_change_info change_info;
5916
5917 change_info.flags_changed = changes;
5918 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5919 &change_info.info);
5920 }
bd380811
PM
5921}
5922
5923/**
5924 * dev_change_flags - change device settings
5925 * @dev: device
5926 * @flags: device state flags
5927 *
5928 * Change settings on device based state flags. The flags are
5929 * in the userspace exported format.
5930 */
b536db93 5931int dev_change_flags(struct net_device *dev, unsigned int flags)
bd380811 5932{
b536db93 5933 int ret;
991fb3f7 5934 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
bd380811
PM
5935
5936 ret = __dev_change_flags(dev, flags);
5937 if (ret < 0)
5938 return ret;
5939
991fb3f7 5940 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
a528c219 5941 __dev_notify_flags(dev, old_flags, changes);
1da177e4
LT
5942 return ret;
5943}
d1b19dff 5944EXPORT_SYMBOL(dev_change_flags);
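/*
 * Illustrative sketch (not part of dev.c): administratively bringing an
 * interface up from kernel code by adding IFF_UP to the flags reported by
 * dev_get_flags().  Hypothetical helper; a real caller would also check
 * the return value of dev_change_flags() against its own policy.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_bring_up(struct net_device *dev)
{
        unsigned int flags;
        int err = 0;

        rtnl_lock();
        flags = dev_get_flags(dev);
        if (!(flags & IFF_UP))
                err = dev_change_flags(dev, flags | IFF_UP);
        rtnl_unlock();
        return err;
}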
1da177e4 5945
2315dc91
VF
5946static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5947{
5948 const struct net_device_ops *ops = dev->netdev_ops;
5949
5950 if (ops->ndo_change_mtu)
5951 return ops->ndo_change_mtu(dev, new_mtu);
5952
5953 dev->mtu = new_mtu;
5954 return 0;
5955}
5956
f0db275a
SH
5957/**
5958 * dev_set_mtu - Change maximum transfer unit
5959 * @dev: device
5960 * @new_mtu: new transfer unit
5961 *
5962 * Change the maximum transfer size of the network device.
5963 */
1da177e4
LT
5964int dev_set_mtu(struct net_device *dev, int new_mtu)
5965{
2315dc91 5966 int err, orig_mtu;
1da177e4
LT
5967
5968 if (new_mtu == dev->mtu)
5969 return 0;
5970
5971 /* MTU must be positive. */
5972 if (new_mtu < 0)
5973 return -EINVAL;
5974
5975 if (!netif_device_present(dev))
5976 return -ENODEV;
5977
1d486bfb
VF
5978 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5979 err = notifier_to_errno(err);
5980 if (err)
5981 return err;
d314774c 5982
2315dc91
VF
5983 orig_mtu = dev->mtu;
5984 err = __dev_set_mtu(dev, new_mtu);
d314774c 5985
2315dc91
VF
5986 if (!err) {
5987 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5988 err = notifier_to_errno(err);
5989 if (err) {
5990 /* setting mtu back and notifying everyone again,
5991 * so that they have a chance to revert changes.
5992 */
5993 __dev_set_mtu(dev, orig_mtu);
5994 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5995 }
5996 }
1da177e4
LT
5997 return err;
5998}
d1b19dff 5999EXPORT_SYMBOL(dev_set_mtu);
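/*
 * Illustrative sketch (not part of dev.c): a hypothetical tunnel driver
 * shrinking its device MTU to make room for encapsulation overhead.
 * dev_set_mtu() itself runs the PRECHANGEMTU/CHANGEMTU notifiers shown above.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_set_tunnel_mtu(struct net_device *dev, int overhead)
{
        int err;

        rtnl_lock();
        err = dev_set_mtu(dev, dev->mtu - overhead);
        rtnl_unlock();
        return err;
}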
1da177e4 6000
cbda10fa
VD
6001/**
6002 * dev_set_group - Change group this device belongs to
6003 * @dev: device
6004 * @new_group: group this device should belong to
6005 */
6006void dev_set_group(struct net_device *dev, int new_group)
6007{
6008 dev->group = new_group;
6009}
6010EXPORT_SYMBOL(dev_set_group);
6011
f0db275a
SH
6012/**
6013 * dev_set_mac_address - Change Media Access Control Address
6014 * @dev: device
6015 * @sa: new address
6016 *
6017 * Change the hardware (MAC) address of the device
6018 */
1da177e4
LT
6019int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6020{
d314774c 6021 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
6022 int err;
6023
d314774c 6024 if (!ops->ndo_set_mac_address)
1da177e4
LT
6025 return -EOPNOTSUPP;
6026 if (sa->sa_family != dev->type)
6027 return -EINVAL;
6028 if (!netif_device_present(dev))
6029 return -ENODEV;
d314774c 6030 err = ops->ndo_set_mac_address(dev, sa);
f6521516
JP
6031 if (err)
6032 return err;
fbdeca2d 6033 dev->addr_assign_type = NET_ADDR_SET;
f6521516 6034 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7bf23575 6035 add_device_randomness(dev->dev_addr, dev->addr_len);
f6521516 6036 return 0;
1da177e4 6037}
d1b19dff 6038EXPORT_SYMBOL(dev_set_mac_address);
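/*
 * Illustrative sketch (not part of dev.c): changing a device's MAC address
 * from kernel code.  The sockaddr family must match dev->type, as checked
 * above; names are hypothetical.
 */
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

static int example_set_mac(struct net_device *dev, const u8 *mac)
{
        struct sockaddr sa;
        int err;

        sa.sa_family = dev->type;               /* e.g. ARPHRD_ETHER */
        memcpy(sa.sa_data, mac, dev->addr_len);

        rtnl_lock();
        err = dev_set_mac_address(dev, &sa);
        rtnl_unlock();
        return err;
}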
1da177e4 6039
4bf84c35
JP
6040/**
6041 * dev_change_carrier - Change device carrier
6042 * @dev: device
691b3b7e 6043 * @new_carrier: new value
4bf84c35
JP
6044 *
6045 * Change device carrier
6046 */
6047int dev_change_carrier(struct net_device *dev, bool new_carrier)
6048{
6049 const struct net_device_ops *ops = dev->netdev_ops;
6050
6051 if (!ops->ndo_change_carrier)
6052 return -EOPNOTSUPP;
6053 if (!netif_device_present(dev))
6054 return -ENODEV;
6055 return ops->ndo_change_carrier(dev, new_carrier);
6056}
6057EXPORT_SYMBOL(dev_change_carrier);
6058
66b52b0d
JP
6059/**
6060 * dev_get_phys_port_id - Get device physical port ID
6061 * @dev: device
6062 * @ppid: port ID
6063 *
6064 * Get device physical port ID
6065 */
6066int dev_get_phys_port_id(struct net_device *dev,
02637fce 6067 struct netdev_phys_item_id *ppid)
66b52b0d
JP
6068{
6069 const struct net_device_ops *ops = dev->netdev_ops;
6070
6071 if (!ops->ndo_get_phys_port_id)
6072 return -EOPNOTSUPP;
6073 return ops->ndo_get_phys_port_id(dev, ppid);
6074}
6075EXPORT_SYMBOL(dev_get_phys_port_id);
6076
db24a904
DA
6077/**
6078 * dev_get_phys_port_name - Get device physical port name
6079 * @dev: device
6080 * @name: port name
6081 *
6082 * Get device physical port name
6083 */
6084int dev_get_phys_port_name(struct net_device *dev,
6085 char *name, size_t len)
6086{
6087 const struct net_device_ops *ops = dev->netdev_ops;
6088
6089 if (!ops->ndo_get_phys_port_name)
6090 return -EOPNOTSUPP;
6091 return ops->ndo_get_phys_port_name(dev, name, len);
6092}
6093EXPORT_SYMBOL(dev_get_phys_port_name);
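/*
 * Illustrative sketch (not part of dev.c): querying the physical port of a
 * device, e.g. to group interfaces that share the same port.  Hypothetical
 * caller; expected to run under RTNL like the rtnetlink/ioctl users.
 */
#include <linux/netdevice.h>

static void example_show_phys_port(struct net_device *dev)
{
        struct netdev_phys_item_id ppid;
        char name[IFNAMSIZ];

        if (!dev_get_phys_port_id(dev, &ppid))
                netdev_info(dev, "phys port id %*phN\n", ppid.id_len, ppid.id);
        if (!dev_get_phys_port_name(dev, name, sizeof(name)))
                netdev_info(dev, "phys port name %s\n", name);
}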
6094
1da177e4
LT
6095/**
6096 * dev_new_index - allocate an ifindex
c4ea43c5 6097 * @net: the applicable net namespace
1da177e4
LT
6098 *
6099 * Returns a suitable unique value for a new device interface
6100 * number. The caller must hold the rtnl semaphore or the
6101 * dev_base_lock to be sure it remains unique.
6102 */
881d966b 6103static int dev_new_index(struct net *net)
1da177e4 6104{
aa79e66e 6105 int ifindex = net->ifindex;
1da177e4
LT
6106 for (;;) {
6107 if (++ifindex <= 0)
6108 ifindex = 1;
881d966b 6109 if (!__dev_get_by_index(net, ifindex))
aa79e66e 6110 return net->ifindex = ifindex;
1da177e4
LT
6111 }
6112}
6113
1da177e4 6114/* Delayed registration/unregisteration */
3b5b34fd 6115static LIST_HEAD(net_todo_list);
200b916f 6116DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
1da177e4 6117
6f05f629 6118static void net_set_todo(struct net_device *dev)
1da177e4 6119{
1da177e4 6120 list_add_tail(&dev->todo_list, &net_todo_list);
50624c93 6121 dev_net(dev)->dev_unreg_count++;
1da177e4
LT
6122}
6123
9b5e383c 6124static void rollback_registered_many(struct list_head *head)
93ee31f1 6125{
e93737b0 6126 struct net_device *dev, *tmp;
5cde2829 6127 LIST_HEAD(close_head);
9b5e383c 6128
93ee31f1
DL
6129 BUG_ON(dev_boot_phase);
6130 ASSERT_RTNL();
6131
e93737b0 6132 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 6133 /* Some devices call here without ever having registered,
e93737b0
KK
6134 * as part of unwinding a failed initialization. Remove those
6135 * devices and proceed with the remaining.
9b5e383c
ED
6136 */
6137 if (dev->reg_state == NETREG_UNINITIALIZED) {
7b6cd1ce
JP
6138 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6139 dev->name, dev);
93ee31f1 6140
9b5e383c 6141 WARN_ON(1);
e93737b0
KK
6142 list_del(&dev->unreg_list);
6143 continue;
9b5e383c 6144 }
449f4544 6145 dev->dismantle = true;
9b5e383c 6146 BUG_ON(dev->reg_state != NETREG_REGISTERED);
44345724 6147 }
93ee31f1 6148
44345724 6149 /* If device is running, close it first. */
5cde2829
EB
6150 list_for_each_entry(dev, head, unreg_list)
6151 list_add_tail(&dev->close_list, &close_head);
99c4a26a 6152 dev_close_many(&close_head, true);
93ee31f1 6153
44345724 6154 list_for_each_entry(dev, head, unreg_list) {
9b5e383c
ED
6155 /* And unlink it from device chain. */
6156 unlist_netdevice(dev);
93ee31f1 6157
9b5e383c
ED
6158 dev->reg_state = NETREG_UNREGISTERING;
6159 }
93ee31f1
DL
6160
6161 synchronize_net();
6162
9b5e383c 6163 list_for_each_entry(dev, head, unreg_list) {
395eea6c
MB
6164 struct sk_buff *skb = NULL;
6165
9b5e383c
ED
6166 /* Shutdown queueing discipline. */
6167 dev_shutdown(dev);
93ee31f1
DL
6168
6169
9b5e383c
ED
6170 /* Notify protocols that we are about to destroy
6171 this device. They should clean up all of their state.
6172 */
6173 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 6174
395eea6c
MB
6175 if (!dev->rtnl_link_ops ||
6176 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6177 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6178 GFP_KERNEL);
6179
9b5e383c
ED
6180 /*
6181 * Flush the unicast and multicast chains
6182 */
a748ee24 6183 dev_uc_flush(dev);
22bedad3 6184 dev_mc_flush(dev);
93ee31f1 6185
9b5e383c
ED
6186 if (dev->netdev_ops->ndo_uninit)
6187 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 6188
395eea6c
MB
6189 if (skb)
6190 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
56bfa7ee 6191
9ff162a8
JP
6192 /* Notifier chain MUST detach us all upper devices. */
6193 WARN_ON(netdev_has_any_upper_dev(dev));
93ee31f1 6194
9b5e383c
ED
6195 /* Remove entries from kobject tree */
6196 netdev_unregister_kobject(dev);
024e9679
AD
6197#ifdef CONFIG_XPS
6198 /* Remove XPS queueing entries */
6199 netif_reset_xps_queues_gt(dev, 0);
6200#endif
9b5e383c 6201 }
93ee31f1 6202
850a545b 6203 synchronize_net();
395264d5 6204
a5ee1551 6205 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
6206 dev_put(dev);
6207}
6208
6209static void rollback_registered(struct net_device *dev)
6210{
6211 LIST_HEAD(single);
6212
6213 list_add(&dev->unreg_list, &single);
6214 rollback_registered_many(&single);
ceaaec98 6215 list_del(&single);
93ee31f1
DL
6216}
6217
c8f44aff
MM
6218static netdev_features_t netdev_fix_features(struct net_device *dev,
6219 netdev_features_t features)
b63365a2 6220{
57422dc5
MM
6221 /* Fix illegal checksum combinations */
6222 if ((features & NETIF_F_HW_CSUM) &&
6223 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 6224 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
57422dc5
MM
6225 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6226 }
6227
b63365a2 6228 /* TSO requires that SG is present as well. */
ea2d3688 6229 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6f404e44 6230 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
ea2d3688 6231 features &= ~NETIF_F_ALL_TSO;
b63365a2
HX
6232 }
6233
ec5f0615
PS
6234 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6235 !(features & NETIF_F_IP_CSUM)) {
6236 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6237 features &= ~NETIF_F_TSO;
6238 features &= ~NETIF_F_TSO_ECN;
6239 }
6240
6241 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6242 !(features & NETIF_F_IPV6_CSUM)) {
6243 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6244 features &= ~NETIF_F_TSO6;
6245 }
6246
31d8b9e0
BH
6247 /* TSO ECN requires that TSO is present as well. */
6248 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6249 features &= ~NETIF_F_TSO_ECN;
6250
212b573f
MM
6251 /* Software GSO depends on SG. */
6252 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6f404e44 6253 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
212b573f
MM
6254 features &= ~NETIF_F_GSO;
6255 }
6256
acd1130e 6257 /* UFO needs SG and checksumming */
b63365a2 6258 if (features & NETIF_F_UFO) {
79032644
MM
6259 /* maybe split UFO into V4 and V6? */
6260 if (!((features & NETIF_F_GEN_CSUM) ||
6261 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
6262 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 6263 netdev_dbg(dev,
acd1130e 6264 "Dropping NETIF_F_UFO since no checksum offload features.\n");
b63365a2
HX
6265 features &= ~NETIF_F_UFO;
6266 }
6267
6268 if (!(features & NETIF_F_SG)) {
6f404e44 6269 netdev_dbg(dev,
acd1130e 6270 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
b63365a2
HX
6271 features &= ~NETIF_F_UFO;
6272 }
6273 }
6274
d0290214
JP
6275#ifdef CONFIG_NET_RX_BUSY_POLL
6276 if (dev->netdev_ops->ndo_busy_poll)
6277 features |= NETIF_F_BUSY_POLL;
6278 else
6279#endif
6280 features &= ~NETIF_F_BUSY_POLL;
6281
b63365a2
HX
6282 return features;
6283}
b63365a2 6284
6cb6a27c 6285int __netdev_update_features(struct net_device *dev)
5455c699 6286{
c8f44aff 6287 netdev_features_t features;
5455c699
MM
6288 int err = 0;
6289
87267485
MM
6290 ASSERT_RTNL();
6291
5455c699
MM
6292 features = netdev_get_wanted_features(dev);
6293
6294 if (dev->netdev_ops->ndo_fix_features)
6295 features = dev->netdev_ops->ndo_fix_features(dev, features);
6296
6297 /* driver might be less strict about feature dependencies */
6298 features = netdev_fix_features(dev, features);
6299
6300 if (dev->features == features)
6cb6a27c 6301 return 0;
5455c699 6302
c8f44aff
MM
6303 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6304 &dev->features, &features);
5455c699
MM
6305
6306 if (dev->netdev_ops->ndo_set_features)
6307 err = dev->netdev_ops->ndo_set_features(dev, features);
6308
6cb6a27c 6309 if (unlikely(err < 0)) {
5455c699 6310 netdev_err(dev,
c8f44aff
MM
6311 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6312 err, &features, &dev->features);
6cb6a27c
MM
6313 return -1;
6314 }
6315
6316 if (!err)
6317 dev->features = features;
6318
6319 return 1;
6320}
6321
afe12cc8
MM
6322/**
6323 * netdev_update_features - recalculate device features
6324 * @dev: the device to check
6325 *
6326 * Recalculate dev->features set and send notifications if it
6327 * has changed. Should be called after driver or hardware dependent
6328 * conditions might have changed that influence the features.
6329 */
6cb6a27c
MM
6330void netdev_update_features(struct net_device *dev)
6331{
6332 if (__netdev_update_features(dev))
6333 netdev_features_change(dev);
5455c699
MM
6334}
6335EXPORT_SYMBOL(netdev_update_features);
6336
afe12cc8
MM
6337/**
6338 * netdev_change_features - recalculate device features
6339 * @dev: the device to check
6340 *
6341 * Recalculate dev->features set and send notifications even
6342 * if they have not changed. Should be called instead of
6343 * netdev_update_features() if also dev->vlan_features might
6344 * have changed to allow the changes to be propagated to stacked
6345 * VLAN devices.
6346 */
6347void netdev_change_features(struct net_device *dev)
6348{
6349 __netdev_update_features(dev);
6350 netdev_features_change(dev);
6351}
6352EXPORT_SYMBOL(netdev_change_features);
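/*
 * Illustrative sketch (not part of dev.c): a hypothetical driver reacting to
 * a firmware or configuration change that invalidates an offload.  It adjusts
 * hw_features and asks the core to re-evaluate, under RTNL.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void example_disable_tso(struct net_device *dev)
{
        rtnl_lock();
        dev->hw_features &= ~NETIF_F_ALL_TSO;
        /* recomputes dev->features via ndo_fix_features()/netdev_fix_features() */
        netdev_update_features(dev);
        rtnl_unlock();
}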
6353
fc4a7489
PM
6354/**
6355 * netif_stacked_transfer_operstate - transfer operstate
6356 * @rootdev: the root or lower level device to transfer state from
6357 * @dev: the device to transfer operstate to
6358 *
6359 * Transfer operational state from root to device. This is normally
6360 * called when a stacking relationship exists between the root
6361 * device and the device (a leaf device).
6362 */
6363void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6364 struct net_device *dev)
6365{
6366 if (rootdev->operstate == IF_OPER_DORMANT)
6367 netif_dormant_on(dev);
6368 else
6369 netif_dormant_off(dev);
6370
6371 if (netif_carrier_ok(rootdev)) {
6372 if (!netif_carrier_ok(dev))
6373 netif_carrier_on(dev);
6374 } else {
6375 if (netif_carrier_ok(dev))
6376 netif_carrier_off(dev);
6377 }
6378}
6379EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6380
a953be53 6381#ifdef CONFIG_SYSFS
1b4bf461
ED
6382static int netif_alloc_rx_queues(struct net_device *dev)
6383{
1b4bf461 6384 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 6385 struct netdev_rx_queue *rx;
10595902 6386 size_t sz = count * sizeof(*rx);
1b4bf461 6387
bd25fa7b 6388 BUG_ON(count < 1);
1b4bf461 6389
10595902
PG
6390 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6391 if (!rx) {
6392 rx = vzalloc(sz);
6393 if (!rx)
6394 return -ENOMEM;
6395 }
bd25fa7b
TH
6396 dev->_rx = rx;
6397
bd25fa7b 6398 for (i = 0; i < count; i++)
fe822240 6399 rx[i].dev = dev;
1b4bf461
ED
6400 return 0;
6401}
bf264145 6402#endif
1b4bf461 6403
aa942104
CG
6404static void netdev_init_one_queue(struct net_device *dev,
6405 struct netdev_queue *queue, void *_unused)
6406{
6407 /* Initialize queue lock */
6408 spin_lock_init(&queue->_xmit_lock);
6409 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6410 queue->xmit_lock_owner = -1;
b236da69 6411 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
aa942104 6412 queue->dev = dev;
114cf580
TH
6413#ifdef CONFIG_BQL
6414 dql_init(&queue->dql, HZ);
6415#endif
aa942104
CG
6416}
6417
60877a32
ED
6418static void netif_free_tx_queues(struct net_device *dev)
6419{
4cb28970 6420 kvfree(dev->_tx);
60877a32
ED
6421}
6422
e6484930
TH
6423static int netif_alloc_netdev_queues(struct net_device *dev)
6424{
6425 unsigned int count = dev->num_tx_queues;
6426 struct netdev_queue *tx;
60877a32 6427 size_t sz = count * sizeof(*tx);
e6484930 6428
60877a32 6429 BUG_ON(count < 1 || count > 0xffff);
62b5942a 6430
60877a32
ED
6431 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6432 if (!tx) {
6433 tx = vzalloc(sz);
6434 if (!tx)
6435 return -ENOMEM;
6436 }
e6484930 6437 dev->_tx = tx;
1d24eb48 6438
e6484930
TH
6439 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6440 spin_lock_init(&dev->tx_global_lock);
aa942104
CG
6441
6442 return 0;
e6484930
TH
6443}
6444
a2029240
DV
6445void netif_tx_stop_all_queues(struct net_device *dev)
6446{
6447 unsigned int i;
6448
6449 for (i = 0; i < dev->num_tx_queues; i++) {
6450 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
6451 netif_tx_stop_queue(txq);
6452 }
6453}
6454EXPORT_SYMBOL(netif_tx_stop_all_queues);
6455
1da177e4
LT
6456/**
6457 * register_netdevice - register a network device
6458 * @dev: device to register
6459 *
6460 * Take a completed network device structure and add it to the kernel
6461 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6462 * chain. 0 is returned on success. A negative errno code is returned
6463 * on a failure to set up the device, or if the name is a duplicate.
6464 *
6465 * Callers must hold the rtnl semaphore. You may want
6466 * register_netdev() instead of this.
6467 *
6468 * BUGS:
6469 * The locking appears insufficient to guarantee two parallel registers
6470 * will not get the same name.
6471 */
6472
6473int register_netdevice(struct net_device *dev)
6474{
1da177e4 6475 int ret;
d314774c 6476 struct net *net = dev_net(dev);
1da177e4
LT
6477
6478 BUG_ON(dev_boot_phase);
6479 ASSERT_RTNL();
6480
b17a7c17
SH
6481 might_sleep();
6482
1da177e4
LT
6483 /* When net_device's are persistent, this will be fatal. */
6484 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 6485 BUG_ON(!net);
1da177e4 6486
f1f28aa3 6487 spin_lock_init(&dev->addr_list_lock);
cf508b12 6488 netdev_set_addr_lockdep_class(dev);
1da177e4 6489
828de4f6 6490 ret = dev_get_valid_name(net, dev, dev->name);
0696c3a8
PP
6491 if (ret < 0)
6492 goto out;
6493
1da177e4 6494 /* Init, if this function is available */
d314774c
SH
6495 if (dev->netdev_ops->ndo_init) {
6496 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
6497 if (ret) {
6498 if (ret > 0)
6499 ret = -EIO;
90833aa4 6500 goto out;
1da177e4
LT
6501 }
6502 }
4ec93edb 6503
f646968f
PM
6504 if (((dev->hw_features | dev->features) &
6505 NETIF_F_HW_VLAN_CTAG_FILTER) &&
d2ed273d
MM
6506 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6507 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6508 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6509 ret = -EINVAL;
6510 goto err_uninit;
6511 }
6512
9c7dafbf
PE
6513 ret = -EBUSY;
6514 if (!dev->ifindex)
6515 dev->ifindex = dev_new_index(net);
6516 else if (__dev_get_by_index(net, dev->ifindex))
6517 goto err_uninit;
6518
5455c699
MM
6519 /* Transfer changeable features to wanted_features and enable
6520 * software offloads (GSO and GRO).
6521 */
6522 dev->hw_features |= NETIF_F_SOFT_FEATURES;
14d1232f
MM
6523 dev->features |= NETIF_F_SOFT_FEATURES;
6524 dev->wanted_features = dev->features & dev->hw_features;
1da177e4 6525
34324dc2
MM
6526 if (!(dev->flags & IFF_LOOPBACK)) {
6527 dev->hw_features |= NETIF_F_NOCACHE_COPY;
c6e1a0d1
TH
6528 }
6529
1180e7d6 6530 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
16c3ea78 6531 */
1180e7d6 6532 dev->vlan_features |= NETIF_F_HIGHDMA;
16c3ea78 6533
ee579677
PS
6534 /* Make NETIF_F_SG inheritable to tunnel devices.
6535 */
6536 dev->hw_enc_features |= NETIF_F_SG;
6537
0d89d203
SH
6538 /* Make NETIF_F_SG inheritable to MPLS.
6539 */
6540 dev->mpls_features |= NETIF_F_SG;
6541
7ffbe3fd
JB
6542 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6543 ret = notifier_to_errno(ret);
6544 if (ret)
6545 goto err_uninit;
6546
8b41d188 6547 ret = netdev_register_kobject(dev);
b17a7c17 6548 if (ret)
7ce1b0ed 6549 goto err_uninit;
b17a7c17
SH
6550 dev->reg_state = NETREG_REGISTERED;
6551
6cb6a27c 6552 __netdev_update_features(dev);
8e9b59b2 6553
1da177e4
LT
6554 /*
6555 * Default initial state at registry is that the
6556 * device is present.
6557 */
6558
6559 set_bit(__LINK_STATE_PRESENT, &dev->state);
6560
8f4cccbb
BH
6561 linkwatch_init_dev(dev);
6562
1da177e4 6563 dev_init_scheduler(dev);
1da177e4 6564 dev_hold(dev);
ce286d32 6565 list_netdevice(dev);
7bf23575 6566 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 6567
948b337e
JP
6568 /* If the device has permanent device address, driver should
6569 * set dev_addr and also addr_assign_type should be set to
6570 * NET_ADDR_PERM (default value).
6571 */
6572 if (dev->addr_assign_type == NET_ADDR_PERM)
6573 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6574
1da177e4 6575 /* Notify protocols, that a new device appeared. */
056925ab 6576 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 6577 ret = notifier_to_errno(ret);
93ee31f1
DL
6578 if (ret) {
6579 rollback_registered(dev);
6580 dev->reg_state = NETREG_UNREGISTERED;
6581 }
d90a909e
EB
6582 /*
6583 * Prevent userspace races by waiting until the network
6584 * device is fully setup before sending notifications.
6585 */
a2835763
PM
6586 if (!dev->rtnl_link_ops ||
6587 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7f294054 6588 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
1da177e4
LT
6589
6590out:
6591 return ret;
7ce1b0ed
HX
6592
6593err_uninit:
d314774c
SH
6594 if (dev->netdev_ops->ndo_uninit)
6595 dev->netdev_ops->ndo_uninit(dev);
7ce1b0ed 6596 goto out;
1da177e4 6597}
d1b19dff 6598EXPORT_SYMBOL(register_netdevice);
1da177e4 6599
937f1ba5
BH
6600/**
6601 * init_dummy_netdev - init a dummy network device for NAPI
6602 * @dev: device to init
6603 *
6604 * This takes a network device structure and initializes the minimum
6605 * number of fields so it can be used to schedule NAPI polls without
6606 * registering a full blown interface. This is to be used by drivers
6607 * that need to tie several hardware interfaces to a single NAPI
6608 * poll scheduler due to HW limitations.
6609 */
6610int init_dummy_netdev(struct net_device *dev)
6611{
6612 /* Clear everything. Note we don't initialize spinlocks
6613 * as they aren't supposed to be taken by any of the
6614 * NAPI code and this dummy netdev is supposed to be
6615 * only ever used for NAPI polls
6616 */
6617 memset(dev, 0, sizeof(struct net_device));
6618
6619 /* make sure we BUG if trying to hit standard
6620 * register/unregister code path
6621 */
6622 dev->reg_state = NETREG_DUMMY;
6623
937f1ba5
BH
6624 /* NAPI wants this */
6625 INIT_LIST_HEAD(&dev->napi_list);
6626
6627 /* a dummy interface is started by default */
6628 set_bit(__LINK_STATE_PRESENT, &dev->state);
6629 set_bit(__LINK_STATE_START, &dev->state);
6630
29b4433d
ED
6631 /* Note: We don't allocate pcpu_refcnt for dummy devices,
6632 * because users of this 'device' don't need to change
6633 * its refcount.
6634 */
6635
937f1ba5
BH
6636 return 0;
6637}
6638EXPORT_SYMBOL_GPL(init_dummy_netdev);
6639
6640
1da177e4
LT
6641/**
6642 * register_netdev - register a network device
6643 * @dev: device to register
6644 *
6645 * Take a completed network device structure and add it to the kernel
6646 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6647 * chain. 0 is returned on success. A negative errno code is returned
6648 * on a failure to set up the device, or if the name is a duplicate.
6649 *
38b4da38 6650 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
6651 * and expands the device name if you passed a format string to
6652 * alloc_netdev.
6653 */
6654int register_netdev(struct net_device *dev)
6655{
6656 int err;
6657
6658 rtnl_lock();
1da177e4 6659 err = register_netdevice(dev);
1da177e4
LT
6660 rtnl_unlock();
6661 return err;
6662}
6663EXPORT_SYMBOL(register_netdev);
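/*
 * Illustrative sketch (not part of dev.c): the usual register/unregister
 * life cycle of a simple Ethernet driver.  example_priv/example_probe are
 * hypothetical, and a real driver would also set dev->netdev_ops before
 * registering; error handling is trimmed to the essentials.
 */
#include <linux/etherdevice.h>

struct example_priv { int dummy; };

static struct net_device *example_probe(void)
{
        struct net_device *dev;

        dev = alloc_etherdev(sizeof(struct example_priv));
        if (!dev)
                return NULL;
        eth_hw_addr_random(dev);
        if (register_netdev(dev)) {             /* takes rtnl_lock internally */
                free_netdev(dev);
                return NULL;
        }
        return dev;
}

static void example_remove(struct net_device *dev)
{
        unregister_netdev(dev);
        free_netdev(dev);
}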
6664
29b4433d
ED
6665int netdev_refcnt_read(const struct net_device *dev)
6666{
6667 int i, refcnt = 0;
6668
6669 for_each_possible_cpu(i)
6670 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6671 return refcnt;
6672}
6673EXPORT_SYMBOL(netdev_refcnt_read);
6674
2c53040f 6675/**
1da177e4 6676 * netdev_wait_allrefs - wait until all references are gone.
3de7a37b 6677 * @dev: target net_device
1da177e4
LT
6678 *
6679 * This is called when unregistering network devices.
6680 *
6681 * Any protocol or device that holds a reference should register
6682 * for netdevice notification, and cleanup and put back the
6683 * reference if they receive an UNREGISTER event.
6684 * We can get stuck here if buggy protocols don't correctly
4ec93edb 6685 * call dev_put.
1da177e4
LT
6686 */
6687static void netdev_wait_allrefs(struct net_device *dev)
6688{
6689 unsigned long rebroadcast_time, warning_time;
29b4433d 6690 int refcnt;
1da177e4 6691
e014debe
ED
6692 linkwatch_forget_dev(dev);
6693
1da177e4 6694 rebroadcast_time = warning_time = jiffies;
29b4433d
ED
6695 refcnt = netdev_refcnt_read(dev);
6696
6697 while (refcnt != 0) {
1da177e4 6698 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 6699 rtnl_lock();
1da177e4
LT
6700
6701 /* Rebroadcast unregister notification */
056925ab 6702 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
1da177e4 6703
748e2d93 6704 __rtnl_unlock();
0115e8e3 6705 rcu_barrier();
748e2d93
ED
6706 rtnl_lock();
6707
0115e8e3 6708 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
1da177e4
LT
6709 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6710 &dev->state)) {
6711 /* We must not have linkwatch events
6712 * pending on unregister. If this
6713 * happens, we simply run the queue
6714 * unscheduled, resulting in a noop
6715 * for this device.
6716 */
6717 linkwatch_run_queue();
6718 }
6719
6756ae4b 6720 __rtnl_unlock();
1da177e4
LT
6721
6722 rebroadcast_time = jiffies;
6723 }
6724
6725 msleep(250);
6726
29b4433d
ED
6727 refcnt = netdev_refcnt_read(dev);
6728
1da177e4 6729 if (time_after(jiffies, warning_time + 10 * HZ)) {
7b6cd1ce
JP
6730 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6731 dev->name, refcnt);
1da177e4
LT
6732 warning_time = jiffies;
6733 }
6734 }
6735}
6736
6737/* The sequence is:
6738 *
6739 * rtnl_lock();
6740 * ...
6741 * register_netdevice(x1);
6742 * register_netdevice(x2);
6743 * ...
6744 * unregister_netdevice(y1);
6745 * unregister_netdevice(y2);
6746 * ...
6747 * rtnl_unlock();
6748 * free_netdev(y1);
6749 * free_netdev(y2);
6750 *
58ec3b4d 6751 * We are invoked by rtnl_unlock().
1da177e4 6752 * This allows us to deal with problems:
b17a7c17 6753 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
6754 * without deadlocking with linkwatch via keventd.
6755 * 2) Since we run with the RTNL semaphore not held, we can sleep
6756 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
6757 *
6758 * We must not return until all unregister events added during
6759 * the interval the lock was held have been completed.
1da177e4 6760 */
1da177e4
LT
6761void netdev_run_todo(void)
6762{
626ab0e6 6763 struct list_head list;
1da177e4 6764
1da177e4 6765 /* Snapshot list, allow later requests */
626ab0e6 6766 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
6767
6768 __rtnl_unlock();
626ab0e6 6769
0115e8e3
ED
6770
6771 /* Wait for rcu callbacks to finish before next phase */
850a545b
EB
6772 if (!list_empty(&list))
6773 rcu_barrier();
6774
1da177e4
LT
6775 while (!list_empty(&list)) {
6776 struct net_device *dev
e5e26d75 6777 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
6778 list_del(&dev->todo_list);
6779
748e2d93 6780 rtnl_lock();
0115e8e3 6781 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
748e2d93 6782 __rtnl_unlock();
0115e8e3 6783
b17a7c17 6784 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7b6cd1ce 6785 pr_err("network todo '%s' but state %d\n",
b17a7c17
SH
6786 dev->name, dev->reg_state);
6787 dump_stack();
6788 continue;
6789 }
1da177e4 6790
b17a7c17 6791 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 6792
152102c7 6793 on_each_cpu(flush_backlog, dev, 1);
6e583ce5 6794
b17a7c17 6795 netdev_wait_allrefs(dev);
1da177e4 6796
b17a7c17 6797 /* paranoia */
29b4433d 6798 BUG_ON(netdev_refcnt_read(dev));
7866a621
SN
6799 BUG_ON(!list_empty(&dev->ptype_all));
6800 BUG_ON(!list_empty(&dev->ptype_specific));
33d480ce
ED
6801 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6802 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
547b792c 6803 WARN_ON(dev->dn_ptr);
1da177e4 6804
b17a7c17
SH
6805 if (dev->destructor)
6806 dev->destructor(dev);
9093bbb2 6807
50624c93
EB
6808 /* Report a network device has been unregistered */
6809 rtnl_lock();
6810 dev_net(dev)->dev_unreg_count--;
6811 __rtnl_unlock();
6812 wake_up(&netdev_unregistering_wq);
6813
9093bbb2
SH
6814 /* Free network device */
6815 kobject_put(&dev->dev.kobj);
1da177e4 6816 }
1da177e4
LT
6817}
6818
3cfde79c
BH
6819/* Convert net_device_stats to rtnl_link_stats64. They have the same
6820 * fields in the same order, with only the type differing.
6821 */
77a1abf5
ED
6822void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6823 const struct net_device_stats *netdev_stats)
3cfde79c
BH
6824{
6825#if BITS_PER_LONG == 64
77a1abf5
ED
6826 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6827 memcpy(stats64, netdev_stats, sizeof(*stats64));
3cfde79c
BH
6828#else
6829 size_t i, n = sizeof(*stats64) / sizeof(u64);
6830 const unsigned long *src = (const unsigned long *)netdev_stats;
6831 u64 *dst = (u64 *)stats64;
6832
6833 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6834 sizeof(*stats64) / sizeof(u64));
6835 for (i = 0; i < n; i++)
6836 dst[i] = src[i];
6837#endif
6838}
77a1abf5 6839EXPORT_SYMBOL(netdev_stats_to_stats64);
3cfde79c 6840
eeda3fd6
SH
6841/**
6842 * dev_get_stats - get network device statistics
6843 * @dev: device to get statistics from
28172739 6844 * @storage: place to store stats
eeda3fd6 6845 *
d7753516
BH
6846 * Get network statistics from device. Return @storage.
6847 * The device driver may provide its own method by setting
6848 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6849 * otherwise the internal statistics structure is used.
eeda3fd6 6850 */
d7753516
BH
6851struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6852 struct rtnl_link_stats64 *storage)
7004bf25 6853{
eeda3fd6
SH
6854 const struct net_device_ops *ops = dev->netdev_ops;
6855
28172739
ED
6856 if (ops->ndo_get_stats64) {
6857 memset(storage, 0, sizeof(*storage));
caf586e5
ED
6858 ops->ndo_get_stats64(dev, storage);
6859 } else if (ops->ndo_get_stats) {
3cfde79c 6860 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
caf586e5
ED
6861 } else {
6862 netdev_stats_to_stats64(storage, &dev->stats);
28172739 6863 }
caf586e5 6864 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
015f0688 6865 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
28172739 6866 return storage;
c45d286e 6867}
eeda3fd6 6868EXPORT_SYMBOL(dev_get_stats);
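/*
 * Illustrative sketch (not part of dev.c): reading a device's 64-bit
 * counters.  The snapshot is written into caller-provided storage, so it can
 * live on the stack; the caller should hold a reference to dev (RTNL or
 * dev_hold()).  Hypothetical helper name.
 */
#include <linux/netdevice.h>
#include <linux/if_link.h>

static u64 example_rx_bytes(struct net_device *dev)
{
        struct rtnl_link_stats64 stats;

        dev_get_stats(dev, &stats);
        return stats.rx_bytes;
}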
c45d286e 6869
24824a09 6870struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
dc2b4847 6871{
24824a09 6872 struct netdev_queue *queue = dev_ingress_queue(dev);
dc2b4847 6873
24824a09
ED
6874#ifdef CONFIG_NET_CLS_ACT
6875 if (queue)
6876 return queue;
6877 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6878 if (!queue)
6879 return NULL;
6880 netdev_init_one_queue(dev, queue, NULL);
2ce1ee17 6881 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
24824a09
ED
6882 queue->qdisc_sleeping = &noop_qdisc;
6883 rcu_assign_pointer(dev->ingress_queue, queue);
6884#endif
6885 return queue;
bb949fbd
DM
6886}
6887
2c60db03
ED
6888static const struct ethtool_ops default_ethtool_ops;
6889
d07d7507
SG
6890void netdev_set_default_ethtool_ops(struct net_device *dev,
6891 const struct ethtool_ops *ops)
6892{
6893 if (dev->ethtool_ops == &default_ethtool_ops)
6894 dev->ethtool_ops = ops;
6895}
6896EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6897
74d332c1
ED
6898void netdev_freemem(struct net_device *dev)
6899{
6900 char *addr = (char *)dev - dev->padded;
6901
4cb28970 6902 kvfree(addr);
74d332c1
ED
6903}
6904
1da177e4 6905/**
36909ea4 6906 * alloc_netdev_mqs - allocate network device
c835a677
TG
6907 * @sizeof_priv: size of private data to allocate space for
6908 * @name: device name format string
6909 * @name_assign_type: origin of device name
6910 * @setup: callback to initialize device
6911 * @txqs: the number of TX subqueues to allocate
6912 * @rxqs: the number of RX subqueues to allocate
1da177e4
LT
6913 *
6914 * Allocates a struct net_device with private data area for driver use
90e51adf 6915 * and performs basic initialization. Also allocates subqueue structs
36909ea4 6916 * for each queue on the device.
1da177e4 6917 */
36909ea4 6918struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
c835a677 6919 unsigned char name_assign_type,
36909ea4
TH
6920 void (*setup)(struct net_device *),
6921 unsigned int txqs, unsigned int rxqs)
1da177e4 6922{
1da177e4 6923 struct net_device *dev;
7943986c 6924 size_t alloc_size;
1ce8e7b5 6925 struct net_device *p;
1da177e4 6926
b6fe17d6
SH
6927 BUG_ON(strlen(name) >= sizeof(dev->name));
6928
36909ea4 6929 if (txqs < 1) {
7b6cd1ce 6930 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
55513fb4
TH
6931 return NULL;
6932 }
6933
a953be53 6934#ifdef CONFIG_SYSFS
36909ea4 6935 if (rxqs < 1) {
7b6cd1ce 6936 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
36909ea4
TH
6937 return NULL;
6938 }
6939#endif
6940
fd2ea0a7 6941 alloc_size = sizeof(struct net_device);
d1643d24
AD
6942 if (sizeof_priv) {
6943 /* ensure 32-byte alignment of private area */
1ce8e7b5 6944 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
6945 alloc_size += sizeof_priv;
6946 }
6947 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 6948 alloc_size += NETDEV_ALIGN - 1;
1da177e4 6949
74d332c1
ED
6950 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6951 if (!p)
6952 p = vzalloc(alloc_size);
62b5942a 6953 if (!p)
1da177e4 6954 return NULL;
1da177e4 6955
1ce8e7b5 6956 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 6957 dev->padded = (char *)dev - (char *)p;
ab9c73cc 6958
29b4433d
ED
6959 dev->pcpu_refcnt = alloc_percpu(int);
6960 if (!dev->pcpu_refcnt)
74d332c1 6961 goto free_dev;
ab9c73cc 6962
ab9c73cc 6963 if (dev_addr_init(dev))
29b4433d 6964 goto free_pcpu;
ab9c73cc 6965
22bedad3 6966 dev_mc_init(dev);
a748ee24 6967 dev_uc_init(dev);
ccffad25 6968
c346dca1 6969 dev_net_set(dev, &init_net);
1da177e4 6970
8d3bdbd5 6971 dev->gso_max_size = GSO_MAX_SIZE;
30b678d8 6972 dev->gso_max_segs = GSO_MAX_SEGS;
fcbeb976 6973 dev->gso_min_segs = 0;
8d3bdbd5 6974
8d3bdbd5
DM
6975 INIT_LIST_HEAD(&dev->napi_list);
6976 INIT_LIST_HEAD(&dev->unreg_list);
5cde2829 6977 INIT_LIST_HEAD(&dev->close_list);
8d3bdbd5 6978 INIT_LIST_HEAD(&dev->link_watch_list);
2f268f12
VF
6979 INIT_LIST_HEAD(&dev->adj_list.upper);
6980 INIT_LIST_HEAD(&dev->adj_list.lower);
6981 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6982 INIT_LIST_HEAD(&dev->all_adj_list.lower);
7866a621
SN
6983 INIT_LIST_HEAD(&dev->ptype_all);
6984 INIT_LIST_HEAD(&dev->ptype_specific);
02875878 6985 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
8d3bdbd5
DM
6986 setup(dev);
6987
36909ea4
TH
6988 dev->num_tx_queues = txqs;
6989 dev->real_num_tx_queues = txqs;
ed9af2e8 6990 if (netif_alloc_netdev_queues(dev))
8d3bdbd5 6991 goto free_all;
e8a0464c 6992
a953be53 6993#ifdef CONFIG_SYSFS
36909ea4
TH
6994 dev->num_rx_queues = rxqs;
6995 dev->real_num_rx_queues = rxqs;
fe822240 6996 if (netif_alloc_rx_queues(dev))
8d3bdbd5 6997 goto free_all;
df334545 6998#endif
0a9627f2 6999
1da177e4 7000 strcpy(dev->name, name);
c835a677 7001 dev->name_assign_type = name_assign_type;
cbda10fa 7002 dev->group = INIT_NETDEV_GROUP;
2c60db03
ED
7003 if (!dev->ethtool_ops)
7004 dev->ethtool_ops = &default_ethtool_ops;
e687ad60
PN
7005
7006 nf_hook_ingress_init(dev);
7007
1da177e4 7008 return dev;
ab9c73cc 7009
8d3bdbd5
DM
7010free_all:
7011 free_netdev(dev);
7012 return NULL;
7013
29b4433d
ED
7014free_pcpu:
7015 free_percpu(dev->pcpu_refcnt);
74d332c1
ED
7016free_dev:
7017 netdev_freemem(dev);
ab9c73cc 7018 return NULL;
1da177e4 7019}
36909ea4 7020EXPORT_SYMBOL(alloc_netdev_mqs);
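/*
 * Illustrative sketch (not part of dev.c): allocating a multiqueue device
 * directly with alloc_netdev_mqs().  The common wrappers (alloc_netdev(),
 * alloc_etherdev_mqs()) expand to this call.  example_priv and the queue
 * counts are hypothetical; "%d" in the name is expanded at register time.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct example_priv { int dummy; };

static struct net_device *example_alloc(void)
{
        /* 8 TX and 8 RX queues, Ethernet setup, kernel-enumerated name */
        return alloc_netdev_mqs(sizeof(struct example_priv), "example%d",
                                NET_NAME_UNKNOWN, ether_setup, 8, 8);
}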
1da177e4
LT
7021
7022/**
7023 * free_netdev - free network device
7024 * @dev: device
7025 *
4ec93edb
YH
7026 * This function does the last stage of destroying an allocated device
7027 * interface. The reference to the device object is released.
1da177e4
LT
7028 * If this is the last reference then it will be freed.
7029 */
7030void free_netdev(struct net_device *dev)
7031{
d565b0a1
HX
7032 struct napi_struct *p, *n;
7033
60877a32 7034 netif_free_tx_queues(dev);
a953be53 7035#ifdef CONFIG_SYSFS
10595902 7036 kvfree(dev->_rx);
fe822240 7037#endif
e8a0464c 7038
33d480ce 7039 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
24824a09 7040
f001fde5
JP
7041 /* Flush device addresses */
7042 dev_addr_flush(dev);
7043
d565b0a1
HX
7044 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
7045 netif_napi_del(p);
7046
29b4433d
ED
7047 free_percpu(dev->pcpu_refcnt);
7048 dev->pcpu_refcnt = NULL;
7049
3041a069 7050 /* Compatibility with error handling in drivers */
1da177e4 7051 if (dev->reg_state == NETREG_UNINITIALIZED) {
74d332c1 7052 netdev_freemem(dev);
1da177e4
LT
7053 return;
7054 }
7055
7056 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
7057 dev->reg_state = NETREG_RELEASED;
7058
43cb76d9
GKH
7059 /* will free via device release */
7060 put_device(&dev->dev);
1da177e4 7061}
d1b19dff 7062EXPORT_SYMBOL(free_netdev);
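/*
 * Usage sketch (illustrative, not part of dev.c): the usual pairing of
 * allocation/registration with free_netdev() on the error path.
 * "my_dev" and "my_setup" are hypothetical driver symbols.
 */
static void my_setup(struct net_device *dev);	/* hypothetical */
static struct net_device *my_dev;

static int __init my_init(void)
{
	int err;

	my_dev = alloc_netdev(0, "mydev%d", NET_NAME_UNKNOWN, my_setup);
	if (!my_dev)
		return -ENOMEM;

	err = register_netdev(my_dev);
	if (err) {
		/* never registered: free_netdev() releases it directly */
		free_netdev(my_dev);
		return err;
	}
	return 0;
}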
4ec93edb 7063
f0db275a
SH
7064/**
7065 * synchronize_net - Synchronize with packet receive processing
7066 *
7067 * Wait for packets currently being received to be done.
7068 * Does not block later packets from starting.
7069 */
4ec93edb 7070void synchronize_net(void)
1da177e4
LT
7071{
7072 might_sleep();
be3fc413
ED
7073 if (rtnl_is_locked())
7074 synchronize_rcu_expedited();
7075 else
7076 synchronize_rcu();
1da177e4 7077}
d1b19dff 7078EXPORT_SYMBOL(synchronize_net);
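/*
 * Usage sketch (illustrative, not part of dev.c): the common pattern of
 * unpublishing an object seen by the receive path, waiting with
 * synchronize_net(), then freeing it.  "my_hook"/"active_hook" are
 * hypothetical.
 */
struct my_hook {
	int id;
};

static struct my_hook __rcu *active_hook;

static void my_hook_remove(void)
{
	struct my_hook *hook;

	hook = rcu_dereference_protected(active_hook, 1);
	RCU_INIT_POINTER(active_hook, NULL);
	synchronize_net();	/* wait for in-flight receivers to finish */
	kfree(hook);
}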
1da177e4
LT
7079
7080/**
44a0873d 7081 * unregister_netdevice_queue - remove device from the kernel
1da177e4 7082 * @dev: device
44a0873d 7083 * @head: list
6ebfbc06 7084 *
1da177e4 7085 * This function shuts down a device interface and removes it
d59b54b1 7086 * from the kernel tables.
44a0873d 7087 * If head is not NULL, the device is queued to be unregistered later.
1da177e4
LT
7088 *
7089 * Callers must hold the rtnl semaphore. You may want
7090 * unregister_netdev() instead of this.
7091 */
7092
44a0873d 7093void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 7094{
a6620712
HX
7095 ASSERT_RTNL();
7096
44a0873d 7097 if (head) {
9fdce099 7098 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
7099 } else {
7100 rollback_registered(dev);
7101 /* Finish processing unregister after unlock */
7102 net_set_todo(dev);
7103 }
1da177e4 7104}
44a0873d 7105EXPORT_SYMBOL(unregister_netdevice_queue);
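/*
 * Usage sketch (illustrative, not part of dev.c): a rtnl_link_ops
 * ->dellink() implementation that defers the unregister by queueing the
 * device onto the caller-provided list; callers then flush the list with
 * unregister_netdevice_many() (see below).  "my_dellink" is hypothetical.
 */
static void my_dellink(struct net_device *dev, struct list_head *head)
{
	/* RTNL is held by the caller; real teardown happens in one batch */
	unregister_netdevice_queue(dev, head);
}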
1da177e4 7106
9b5e383c
ED
7107/**
7108 * unregister_netdevice_many - unregister many devices
7109 * @head: list of devices
87757a91
ED
7110 *
 7111 * Note: As most callers use a stack-allocated list_head,
 7112 * we force a list_del() to make sure the stack won't be corrupted later.
9b5e383c
ED
7113 */
7114void unregister_netdevice_many(struct list_head *head)
7115{
7116 struct net_device *dev;
7117
7118 if (!list_empty(head)) {
7119 rollback_registered_many(head);
7120 list_for_each_entry(dev, head, unreg_list)
7121 net_set_todo(dev);
87757a91 7122 list_del(head);
9b5e383c
ED
7123 }
7124}
63c8099d 7125EXPORT_SYMBOL(unregister_netdevice_many);
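/*
 * Usage sketch (illustrative, not part of dev.c): unregistering a set of
 * devices in one RTNL-held batch via a stack-allocated list, the pattern
 * the comment above refers to.  "my_should_remove" is a hypothetical
 * predicate.
 */
static bool my_should_remove(const struct net_device *dev);	/* hypothetical */

static void my_remove_matching(struct net *net)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(kill_list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, tmp) {
		if (my_should_remove(dev))
			unregister_netdevice_queue(dev, &kill_list);
	}
	unregister_netdevice_many(&kill_list);	/* also list_del()s kill_list */
	rtnl_unlock();
}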
9b5e383c 7126
1da177e4
LT
7127/**
7128 * unregister_netdev - remove device from the kernel
7129 * @dev: device
7130 *
7131 * This function shuts down a device interface and removes it
d59b54b1 7132 * from the kernel tables.
1da177e4
LT
7133 *
7134 * This is just a wrapper for unregister_netdevice that takes
7135 * the rtnl semaphore. In general you want to use this and not
7136 * unregister_netdevice.
7137 */
7138void unregister_netdev(struct net_device *dev)
7139{
7140 rtnl_lock();
7141 unregister_netdevice(dev);
7142 rtnl_unlock();
7143}
1da177e4
LT
7144EXPORT_SYMBOL(unregister_netdev);
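/*
 * Usage sketch (illustrative, not part of dev.c): typical module teardown.
 * unregister_netdev() takes the rtnl lock itself, so the caller must not
 * hold it.  "my_dev" is hypothetical.
 */
static void __exit my_exit(void)
{
	unregister_netdev(my_dev);
	free_netdev(my_dev);
}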
7145
ce286d32
EB
7146/**
 7147 * dev_change_net_namespace - move device to a different network namespace
7148 * @dev: device
7149 * @net: network namespace
 7150 * @pat: If not NULL, name pattern to try if the current device name
7151 * is already taken in the destination network namespace.
7152 *
7153 * This function shuts down a device interface and moves it
7154 * to a new network namespace. On success 0 is returned, on
 7155 * failure a negative errno code is returned.
7156 *
7157 * Callers must hold the rtnl semaphore.
7158 */
7159
7160int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7161{
ce286d32
EB
7162 int err;
7163
7164 ASSERT_RTNL();
7165
7166 /* Don't allow namespace local devices to be moved. */
7167 err = -EINVAL;
7168 if (dev->features & NETIF_F_NETNS_LOCAL)
7169 goto out;
7170
 7171 /* Ensure the device has been registered */
ce286d32
EB
7172 if (dev->reg_state != NETREG_REGISTERED)
7173 goto out;
7174
 7175 /* Get out if there is nothing to do */
7176 err = 0;
878628fb 7177 if (net_eq(dev_net(dev), net))
ce286d32
EB
7178 goto out;
7179
7180 /* Pick the destination device name, and ensure
7181 * we can use it in the destination network namespace.
7182 */
7183 err = -EEXIST;
d9031024 7184 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
7185 /* We get here if we can't use the current device name */
7186 if (!pat)
7187 goto out;
828de4f6 7188 if (dev_get_valid_name(net, dev, pat) < 0)
ce286d32
EB
7189 goto out;
7190 }
7191
7192 /*
 7193 * And now a mini version of register_netdevice and unregister_netdevice.
7194 */
7195
 7196 /* If the device is running, close it first. */
9b772652 7197 dev_close(dev);
ce286d32
EB
7198
7199 /* And unlink it from device chain */
7200 err = -ENODEV;
7201 unlist_netdevice(dev);
7202
7203 synchronize_net();
7204
7205 /* Shutdown queueing discipline. */
7206 dev_shutdown(dev);
7207
 7208 /* Notify protocols that we are about to destroy
 7209 this device. They should clean up all of their state.
3b27e105
DL
7210
7211 Note that dev->reg_state stays at NETREG_REGISTERED.
 7212 This is intentional so that 8021q and macvlan know
7213 the device is just moving and can keep their slaves up.
ce286d32
EB
7214 */
7215 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6549dd43
G
7216 rcu_barrier();
7217 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7f294054 7218 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
ce286d32
EB
7219
7220 /*
7221 * Flush the unicast and multicast chains
7222 */
a748ee24 7223 dev_uc_flush(dev);
22bedad3 7224 dev_mc_flush(dev);
ce286d32 7225
4e66ae2e
SH
7226 /* Send a netdev-removed uevent to the old namespace */
7227 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
4c75431a 7228 netdev_adjacent_del_links(dev);
4e66ae2e 7229
ce286d32 7230 /* Actually switch the network namespace */
c346dca1 7231 dev_net_set(dev, net);
ce286d32 7232
ce286d32 7233 /* If there is an ifindex conflict assign a new one */
7a66bbc9 7234 if (__dev_get_by_index(net, dev->ifindex))
ce286d32 7235 dev->ifindex = dev_new_index(net);
ce286d32 7236
4e66ae2e
SH
7237 /* Send a netdev-add uevent to the new namespace */
7238 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
4c75431a 7239 netdev_adjacent_add_links(dev);
4e66ae2e 7240
8b41d188 7241 /* Fixup kobjects */
a1b3f594 7242 err = device_rename(&dev->dev, dev->name);
8b41d188 7243 WARN_ON(err);
ce286d32
EB
7244
7245 /* Add the device back in the hashes */
7246 list_netdevice(dev);
7247
7248 /* Notify protocols, that a new device appeared. */
7249 call_netdevice_notifiers(NETDEV_REGISTER, dev);
7250
d90a909e
EB
7251 /*
7252 * Prevent userspace races by waiting until the network
 7253 * device is fully set up before sending notifications.
7254 */
7f294054 7255 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
d90a909e 7256
ce286d32
EB
7257 synchronize_net();
7258 err = 0;
7259out:
7260 return err;
7261}
463d0183 7262EXPORT_SYMBOL_GPL(dev_change_net_namespace);
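/*
 * Usage sketch (illustrative, not part of dev.c): moving a device into
 * another namespace while holding RTNL, falling back to a "moved%d" name
 * if the current name is already taken there.  "my_move_to_netns" is
 * hypothetical.
 */
static int my_move_to_netns(struct net_device *dev, struct net *dst_net)
{
	int err;

	ASSERT_RTNL();
	err = dev_change_net_namespace(dev, dst_net, "moved%d");
	if (err)
		netdev_warn(dev, "failed to change netns: %d\n", err);
	return err;
}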
ce286d32 7263
1da177e4
LT
7264static int dev_cpu_callback(struct notifier_block *nfb,
7265 unsigned long action,
7266 void *ocpu)
7267{
7268 struct sk_buff **list_skb;
1da177e4
LT
7269 struct sk_buff *skb;
7270 unsigned int cpu, oldcpu = (unsigned long)ocpu;
7271 struct softnet_data *sd, *oldsd;
7272
8bb78442 7273 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1da177e4
LT
7274 return NOTIFY_OK;
7275
7276 local_irq_disable();
7277 cpu = smp_processor_id();
7278 sd = &per_cpu(softnet_data, cpu);
7279 oldsd = &per_cpu(softnet_data, oldcpu);
7280
7281 /* Find end of our completion_queue. */
7282 list_skb = &sd->completion_queue;
7283 while (*list_skb)
7284 list_skb = &(*list_skb)->next;
7285 /* Append completion queue from offline CPU. */
7286 *list_skb = oldsd->completion_queue;
7287 oldsd->completion_queue = NULL;
7288
1da177e4 7289 /* Append output queue from offline CPU. */
a9cbd588
CG
7290 if (oldsd->output_queue) {
7291 *sd->output_queue_tailp = oldsd->output_queue;
7292 sd->output_queue_tailp = oldsd->output_queue_tailp;
7293 oldsd->output_queue = NULL;
7294 oldsd->output_queue_tailp = &oldsd->output_queue;
7295 }
ac64da0b
ED
 7296 /* Append NAPI poll list from offline CPU, with one exception:
 7297 * process_backlog() must be called by the CPU owning the percpu backlog.
7298 * We properly handle process_queue & input_pkt_queue later.
7299 */
7300 while (!list_empty(&oldsd->poll_list)) {
7301 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7302 struct napi_struct,
7303 poll_list);
7304
7305 list_del_init(&napi->poll_list);
7306 if (napi->poll == process_backlog)
7307 napi->state = 0;
7308 else
7309 ____napi_schedule(sd, napi);
264524d5 7310 }
1da177e4
LT
7311
7312 raise_softirq_irqoff(NET_TX_SOFTIRQ);
7313 local_irq_enable();
7314
7315 /* Process offline CPU's input_pkt_queue */
76cc8b13 7316 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
91e83133 7317 netif_rx_ni(skb);
76cc8b13 7318 input_queue_head_incr(oldsd);
fec5e652 7319 }
ac64da0b 7320 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
91e83133 7321 netif_rx_ni(skb);
76cc8b13
TH
7322 input_queue_head_incr(oldsd);
7323 }
1da177e4
LT
7324
7325 return NOTIFY_OK;
7326}
1da177e4
LT
7327
7328
7f353bf2 7329/**
b63365a2
HX
7330 * netdev_increment_features - increment feature set by one
7331 * @all: current feature set
7332 * @one: new feature set
7333 * @mask: mask feature set
7f353bf2
HX
7334 *
7335 * Computes a new feature set after adding a device with feature set
b63365a2
HX
7336 * @one to the master device with current feature set @all. Will not
7337 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 7338 */
c8f44aff
MM
7339netdev_features_t netdev_increment_features(netdev_features_t all,
7340 netdev_features_t one, netdev_features_t mask)
b63365a2 7341{
1742f183
MM
7342 if (mask & NETIF_F_GEN_CSUM)
7343 mask |= NETIF_F_ALL_CSUM;
7344 mask |= NETIF_F_VLAN_CHALLENGED;
7f353bf2 7345
1742f183
MM
7346 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
7347 all &= one | ~NETIF_F_ALL_FOR_ALL;
c6e1a0d1 7348
1742f183
MM
7349 /* If one device supports hw checksumming, set for all. */
7350 if (all & NETIF_F_GEN_CSUM)
7351 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
7f353bf2
HX
7352
7353 return all;
7354}
b63365a2 7355EXPORT_SYMBOL(netdev_increment_features);
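/*
 * Usage sketch (illustrative, not part of dev.c): how an aggregating
 * (bond/team-style) driver typically folds the feature sets of its lower
 * devices together.  MY_MASTER_FEATURES is an illustrative mask, not a
 * kernel constant.
 */
#define MY_MASTER_FEATURES (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA)

static netdev_features_t my_compute_features(struct net_device *lower1,
					      struct net_device *lower2)
{
	netdev_features_t all = MY_MASTER_FEATURES & NETIF_F_ALL_FOR_ALL;

	all = netdev_increment_features(all, lower1->features,
					MY_MASTER_FEATURES);
	all = netdev_increment_features(all, lower2->features,
					MY_MASTER_FEATURES);
	return all;
}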
7f353bf2 7356
430f03cd 7357static struct hlist_head * __net_init netdev_create_hash(void)
30d97d35
PE
7358{
7359 int i;
7360 struct hlist_head *hash;
7361
7362 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7363 if (hash != NULL)
7364 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7365 INIT_HLIST_HEAD(&hash[i]);
7366
7367 return hash;
7368}
7369
881d966b 7370/* Initialize per network namespace state */
4665079c 7371static int __net_init netdev_init(struct net *net)
881d966b 7372{
734b6541
RM
7373 if (net != &init_net)
7374 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 7375
30d97d35
PE
7376 net->dev_name_head = netdev_create_hash();
7377 if (net->dev_name_head == NULL)
7378 goto err_name;
881d966b 7379
30d97d35
PE
7380 net->dev_index_head = netdev_create_hash();
7381 if (net->dev_index_head == NULL)
7382 goto err_idx;
881d966b
EB
7383
7384 return 0;
30d97d35
PE
7385
7386err_idx:
7387 kfree(net->dev_name_head);
7388err_name:
7389 return -ENOMEM;
881d966b
EB
7390}
7391
f0db275a
SH
7392/**
7393 * netdev_drivername - network driver for the device
7394 * @dev: network device
f0db275a
SH
7395 *
 7396 * Determine the name of the network driver for the device.
7397 */
3019de12 7398const char *netdev_drivername(const struct net_device *dev)
6579e57b 7399{
cf04a4c7
SH
7400 const struct device_driver *driver;
7401 const struct device *parent;
3019de12 7402 const char *empty = "";
6579e57b
AV
7403
7404 parent = dev->dev.parent;
6579e57b 7405 if (!parent)
3019de12 7406 return empty;
6579e57b
AV
7407
7408 driver = parent->driver;
7409 if (driver && driver->name)
3019de12
DM
7410 return driver->name;
7411 return empty;
6579e57b
AV
7412}
7413
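/*
 * Usage sketch (illustrative, not part of dev.c): reporting which driver
 * backs a device, e.g. from a watchdog or diagnostic path.
 * "my_report_driver" is hypothetical.
 */
static void my_report_driver(struct net_device *dev)
{
	pr_warn("%s: handled by driver %s\n",
		netdev_name(dev), netdev_drivername(dev));
}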
6ea754eb
JP
7414static void __netdev_printk(const char *level, const struct net_device *dev,
7415 struct va_format *vaf)
256df2f3 7416{
b004ff49 7417 if (dev && dev->dev.parent) {
6ea754eb
JP
7418 dev_printk_emit(level[1] - '0',
7419 dev->dev.parent,
7420 "%s %s %s%s: %pV",
7421 dev_driver_string(dev->dev.parent),
7422 dev_name(dev->dev.parent),
7423 netdev_name(dev), netdev_reg_state(dev),
7424 vaf);
b004ff49 7425 } else if (dev) {
6ea754eb
JP
7426 printk("%s%s%s: %pV",
7427 level, netdev_name(dev), netdev_reg_state(dev), vaf);
b004ff49 7428 } else {
6ea754eb 7429 printk("%s(NULL net_device): %pV", level, vaf);
b004ff49 7430 }
256df2f3
JP
7431}
7432
6ea754eb
JP
7433void netdev_printk(const char *level, const struct net_device *dev,
7434 const char *format, ...)
256df2f3
JP
7435{
7436 struct va_format vaf;
7437 va_list args;
256df2f3
JP
7438
7439 va_start(args, format);
7440
7441 vaf.fmt = format;
7442 vaf.va = &args;
7443
6ea754eb 7444 __netdev_printk(level, dev, &vaf);
b004ff49 7445
256df2f3 7446 va_end(args);
256df2f3
JP
7447}
7448EXPORT_SYMBOL(netdev_printk);
7449
7450#define define_netdev_printk_level(func, level) \
6ea754eb 7451void func(const struct net_device *dev, const char *fmt, ...) \
256df2f3 7452{ \
256df2f3
JP
7453 struct va_format vaf; \
7454 va_list args; \
7455 \
7456 va_start(args, fmt); \
7457 \
7458 vaf.fmt = fmt; \
7459 vaf.va = &args; \
7460 \
6ea754eb 7461 __netdev_printk(level, dev, &vaf); \
b004ff49 7462 \
256df2f3 7463 va_end(args); \
256df2f3
JP
7464} \
7465EXPORT_SYMBOL(func);
7466
7467define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7468define_netdev_printk_level(netdev_alert, KERN_ALERT);
7469define_netdev_printk_level(netdev_crit, KERN_CRIT);
7470define_netdev_printk_level(netdev_err, KERN_ERR);
7471define_netdev_printk_level(netdev_warn, KERN_WARNING);
7472define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7473define_netdev_printk_level(netdev_info, KERN_INFO);
7474
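/*
 * Usage sketch (illustrative, not part of dev.c): the generated helpers
 * are used like dev_err()/dev_info(), prefixing the message with the
 * driver, device and netdev name (plus registration state when relevant).
 * "my_link_change" is hypothetical.
 */
static void my_link_change(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link up\n");
	else
		netdev_warn(dev, "link down\n");
}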
4665079c 7475static void __net_exit netdev_exit(struct net *net)
881d966b
EB
7476{
7477 kfree(net->dev_name_head);
7478 kfree(net->dev_index_head);
7479}
7480
022cbae6 7481static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
7482 .init = netdev_init,
7483 .exit = netdev_exit,
7484};
7485
4665079c 7486static void __net_exit default_device_exit(struct net *net)
ce286d32 7487{
e008b5fc 7488 struct net_device *dev, *aux;
ce286d32 7489 /*
e008b5fc 7490 * Push all migratable network devices back to the
ce286d32
EB
7491 * initial network namespace
7492 */
7493 rtnl_lock();
e008b5fc 7494 for_each_netdev_safe(net, dev, aux) {
ce286d32 7495 int err;
aca51397 7496 char fb_name[IFNAMSIZ];
ce286d32
EB
7497
 7498 /* Ignore unmovable devices (e.g. loopback) */
7499 if (dev->features & NETIF_F_NETNS_LOCAL)
7500 continue;
7501
e008b5fc
EB
7502 /* Leave virtual devices for the generic cleanup */
7503 if (dev->rtnl_link_ops)
7504 continue;
d0c082ce 7505
25985edc 7506 /* Push remaining network devices to init_net */
aca51397
PE
7507 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7508 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 7509 if (err) {
7b6cd1ce
JP
7510 pr_emerg("%s: failed to move %s to init_net: %d\n",
7511 __func__, dev->name, err);
aca51397 7512 BUG();
ce286d32
EB
7513 }
7514 }
7515 rtnl_unlock();
7516}
7517
50624c93
EB
7518static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7519{
7520 /* Return with the rtnl_lock held when there are no network
7521 * devices unregistering in any network namespace in net_list.
7522 */
7523 struct net *net;
7524 bool unregistering;
ff960a73 7525 DEFINE_WAIT_FUNC(wait, woken_wake_function);
50624c93 7526
ff960a73 7527 add_wait_queue(&netdev_unregistering_wq, &wait);
50624c93 7528 for (;;) {
50624c93
EB
7529 unregistering = false;
7530 rtnl_lock();
7531 list_for_each_entry(net, net_list, exit_list) {
7532 if (net->dev_unreg_count > 0) {
7533 unregistering = true;
7534 break;
7535 }
7536 }
7537 if (!unregistering)
7538 break;
7539 __rtnl_unlock();
ff960a73
PZ
7540
7541 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
50624c93 7542 }
ff960a73 7543 remove_wait_queue(&netdev_unregistering_wq, &wait);
50624c93
EB
7544}
7545
04dc7f6b
EB
7546static void __net_exit default_device_exit_batch(struct list_head *net_list)
7547{
 7548 /* At exit, all network devices must be removed from a network
b595076a 7549 * namespace. Do this in the reverse order of registration.
04dc7f6b
EB
7550 * Do this across as many network namespaces as possible to
7551 * improve batching efficiency.
7552 */
7553 struct net_device *dev;
7554 struct net *net;
7555 LIST_HEAD(dev_kill_list);
7556
50624c93
EB
7557 /* To prevent network device cleanup code from dereferencing
7558 * loopback devices or network devices that have been freed
7559 * wait here for all pending unregistrations to complete,
 7560 * before unregistering the loopback device and allowing the
 7561 * network namespace to be freed.
7562 *
 7563 * The netdev todo list containing all network device
7564 * unregistrations that happen in default_device_exit_batch
7565 * will run in the rtnl_unlock() at the end of
7566 * default_device_exit_batch.
7567 */
7568 rtnl_lock_unregistering(net_list);
04dc7f6b
EB
7569 list_for_each_entry(net, net_list, exit_list) {
7570 for_each_netdev_reverse(net, dev) {
b0ab2fab 7571 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
04dc7f6b
EB
7572 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7573 else
7574 unregister_netdevice_queue(dev, &dev_kill_list);
7575 }
7576 }
7577 unregister_netdevice_many(&dev_kill_list);
7578 rtnl_unlock();
7579}
7580
022cbae6 7581static struct pernet_operations __net_initdata default_device_ops = {
ce286d32 7582 .exit = default_device_exit,
04dc7f6b 7583 .exit_batch = default_device_exit_batch,
ce286d32
EB
7584};
7585
1da177e4
LT
7586/*
7587 * Initialize the DEV module. At boot time this walks the device list and
7588 * unhooks any devices that fail to initialise (normally hardware not
7589 * present) and leaves us with a valid list of present and active devices.
7590 *
7591 */
7592
7593/*
7594 * This is called single threaded during boot, so no need
7595 * to take the rtnl semaphore.
7596 */
7597static int __init net_dev_init(void)
7598{
7599 int i, rc = -ENOMEM;
7600
7601 BUG_ON(!dev_boot_phase);
7602
1da177e4
LT
7603 if (dev_proc_init())
7604 goto out;
7605
8b41d188 7606 if (netdev_kobject_init())
1da177e4
LT
7607 goto out;
7608
7609 INIT_LIST_HEAD(&ptype_all);
82d8a867 7610 for (i = 0; i < PTYPE_HASH_SIZE; i++)
1da177e4
LT
7611 INIT_LIST_HEAD(&ptype_base[i]);
7612
62532da9
VY
7613 INIT_LIST_HEAD(&offload_base);
7614
881d966b
EB
7615 if (register_pernet_subsys(&netdev_net_ops))
7616 goto out;
1da177e4
LT
7617
7618 /*
7619 * Initialise the packet receive queues.
7620 */
7621
6f912042 7622 for_each_possible_cpu(i) {
e36fa2f7 7623 struct softnet_data *sd = &per_cpu(softnet_data, i);
1da177e4 7624
e36fa2f7 7625 skb_queue_head_init(&sd->input_pkt_queue);
6e7676c1 7626 skb_queue_head_init(&sd->process_queue);
e36fa2f7 7627 INIT_LIST_HEAD(&sd->poll_list);
a9cbd588 7628 sd->output_queue_tailp = &sd->output_queue;
df334545 7629#ifdef CONFIG_RPS
e36fa2f7
ED
7630 sd->csd.func = rps_trigger_softirq;
7631 sd->csd.info = sd;
e36fa2f7 7632 sd->cpu = i;
1e94d72f 7633#endif
0a9627f2 7634
e36fa2f7
ED
7635 sd->backlog.poll = process_backlog;
7636 sd->backlog.weight = weight_p;
1da177e4
LT
7637 }
7638
1da177e4
LT
7639 dev_boot_phase = 0;
7640
505d4f73
EB
 7641 /* The loopback device is special: if any other network device
 7642 * is present in a network namespace, the loopback device must
 7643 * be present. Since we now dynamically allocate and free the
 7644 * loopback device, ensure this invariant is maintained by
 7645 * keeping the loopback device as the first device on the
 7646 * list of network devices, ensuring the loopback device
 7647 * is the first device that appears and the last network device
 7648 * that disappears.
7649 */
7650 if (register_pernet_device(&loopback_net_ops))
7651 goto out;
7652
7653 if (register_pernet_device(&default_device_ops))
7654 goto out;
7655
962cf36c
CM
7656 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7657 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4
LT
7658
7659 hotcpu_notifier(dev_cpu_callback, 0);
7660 dst_init();
1da177e4
LT
7661 rc = 0;
7662out:
7663 return rc;
7664}
7665
7666subsys_initcall(net_dev_init);