/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell      :       SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;       /* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
                                         struct net_device *dev,
                                         struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
        while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(dev_net(dev));
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
         ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
         ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if protocol handler, mangling packet, will
 *      be the first on list, it is not able to sense, that packet
 *      is cloned and should be copied-on-write, so that it will
 *      change it and subsequent readers will get broken packet.
 *                                                      --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return &ptype_all;
        else
                return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep therefore it can not
 *      guarantee all CPU's that are in middle of receiving packets
 *      will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
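
/* Usage sketch (not part of this file): a module that wants to see every
 * inbound IPv4 frame declares a handler and registers it with
 * dev_add_pack().  The handler and variable names below are hypothetical:
 *
 *      static int my_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
 *                             struct packet_type *pt,
 *                             struct net_device *orig_dev)
 *      {
 *              // inspect skb here; the handler owns it from this point
 *              kfree_skb(skb);
 *              return NET_RX_SUCCESS;
 *      }
 *
 *      static struct packet_type my_ipv4_pt __read_mostly = {
 *              .type = cpu_to_be16(ETH_P_IP),
 *              .func = my_ipv4_rcv,
 *      };
 *
 *      dev_add_pack(&my_ipv4_pt);
 */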

/**
 *      __dev_remove_pack        - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPU's have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_pack: %p not found\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *      dev_remove_pack  - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/**
 *      dev_add_offload - register offload handlers
 *      @po: protocol offload declaration
 *
 *      Add protocol offload handlers to the networking stack. The passed
 *      &proto_offload is linked into kernel lists and may not be freed until
 *      it has been removed from the kernel lists.
 *
 *      This call does not sleep therefore it can not
 *      guarantee all CPU's that are in middle of receiving packets
 *      will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;

        spin_lock(&offload_lock);
        list_add_rcu(&po->list, head);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *      __dev_remove_offload     - remove offload handler
 *      @po: packet offload declaration
 *
 *      Remove a protocol offload handler that was previously added to the
 *      kernel offload handlers by dev_add_offload(). The passed &offload_type
 *      is removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPU's have gone
 *      through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}

/**
 *      dev_remove_offload       - remove packet offload handler
 *      @po: packet offload declaration
 *
 *      Remove a packet offload handler that was previously added to the kernel
 *      offload handlers by dev_add_offload(). The passed &offload_type is
 *      removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
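
/* For reference, net/ipv4/af_inet.c registers the IPv4 GRO/GSO callbacks
 * roughly this way (shown as an illustration of the packet_offload API):
 *
 *      static struct packet_offload ip_packet_offload __read_mostly = {
 *              .type = cpu_to_be16(ETH_P_IP),
 *              .callbacks = {
 *                      .gso_segment = inet_gso_segment,
 *                      .gro_receive = inet_gro_receive,
 *                      .gro_complete = inet_gro_complete,
 *              },
 *      };
 *
 *      dev_add_offload(&ip_packet_offload);
 */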

/******************************************************************************

                      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *      netdev_boot_setup_add   - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine to
 *      all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq        = s[i].map.irq;
                        dev->base_addr  = s[i].map.base_addr;
                        dev->mem_start  = s[i].map.mem_start;
                        dev->mem_end    = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *      netdev_boot_base        - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
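
/* For example, booting with
 *
 *      netdev=9,0x340,0xd0000,0xd4000,eth0
 *
 * records IRQ 9, I/O base 0x340 and the given memory window under the name
 * "eth0"; a driver later picks the entry up via netdev_boot_setup_check().
 * (Illustrative values: get_options() parses the leading integers and
 * leaves the trailing string as the device name.)
 */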

/*******************************************************************************

                            Device Interface Subroutines

*******************************************************************************/

/**
 *      __dev_get_by_name       - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *      dev_get_by_name_rcu     - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name.
 *      If the name is found a pointer to the device is returned.
 *      If the name is not found then %NULL is returned.
 *      The reference counters are not incremented so the caller must be
 *      careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *      dev_get_by_name         - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
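
/* Typical use from process context (a sketch; error handling elided):
 *
 *      struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *      if (dev) {
 *              // ... use dev ...
 *              dev_put(dev);   // drop the reference taken above
 *      }
 */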

/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *      dev_get_by_index_rcu - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns NULL if the device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
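
/* Under rcu_read_lock() the _rcu lookup avoids the refcount round-trip
 * when the device is only touched inside the critical section (sketch):
 *
 *      rcu_read_lock();
 *      dev = dev_get_by_index_rcu(net, ifindex);
 *      if (dev)
 *              mtu = dev->mtu;         // no dev_hold()/dev_put() needed here
 *      rcu_read_unlock();
 */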

/**
 *      netdev_get_name - get a netdevice name, knowing its ifindex.
 *      @net: network namespace
 *      @name: a pointer to the buffer where the name will be stored.
 *      @ifindex: the ifindex of the interface to get the name from.
 *
 *      The use of raw_seqcount_begin() and cond_resched() before
 *      retrying is required as we want to give the writers a chance
 *      to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
        struct net_device *dev;
        unsigned int seq;

retry:
        seq = raw_seqcount_begin(&devnet_rename_seq);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        strcpy(name, dev->name);
        rcu_read_unlock();
        if (read_seqcount_retry(&devnet_rename_seq, seq)) {
                cond_resched();
                goto retry;
        }

        return 0;
}

1da177e4 844/**
941666c2 845 * dev_getbyhwaddr_rcu - find a device by its hardware address
c4ea43c5 846 * @net: the applicable net namespace
1da177e4
LT
847 * @type: media type of device
848 * @ha: hardware address
849 *
850 * Search for an interface by MAC address. Returns NULL if the device
c506653d
ED
851 * is not found or a pointer to the device.
852 * The caller must hold RCU or RTNL.
941666c2 853 * The returned device has not had its ref count increased
1da177e4
LT
854 * and the caller must therefore be careful about locking
855 *
1da177e4
LT
856 */
857
941666c2
ED
858struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
859 const char *ha)
1da177e4
LT
860{
861 struct net_device *dev;
862
941666c2 863 for_each_netdev_rcu(net, dev)
1da177e4
LT
864 if (dev->type == type &&
865 !memcmp(dev->dev_addr, ha, dev->addr_len))
7562f876
PE
866 return dev;
867
868 return NULL;
1da177e4 869}
941666c2 870EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
cf309e3f 871
881d966b 872struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
1da177e4
LT
873{
874 struct net_device *dev;
875
4e9cac2b 876 ASSERT_RTNL();
881d966b 877 for_each_netdev(net, dev)
4e9cac2b 878 if (dev->type == type)
7562f876
PE
879 return dev;
880
881 return NULL;
4e9cac2b 882}
4e9cac2b
PM
883EXPORT_SYMBOL(__dev_getfirstbyhwtype);
884
881d966b 885struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
4e9cac2b 886{
99fe3c39 887 struct net_device *dev, *ret = NULL;
4e9cac2b 888
99fe3c39
ED
889 rcu_read_lock();
890 for_each_netdev_rcu(net, dev)
891 if (dev->type == type) {
892 dev_hold(dev);
893 ret = dev;
894 break;
895 }
896 rcu_read_unlock();
897 return ret;
1da177e4 898}
1da177e4
LT
899EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *      __dev_get_by_flags - find any device with given flags
 *      @net: the applicable net namespace
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns NULL if a device
 *      is not found or a pointer to the device. Must be called inside
 *      rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
                                      unsigned short mask)
{
        struct net_device *dev, *ret;

        ASSERT_RTNL();

        ret = NULL;
        for_each_netdev(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        ret = dev;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names to
 *      allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
bool dev_valid_name(const char *name)
{
        if (*name == '\0')
                return false;
        if (strlen(name) >= IFNAMSIZ)
                return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return false;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return false;
                name++;
        }
        return true;
}
EXPORT_SYMBOL(dev_valid_name);
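
/* For instance, "eth0", "br-lan" and the pattern "wlan%d" are accepted,
 * while "", ".", "..", "a/b" and names containing whitespace (or of
 * IFNAMSIZ bytes or more) are rejected.
 */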

/**
 *      __dev_alloc_name - allocate a name for a device
 *      @net: network namespace to allocate the device name in
 *      @name: name format string
 *      @buf:  scratch buffer and result name string
 *
 *      Passed a format string - eg "lt%d" it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /*  avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        if (buf != name)
                snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string - eg "lt%d" it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
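
/* E.g. dev_alloc_name(dev, "eth%d") stores the first free name of that
 * pattern ("eth0", "eth1", ...) into dev->name and returns the unit
 * number chosen, so a driver can register several instances without
 * tracking the numbering itself.
 */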

static int dev_alloc_name_ns(struct net *net,
                             struct net_device *dev,
                             const char *name)
{
        char buf[IFNAMSIZ];
        int ret;

        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}

static int dev_get_valid_name(struct net *net,
                              struct net_device *dev,
                              const char *name)
{
        BUG_ON(!net);

        if (!dev_valid_name(name))
                return -EINVAL;

        if (strchr(name, '%'))
                return dev_alloc_name_ns(net, dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
                strlcpy(dev->name, name, IFNAMSIZ);

        return 0;
}

/**
 *      dev_change_name - change name of a device
 *      @dev: device
 *      @newname: name (or format string) must be at least IFNAMSIZ
 *
 *      Change name of a device, can pass format strings "eth%d".
 *      for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        unsigned char old_assign_type;
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        write_seqcount_begin(&devnet_rename_seq);

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
                write_seqcount_end(&devnet_rename_seq);
                return 0;
        }

        memcpy(oldname, dev->name, IFNAMSIZ);

        err = dev_get_valid_name(net, dev, newname);
        if (err < 0) {
                write_seqcount_end(&devnet_rename_seq);
                return err;
        }

        if (oldname[0] && !strchr(oldname, '%'))
                netdev_info(dev, "renamed from %s\n", oldname);

        old_assign_type = dev->name_assign_type;
        dev->name_assign_type = NET_NAME_RENAMED;

rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
                dev->name_assign_type = old_assign_type;
                write_seqcount_end(&devnet_rename_seq);
                return ret;
        }

        write_seqcount_end(&devnet_rename_seq);

        netdev_adjacent_rename_links(dev, oldname);

        write_lock_bh(&dev_base_lock);
        hlist_del_rcu(&dev->name_hlist);
        write_unlock_bh(&dev_base_lock);

        synchronize_rcu();

        write_lock_bh(&dev_base_lock);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        write_seqcount_begin(&devnet_rename_seq);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        memcpy(oldname, newname, IFNAMSIZ);
                        dev->name_assign_type = old_assign_type;
                        old_assign_type = NET_NAME_RENAMED;
                        goto rollback;
                } else {
                        pr_err("%s: name change rollback failed: %d\n",
                               dev->name, ret);
                }
        }

        return err;
}

/**
 *      dev_set_alias - change ifalias of a device
 *      @dev: device
 *      @alias: name up to IFALIASZ
 *      @len: limit of bytes to copy from info
 *
 *      Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        char *new_ifalias;

        ASSERT_RTNL();

        if (len >= IFALIASZ)
                return -EINVAL;

        if (!len) {
                kfree(dev->ifalias);
                dev->ifalias = NULL;
                return 0;
        }

        new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
        if (!new_ifalias)
                return -ENOMEM;
        dev->ifalias = new_ifalias;

        strlcpy(dev->ifalias, alias, len+1);
        return len;
}


/**
 *      netdev_features_change - device changes features
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *      netdev_state_change - device changes state
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed state. This function calls
 *      the notifier chains for netdev_chain and sends a NEWLINK message
 *      to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                struct netdev_notifier_change_info change_info;

                change_info.flags_changed = 0;
                call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
                                              &change_info.info);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
        }
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *      netdev_notify_peers - notify network peers about existence of @dev
 *      @dev: network device
 *
 *      Generate traffic such that interested network peers are aware of
 *      @dev, such as by generating a gratuitous ARP. This may be used when
 *      a device wants to inform the rest of the network about some sort of
 *      reconfiguration such as a failover event or virtual machine
 *      migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
        rtnl_lock();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int ret;

        ASSERT_RTNL();

        if (!netif_device_present(dev))
                return -ENODEV;

        /* Block netpoll from trying to do any rx path servicing.
         * If we don't do this there is a chance ndo_poll_controller
         * or ndo_poll may be running while we open the device
         */
        netpoll_poll_disable(dev);

        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
        ret = notifier_to_errno(ret);
        if (ret)
                return ret;

        set_bit(__LINK_STATE_START, &dev->state);

        if (ops->ndo_validate_addr)
                ret = ops->ndo_validate_addr(dev);

        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);

        netpoll_poll_enable(dev);

        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
                dev->flags |= IFF_UP;
                dev_set_rx_mode(dev);
                dev_activate(dev);
                add_device_randomness(dev->dev_addr, dev->addr_len);
        }

        return ret;
}

/**
 *      dev_open        - prepare an interface for use.
 *      @dev:   device to open
 *
 *      Takes a device from down to up state. The device's private open
 *      function is invoked and then the multicast lists are loaded. Finally
 *      the device is moved into the up state and a %NETDEV_UP message is
 *      sent to the netdev notifier chain.
 *
 *      Calling this function on an active interface is a nop. On a failure
 *      a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
        int ret;

        if (dev->flags & IFF_UP)
                return 0;

        ret = __dev_open(dev);
        if (ret < 0)
                return ret;

        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
        call_netdevice_notifiers(NETDEV_UP, dev);

        return ret;
}
EXPORT_SYMBOL(dev_open);
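
/* Both dev_open() and dev_close() must run under RTNL (note the
 * ASSERT_RTNL() in __dev_open() above); a sketch of bringing an interface
 * up from kernel code:
 *
 *      rtnl_lock();
 *      err = dev_open(dev);
 *      rtnl_unlock();
 */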

static int __dev_close_many(struct list_head *head)
{
        struct net_device *dev;

        ASSERT_RTNL();
        might_sleep();

        list_for_each_entry(dev, head, close_list) {
                /* Temporarily disable netpoll until the interface is down */
                netpoll_poll_disable(dev);

                call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

                clear_bit(__LINK_STATE_START, &dev->state);

                /* Synchronize to scheduled poll. We cannot touch poll list, it
                 * can be even on different cpu. So just clear netif_running().
                 *
                 * dev->stop() will invoke napi_disable() on all of its
                 * napi_struct instances on this device.
                 */
                smp_mb__after_atomic(); /* Commit netif_running(). */
        }

        dev_deactivate_many(head);

        list_for_each_entry(dev, head, close_list) {
                const struct net_device_ops *ops = dev->netdev_ops;

                /*
                 *      Call the device specific close. This cannot fail.
                 *      Only if device is UP
                 *
                 *      We allow it to be called even after a DETACH hot-plug
                 *      event.
                 */
                if (ops->ndo_stop)
                        ops->ndo_stop(dev);

                dev->flags &= ~IFF_UP;
                netpoll_poll_enable(dev);
        }

        return 0;
}

static int __dev_close(struct net_device *dev)
{
        int retval;
        LIST_HEAD(single);

        list_add(&dev->close_list, &single);
        retval = __dev_close_many(&single);
        list_del(&single);

        return retval;
}

static int dev_close_many(struct list_head *head)
{
        struct net_device *dev, *tmp;

        /* Remove the devices that don't need to be closed */
        list_for_each_entry_safe(dev, tmp, head, close_list)
                if (!(dev->flags & IFF_UP))
                        list_del_init(&dev->close_list);

        __dev_close_many(head);

        list_for_each_entry_safe(dev, tmp, head, close_list) {
                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
                call_netdevice_notifiers(NETDEV_DOWN, dev);
                list_del_init(&dev->close_list);
        }

        return 0;
}

/**
 *      dev_close - shutdown an interface.
 *      @dev: device to shutdown
 *
 *      This function moves an active device into down state. A
 *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *      chain.
 */
int dev_close(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                LIST_HEAD(single);

                list_add(&dev->close_list, &single);
                dev_close_many(&single);
                list_del(&single);
        }
        return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *      dev_disable_lro - disable Large Receive Offload on a device
 *      @dev: device
 *
 *      Disable Large Receive Offload (LRO) on a net device.  Must be
 *      called under RTNL.  This is needed if received packets may be
 *      forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
        struct net_device *lower_dev;
        struct list_head *iter;

        dev->wanted_features &= ~NETIF_F_LRO;
        netdev_update_features(dev);

        if (unlikely(dev->features & NETIF_F_LRO))
                netdev_WARN(dev, "failed to disable LRO!\n");

        netdev_for_each_lower_dev(dev, lower_dev, iter)
                dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
                                   struct net_device *dev)
{
        struct netdev_notifier_info info;

        netdev_notifier_info_init(&info, dev);
        return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 *      register_netdevice_notifier - register a network notifier block
 *      @nb: notifier
 *
 *      Register a notifier to be called when network device events occur.
 *      The notifier passed is linked into the kernel structures and must
 *      not be reused until it has been unregistered. A negative errno code
 *      is returned on a failure.
 *
 *      When registered all registration and up events are replayed
 *      to the new notifier to allow device to have a race free
 *      view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
        struct net_device *dev;
        struct net_device *last;
        struct net *net;
        int err;

        rtnl_lock();
        err = raw_notifier_chain_register(&netdev_chain, nb);
        if (err)
                goto unlock;
        if (dev_boot_phase)
                goto unlock;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
                        err = notifier_to_errno(err);
                        if (err)
                                goto rollback;

                        if (!(dev->flags & IFF_UP))
                                continue;

                        call_netdevice_notifier(nb, NETDEV_UP, dev);
                }
        }

unlock:
        rtnl_unlock();
        return err;

rollback:
        last = dev;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
                                goto outroll;

                        if (dev->flags & IFF_UP) {
                                call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
                                                        dev);
                                call_netdevice_notifier(nb, NETDEV_DOWN, dev);
                        }
                        call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
                }
        }

outroll:
        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
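
/* Usage sketch: a subsystem that wants to react to interfaces coming and
 * going registers a notifier_block (callback and variable names here are
 * hypothetical):
 *
 *      static int my_netdev_event(struct notifier_block *nb,
 *                                 unsigned long event, void *ptr)
 *      {
 *              struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *              switch (event) {
 *              case NETDEV_UP:
 *                      // dev is now up
 *                      break;
 *              case NETDEV_GOING_DOWN:
 *                      // dev is about to go down
 *                      break;
 *              }
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_netdev_nb = {
 *              .notifier_call = my_netdev_event,
 *      };
 *
 *      register_netdevice_notifier(&my_netdev_nb);
 */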

/**
 *      unregister_netdevice_notifier - unregister a network notifier block
 *      @nb: notifier
 *
 *      Unregister a notifier previously registered by
 *      register_netdevice_notifier(). The notifier is unlinked from the
 *      kernel structures and may then be reused. A negative errno code
 *      is returned on a failure.
 *
 *      After unregistering unregister and down device events are synthesized
 *      for all devices on the device list to the removed notifier to remove
 *      the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
        struct net_device *dev;
        struct net *net;
        int err;

        rtnl_lock();
        err = raw_notifier_chain_unregister(&netdev_chain, nb);
        if (err)
                goto unlock;

        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev->flags & IFF_UP) {
                                call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
                                                        dev);
                                call_netdevice_notifier(nb, NETDEV_DOWN, dev);
                        }
                        call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
                }
        }
unlock:
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *      call_netdevice_notifiers_info - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *      @info: notifier information data
 *
 *      Call all network notifier blocks.  Parameters and return value
 *      are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
                                         struct net_device *dev,
                                         struct netdev_notifier_info *info)
{
        ASSERT_RTNL();
        netdev_notifier_info_init(info, dev);
        return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *      call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *
 *      Call all network notifier blocks.  Parameters and return value
 *      are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
        struct netdev_notifier_info info;

        return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
        int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

        if (deferred) {
                while (--deferred)
                        static_key_slow_dec(&netstamp_needed);
                return;
        }
#endif
        static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
        if (in_interrupt()) {
                atomic_inc(&netstamp_needed_deferred);
                return;
        }
#endif
        static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
        skb->tstamp.tv64 = 0;
        if (static_key_false(&netstamp_needed))
                __net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)                  \
        if (static_key_false(&netstamp_needed)) {       \
                if ((COND) && !(SKB)->tstamp.tv64)      \
                        __net_timestamp(SKB);           \
        }                                               \

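/* net_enable_timestamp()/net_disable_timestamp() are effectively
 * reference counted via the static key above: for example,
 * sock_enable_timestamp() calls net_enable_timestamp() when a socket
 * asks for SO_TIMESTAMP, and while no caller holds the key the
 * net_timestamp_check() branch is patched out entirely.
 */
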
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
{
        unsigned int len;

        if (!(dev->flags & IFF_UP))
                return false;

        len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
        if (skb->len <= len)
                return true;

        /* if TSO is enabled, we don't care about the length as the packet
         * could be forwarded without being segmented before
         */
        if (skb_is_gso(skb))
                return true;

        return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
                        atomic_long_inc(&dev->rx_dropped);
                        kfree_skb(skb);
                        return NET_RX_DROP;
                }
        }

        if (unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        skb_scrub_packet(skb, true);
        skb->protocol = eth_type_trans(skb, dev);

        return 0;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *      NET_RX_SUCCESS  (no congestion)
 *      NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
        return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
1725
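/* A minimal usage sketch (illustrative only, not part of this file):
 * a virtual device pair could hand frames straight to its peer from
 * its ndo_start_xmit. "my_priv" and "peer" are hypothetical names.
 *
 *	static netdev_tx_t pair_xmit(struct sk_buff *skb,
 *				     struct net_device *dev)
 *	{
 *		struct net_device *peer = my_priv(dev)->peer;
 *
 *		dev_forward_skb(peer, skb);	// consumes skb either way
 *		return NETDEV_TX_OK;
 *	}
 */
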
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly set by the sender,
			 * so that the second statement is just protection
			 * against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case,
 * when TC0 is invalid, nothing can be done, so priority mappings are
 * disabled. It is expected that drivers will fix this mapping if they
 * can before calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}

	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
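
/* Illustrative sketch (not part of this file): pinning tx queue 0 of a
 * device to CPUs 0-1 from driver setup code. The function name
 * "mydrv_setup_xps" is hypothetical.
 *
 *	static int mydrv_setup_xps(struct net_device *dev)
 *	{
 *		cpumask_var_t mask;
 *		int err;
 *
 *		if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *			return -ENOMEM;
 *		cpumask_set_cpu(0, mask);
 *		cpumask_set_cpu(1, mask);
 *		err = netif_set_xps_queue(dev, mask, 0);
 *		free_cpumask_var(mask);
 *		return err;
 *	}
 */
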
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
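
/* Sketch of typical driver use (illustrative; names are hypothetical):
 * shrink the active queue set under rtnl_lock after a channel change.
 *
 *	static int mydrv_set_channels(struct net_device *dev,
 *				      unsigned int count)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = netif_set_real_num_tx_queues(dev, count);
 *		if (!err)
 *			err = netif_set_real_num_rx_queues(dev, count);
 *		rtnl_unlock();
 *		return err;
 *	}
 */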

#ifdef CONFIG_SYSFS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Resume individual transmit queue of a device with multiple transmit
 *	queues.
 */
void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(txq->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_wake_subqueue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (likely(atomic_read(&skb->users) == 1)) {
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);

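/* Usage sketch (illustrative): a tx-completion handler that may run in
 * hardirq context should use the _any variants, so the skb is queued to
 * the per-cpu completion list instead of being freed in atomic context.
 * "mydrv_next_completed" is a hypothetical helper.
 *
 *	static void mydrv_clean_tx_ring(struct mydrv_ring *ring)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = mydrv_next_completed(ring)) != NULL)
 *			dev_consume_skb_any(skb);	// safe in any context
 *	}
 */
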
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (!net_ratelimit())
		return;

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

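/* Worked example (illustrative): for a UDP packet with CHECKSUM_PARTIAL,
 * csum_start points at the UDP header and csum_offset is 6 (the offset
 * of udphdr.check), so the folded checksum over [csum_start, tail) is
 * written at:
 *
 *	offset = skb_checksum_start_offset(skb) + skb->csum_offset;
 *	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
 *
 * which is exactly what a NIC with tx checksum offload would have done.
 */
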
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	unsigned int vlan_depth = skb->mac_len;
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	/* if skb->protocol is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		do {
			struct vlan_hdr *vh;

			if (unlikely(!pskb_may_pull(skb,
						    vlan_depth + VLAN_HLEN)))
				return 0;

			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (type == htons(ETH_P_8021Q) ||
			 type == htons(ETH_P_8021AD));
	}

	*depth = vlan_depth;

	return type;
}

/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;
	else
		return skb->ip_summed == CHECKSUM_NONE;
}

/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		skb_warn_bad_offload(skb);

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return skb_mac_gso_segment(skb, features);
}
EXPORT_SYMBOL(__skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. An IOMMU is present and can map all of the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~NETIF_F_ALL_CSUM;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	const struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;
	u16 gso_segs = skb_shinfo(skb)->gso_segs;
	__be16 protocol = skb->protocol;

	if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
		features &= ~NETIF_F_GSO_MASK;

	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, features);
	}

	features = netdev_intersect_features(features,
					     dev->vlan_features |
					     NETIF_F_HW_VLAN_CTAG_TX |
					     NETIF_F_HW_VLAN_STAG_TX);

	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
		features = netdev_intersect_features(features,
						     NETIF_F_SG |
						     NETIF_F_HIGHDMA |
						     NETIF_F_FRAGLIST |
						     NETIF_F_GEN_CSUM |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);

static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}

struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}

static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (vlan_tx_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}

static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	if (skb->next)
		return skb;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (netif_needs_gso(dev, skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (!(features & NETIF_F_ALL_CSUM) &&
			    skb_checksum_help(skb))
				goto out_kfree_skb;
		}
	}

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	return NULL;
}

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get a more precise estimate of the bytes sent on the wire,
	 * we add to pkt_len the header size of every segment after the
	 * first one.
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}

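/* Worked example (illustrative): a TCPv4 GSO skb carrying 2896 bytes of
 * payload with gso_size = 1448 and 54 bytes of mac+ip+tcp headers has
 * skb->len = 2950 and gso_segs = 2, so
 *
 *	pkt_len = 2950 + (2 - 1) * 54 = 3004 = 2 * (1448 + 54)
 *
 * i.e. exactly the bytes the two on-wire segments will occupy.
 */
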
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_pkt_len_init(skb);
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits the __QDISC_STATE_RUNNING owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 *	dev_loopback_xmit - loop back @skb
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);

/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@accel_priv: private data used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value. So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method. (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled. This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	/* If device/qdisc don't need skb->dst, release it right now while
	 * its hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.
	 *
	 * Check this and shoot the lock. It is not prone to deadlocks.
	 * Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev);
			if (!skb)
				goto drop;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
drop:
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);

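/* Minimal usage sketch (illustrative): a kernel module transmitting a
 * raw frame it has built itself. Error handling is elided; remember the
 * skb is consumed whatever dev_queue_xmit() returns.
 *
 *	skb->dev = dev;			// device and priority already set
 *	skb_reset_network_header(skb);
 *	dev_hard_header(skb, dev, ETH_P_IP, dest_mac, dev->dev_addr,
 *			skb->len);
 *	dev_queue_xmit(skb);		// may return positive qdisc codes
 */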

/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1 &&
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */

e36fa2f7 3211static void rps_trigger_softirq(void *data)
0a9627f2 3212{
e36fa2f7
ED
3213 struct softnet_data *sd = data;
3214
eecfd7c4 3215 ____napi_schedule(sd, &sd->backlog);
dee42870 3216 sd->received_rps++;
0a9627f2 3217}
e36fa2f7 3218
fec5e652 3219#endif /* CONFIG_RPS */
0a9627f2 3220
/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);

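/* Classic (non-NAPI) driver usage sketch, illustrative only:
 *
 *	// in the rx interrupt handler, with the frame already copied
 *	// into a freshly allocated skb of len bytes
 *	skb_put(skb, len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * NAPI drivers call netif_receive_skb() from their poll routine instead.
 */
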
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_atomic();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_atomic();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

1da177e4
LT
3470#ifdef CONFIG_NET_CLS_ACT
3471/* TODO: Maybe we should just force sch_ingress to be compiled in
3472 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
3473 * a compare and 2 stores extra right now if we dont have it on
3474 * but have CONFIG_NET_CLS_ACT
25985edc
LDM
3475 * NOTE: This doesn't stop any functionality; if you dont have
3476 * the ingress scheduler, you just can't add policies on ingress.
1da177e4
LT
3477 *
3478 */
24824a09 3479static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
1da177e4 3480{
1da177e4 3481 struct net_device *dev = skb->dev;
f697c3e8 3482 u32 ttl = G_TC_RTTL(skb->tc_verd);
555353cf
DM
3483 int result = TC_ACT_OK;
3484 struct Qdisc *q;
4ec93edb 3485
de384830 3486 if (unlikely(MAX_RED_LOOP < ttl++)) {
e87cc472
JP
3487 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3488 skb->skb_iif, dev->ifindex);
f697c3e8
HX
3489 return TC_ACT_SHOT;
3490 }
1da177e4 3491
f697c3e8
HX
3492 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3493 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
1da177e4 3494
46e5da40 3495 q = rcu_dereference(rxq->qdisc);
8d50b53d 3496 if (q != &noop_qdisc) {
83874000 3497 spin_lock(qdisc_lock(q));
a9312ae8
DM
3498 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3499 result = qdisc_enqueue_root(skb, q);
83874000
DM
3500 spin_unlock(qdisc_lock(q));
3501 }
f697c3e8
HX
3502
3503 return result;
3504}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
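/*
 * Illustrative sketch (not part of this file): this is roughly how a
 * bridge-like component claims a port's traffic.  The example_*() names
 * and the example_port structure are hypothetical;
 * example_port_claim() is assumed to take ownership of the skb when it
 * returns true.
 */
#if 0
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct example_port *port;

	port = rcu_dereference((*pskb)->dev->rx_handler_data);
	if (example_port_claim(port, *pskb))	/* hypothetical */
		return RX_HANDLER_CONSUMED;
	return RX_HANDLER_PASS;
}

static int example_add_port(struct net_device *port_dev,
			    struct example_port *port)
{
	ASSERT_RTNL();
	/* on teardown: netdev_rx_handler_unregister(port_dev); */
	return netdev_rx_handler_register(port_dev, example_handle_frame,
					  port);
}
#endif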

/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto unlock;
ncls:
#endif

	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
			/* fall through */
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(vlan_tx_tag_present(skb))) {
		if (vlan_tx_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
	return ret;
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned long pflags = current->flags;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		current->flags |= PF_MEMALLOC;
		ret = __netif_receive_skb_core(skb, true);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}

static int netif_receive_skb_internal(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}

/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds. The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
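/*
 * Illustrative sketch (not part of this file): a NAPI driver that does not
 * use GRO hands each completed frame to netif_receive_skb() from its
 * ->poll() callback.  example_rx_next_skb() is hypothetical.
 */
#if 0
static int example_poll_one(struct napi_struct *napi)
{
	struct sk_buff *skb = example_rx_next_skb(napi);	/* hypothetical */

	if (!skb)
		return 0;
	skb->protocol = eth_type_trans(skb, napi->dev);
	netif_receive_skb(skb);
	return 1;
}
#endif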

/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}

/* napi->gro_list contains packets ordered by age;
 * the youngest packets are at the head of the list.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;
	int grow;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->udp_mark = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
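/*
 * Illustrative sketch (not part of this file): tunnel offloads typically
 * use gro_find_receive_by_type() to hand the inner packet to the matching
 * protocol's GRO callback.  The ETH_P_TEB inner type and the surrounding
 * function are hypothetical.
 */
#if 0
static struct sk_buff **example_tunnel_gro_receive(struct sk_buff **head,
						   struct sk_buff *skb)
{
	struct packet_offload *ptype;
	struct sk_buff **pp = NULL;
	__be16 type = htons(ETH_P_TEB);		/* hypothetical inner type */

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype)
		pp = ptype->callbacks.gro_receive(head, skb);
	rcu_read_unlock();

	return pp;
}
#endif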

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
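/*
 * Illustrative sketch (not part of this file): inside a hypothetical
 * driver's ->poll() loop, each received frame is passed to
 * napi_gro_receive() instead of netif_receive_skb() so that consecutive
 * segments of the same flow can be merged.
 */
#if 0
static void example_rx_frame(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, napi->dev);
	napi_gro_receive(napi, skb);
}
#endif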

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;
	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header_fast(skb, 0);
	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
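/*
 * Illustrative sketch (not part of this file): the napi_get_frags() /
 * napi_gro_frags() pairing lets a driver build a header-less skb out of
 * page fragments and let the stack pull the headers.  The page, offset
 * and frag_len values, and the one-page truesize accounting, are
 * hypothetical.
 */
#if 0
static void example_rx_frag(struct napi_struct *napi, struct page *page,
			    unsigned int offset, unsigned int frag_len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(page);
		return;
	}
	skb_fill_page_desc(skb, 0, page, offset, frag_len);
	skb->len += frag_len;
	skb->data_len += frag_len;
	skb->truesize += PAGE_SIZE;	/* assumes one page per fragment */
	napi_gro_frags(napi);
}
#endif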

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				smp_call_function_single_async(remsd->cpu,
							       &remsd->csd);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

	/* Check if we have pending IPIs; it's better to send them now
	 * than to wait until net_rx_action() ends.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = weight_p;
	local_irq_disable();
	while (1) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			rps_unlock(sd);

			break;
		}

		skb_queue_splice_tail_init(&sd->input_pkt_queue,
					   &sd->process_queue);
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));

	list_del_init(&n->poll_list);
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	if (n->gro_list) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
		else
			napi_gro_flush(n, false);
	}
	if (likely(list_empty(&n->poll_list))) {
		WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
	} else {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		__napi_complete(n);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(napi_complete_done);

/* must be called under rcu_read_lock(), as we don't take a reference */
struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
EXPORT_SYMBOL_GPL(napi_by_id);

void napi_hash_add(struct napi_struct *napi)
{
	if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {

		spin_lock(&napi_hash_lock);

		/* 0 is not a valid id and we also skip an id that is
		 * already taken; we expect both events to be extremely rare.
		 */
		napi->napi_id = 0;
		while (!napi->napi_id) {
			napi->napi_id = ++napi_gen_id;
			if (napi_by_id(napi->napi_id))
				napi->napi_id = 0;
		}

		hlist_add_head_rcu(&napi->napi_hash_node,
			&napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

		spin_unlock(&napi_hash_lock);
	}
}
EXPORT_SYMBOL_GPL(napi_hash_add);

/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
void napi_hash_del(struct napi_struct *napi)
{
	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
		hlist_del_rcu(&napi->napi_hash_node);

	spin_unlock(&napi_hash_lock);
}
EXPORT_SYMBOL_GPL(napi_hash_del);

static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);
	if (napi->gro_list)
		napi_schedule(napi);

	return HRTIMER_NORESTART;
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
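/*
 * Illustrative sketch (not part of this file): a hypothetical driver wires
 * its poll routine up with netif_napi_add() at probe time, re-arms its RX
 * interrupt only once the budget was not exhausted, and reports the work
 * done via napi_complete_done().  example_clean_rx() and
 * example_enable_rx_irq() are hypothetical.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_clean_rx(napi, budget);	/* hypothetical */

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		example_enable_rx_irq(napi->dev);	/* hypothetical */
	}
	return work_done;
}

static void example_probe_napi(struct net_device *dev,
			       struct napi_struct *napi)
{
	netif_napi_add(dev, napi, example_poll, NAPI_POLL_WEIGHT);
	napi_enable(napi);
}
#endif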

void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

void netif_napi_del(struct napi_struct *napi)
{
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight. In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				return;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
}

struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
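/*
 * Illustrative sketch (not part of this file): a hypothetical caller that
 * does not already run under rtnl_lock() must take it before querying the
 * adjacency lists.
 */
#if 0
static bool example_is_port_of(struct net_device *port,
			       struct net_device *master)
{
	bool linked;

	rtnl_lock();
	linked = netdev_has_upper_dev(port, master);
	rtnl_unlock();

	return linked;
}
#endif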

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->all_adj_list.upper);
}

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);
4762
44a40855
VY
4763/**
4764 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4765 * @dev: device
4766 * @iter: list_head ** of the current position
4767 *
4768 * Gets the next device from the dev's upper list, starting from iter
4769 * position. The caller must hold RCU read lock.
4770 */
4771struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4772 struct list_head **iter)
4773{
4774 struct netdev_adjacent *upper;
4775
4776 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4777
4778 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4779
4780 if (&upper->list == &dev->adj_list.upper)
4781 return NULL;
4782
4783 *iter = &upper->list;
4784
4785 return upper->dev;
4786}
4787EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
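/*
 * Illustrative sketch (not part of this file): walking a device's
 * immediate upper devices with this iterator.  Note that @iter starts at
 * the list head itself.
 */
#if 0
static void example_dump_uppers(struct net_device *dev)
{
	struct list_head *iter = &dev->adj_list.upper;
	struct net_device *upper;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
		pr_info("%s: upper %s\n", dev->name, upper->name);
	rcu_read_unlock();
}
#endif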
4788
31088a11
VF
4789/**
4790 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
48311f46
VF
4791 * @dev: device
4792 * @iter: list_head ** of the current position
4793 *
4794 * Gets the next device from the dev's upper list, starting from iter
4795 * position. The caller must hold RCU read lock.
4796 */
2f268f12
VF
4797struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4798 struct list_head **iter)
48311f46
VF
4799{
4800 struct netdev_adjacent *upper;
4801
85328240 4802 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
48311f46
VF
4803
4804 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4805
2f268f12 4806 if (&upper->list == &dev->all_adj_list.upper)
48311f46
VF
4807 return NULL;
4808
4809 *iter = &upper->list;
4810
4811 return upper->dev;
4812}
2f268f12 4813EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(dev, adj_dev, dev_list);

	if (adj) {
		adj->ref_nr++;
		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("dev_hold for %s, because of link added from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	adj = __netdev_find_adj(dev, adj_dev, dev_list);

	if (!adj) {
		pr_err("tried to remove device %s from %s\n",
		       dev->name, adj_dev->name);
		BUG();
	}

	if (adj->ref_nr > 1) {
		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
			 adj->ref_nr-1);
		adj->ref_nr--;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
					   master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
					   false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
		return ret;
	}

	return 0;
}

static int __netdev_adjacent_dev_link(struct net_device *dev,
				      struct net_device *upper_dev)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->all_adj_list.upper,
						&upper_dev->all_adj_list.lower,
						NULL, false);
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
}

static void __netdev_adjacent_dev_unlink(struct net_device *dev,
					 struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->all_adj_list.upper,
					   &upper_dev->all_adj_list.lower);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	int ret = __netdev_adjacent_dev_link(dev, upper_dev);

	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
					       &dev->adj_list.upper,
					       &upper_dev->adj_list.lower,
					       private, master);
	if (ret) {
		__netdev_adjacent_dev_unlink(dev, upper_dev);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink(dev, upper_dev);
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *private)
{
	struct netdev_adjacent *i, *j, *to_i, *to_j;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
		return -EBUSY;

	if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
						   master);
	if (ret)
		return ret;

	/* Now that we linked these devs, make all the upper_dev's
	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
	 * vice versa, and don't forget the devices themselves. All of these
	 * links are non-neighbours.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			pr_debug("Interlinking %s with %s, non-neighbour\n",
				 i->dev->name, j->dev->name);
			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
			if (ret)
				goto rollback_mesh;
		}
	}

	/* add dev to every upper_dev's upper device */
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		pr_debug("linking %s's upper device %s with %s\n",
			 upper_dev->name, i->dev->name, dev->name);
		ret = __netdev_adjacent_dev_link(dev, i->dev);
		if (ret)
			goto rollback_upper_mesh;
	}

	/* add upper_dev to every dev's lower device */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		pr_debug("linking %s's lower device %s with %s\n", dev->name,
			 i->dev->name, upper_dev->name);
		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
		if (ret)
			goto rollback_lower_mesh;
	}

	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
	return 0;

rollback_lower_mesh:
	to_i = i;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
	}

	i = NULL;

rollback_upper_mesh:
	to_i = i;
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(dev, i->dev);
	}

	i = j = NULL;

rollback_mesh:
	to_i = i;
	to_j = j;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			if (i == to_i && j == to_j)
				break;
			__netdev_adjacent_dev_unlink(i->dev, j->dev);
		}
		if (i == to_i)
			break;
	}

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
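/*
 * Illustrative sketch (not part of this file): a bonding-style master
 * enslaving a port, under rtnl_lock().  The unwind path mirrors the link
 * with netdev_upper_dev_unlink().  example_port_init() is hypothetical.
 */
#if 0
static int example_enslave(struct net_device *master_dev,
			   struct net_device *port_dev)
{
	int err;

	ASSERT_RTNL();
	err = netdev_master_upper_dev_link(port_dev, master_dev);
	if (err)
		return err;

	err = example_port_init(port_dev);	/* hypothetical */
	if (err) {
		netdev_upper_dev_unlink(port_dev, master_dev);
		return err;
	}
	return 0;
}
#endif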
5271
402dae96
VF
5272int netdev_master_upper_dev_link_private(struct net_device *dev,
5273 struct net_device *upper_dev,
5274 void *private)
5275{
5276 return __netdev_upper_dev_link(dev, upper_dev, true, private);
5277}
5278EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5279
9ff162a8
JP
5280/**
5281 * netdev_upper_dev_unlink - Removes a link to upper device
5282 * @dev: device
5283 * @upper_dev: new upper device
5284 *
5285 * Removes a link to device which is upper to this one. The caller must hold
5286 * the RTNL lock.
5287 */
5288void netdev_upper_dev_unlink(struct net_device *dev,
5289 struct net_device *upper_dev)
5290{
5d261913 5291 struct netdev_adjacent *i, *j;
9ff162a8
JP
5292 ASSERT_RTNL();
5293
2f268f12 5294 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
5295
5296 /* Here is the tricky part. We must remove all dev's lower
5297 * devices from all upper_dev's upper devices and vice
5298 * versa, to maintain the graph relationship.
5299 */
2f268f12
VF
5300 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5301 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5d261913
VF
5302 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5303
5304 /* remove also the devices itself from lower/upper device
5305 * list
5306 */
2f268f12 5307 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5d261913
VF
5308 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5309
2f268f12 5310 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5d261913
VF
5311 __netdev_adjacent_dev_unlink(dev, i->dev);
5312
42e52bf9 5313 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
9ff162a8
JP
5314}
5315EXPORT_SYMBOL(netdev_upper_dev_unlink);
5316
void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev,
		       bool (*type_check)(struct net_device *dev))
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = -1;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower, type_check);
		if (max_nest < nest)
			max_nest = nest;
	}

	if (type_check(dev))
		max_nest++;

	return max_nest;
}
EXPORT_SYMBOL(dev_get_nest_level);
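
/* Illustrative sketch, not part of the original file: dev_get_nest_level()
 * recursively walks the lower-device graph and returns the depth of
 * stacking for devices matching @type_check (-1 when none match, e.g. a
 * plain NIC). A VLAN driver could derive its lockdep nesting depth this
 * way; is_vlan_dev() is the predicate from <linux/if_vlan.h>, assumed
 * here for illustration.
 */
static int example_vlan_nest_level(struct net_device *dev)
{
	int nest;

	rtnl_lock();
	nest = dev_get_nest_level(dev, is_vlan_dev);	/* -1 for a plain NIC */
	rtnl_unlock();
	return nest;
}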

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, leave promiscuity unchanged and
		 * return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
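
/* Illustrative sketch, not part of the original file: promiscuity is a
 * reference count, not a boolean, so each user takes and drops its own
 * reference. A hypothetical capture driver might bracket a sniffing
 * session like this, always pairing +1 with -1 under the RTNL.
 */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take one promisc reference */
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}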

static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, leave allmulti unchanged and
		 * return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
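
/* Illustrative sketch, not part of the original file: like promiscuity,
 * allmulti is reference counted. A hypothetical routing-protocol helper
 * that needs every multicast frame while the protocol is active could
 * pair the increment with a matching decrement on detach.
 */
static int example_mcast_proto_attach(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_allmulti(dev, 1);		/* +1 while protocol runs */
	rtnl_unlock();
	return err;
}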

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags & IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast address changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
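
/* Illustrative sketch, not part of the original file: the flags returned
 * here are the userspace view (SIOCGIFFLAGS/RTM_NEWLINK), where the
 * volatile bits are recomputed from operstate and carrier rather than
 * read straight from dev->flags. A hypothetical "up and usable" check:
 */
static bool example_link_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}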

int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface? We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP)
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, without asking us and without
	 * reporting it.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = changes;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
	}
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on a device based on state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
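
/* Illustrative sketch, not part of the original file: bringing an
 * interface administratively up from kernel code. dev_change_flags()
 * takes the userspace flag format, so IFF_UP is OR'ed into the current
 * view returned by dev_get_flags(). RTNL must be held.
 */
static int example_set_if_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}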

static int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		}
	}
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
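
/* Illustrative sketch, not part of the original file: callers simply
 * pass the desired MTU; the notifier dance above (a PRECHANGEMTU veto,
 * then CHANGEMTU with rollback on refusal) happens inside dev_set_mtu()
 * itself. The jumbo-frame value is a hypothetical example.
 */
static int example_enable_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);	/* hypothetical jumbo-frame MTU */
	rtnl_unlock();
	if (err)
		netdev_warn(dev, "MTU change rejected: %d\n", err);
	return err;
}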

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
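
/* Illustrative sketch, not part of the original file: the new address
 * travels in a struct sockaddr whose sa_family must match dev->type
 * (ARPHRD_ETHER for Ethernet), or -EINVAL comes back. The locally
 * administered address below is a hypothetical example.
 */
static int example_set_mac(struct net_device *dev)
{
	static const u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;		/* must match, or -EINVAL */
	memcpy(sa.sa_data, addr, ETH_ALEN);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}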

/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}

static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
						     GFP_KERNEL);

		/*
		 * Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach all our upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* UFO needs SG and checksumming */
	if (features & NETIF_F_UFO) {
		/* maybe split UFO into V4 and V6? */
		if (!((features & NETIF_F_GEN_CSUM) ||
		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
			netdev_dbg(dev,
				   "Dropping NETIF_F_UFO since no checksum offload features.\n");
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			netdev_dbg(dev,
				   "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
			features &= ~NETIF_F_UFO;
		}
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (dev->netdev_ops->ndo_busy_poll)
		features |= NETIF_F_BUSY_POLL;
	else
#endif
		features &= ~NETIF_F_BUSY_POLL;

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	netdev_features_t features;
	int err = 0;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	if (dev->features == features)
		return 0;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		   &dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);

	if (unlikely(err < 0)) {
		netdev_err(dev,
			   "set_features() failed (%d); wanted %pNF, left %pNF\n",
			   err, &features, &dev->features);
		return -1;
	}

	if (!err)
		dev->features = features;

	return 1;
}

/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

#ifdef CONFIG_SYSFS
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;

	BUG_ON(count < 1);

	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}
#endif

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	BUG_ON(count < 1 || count > 0xffff);

	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!tx) {
		tx = vzalloc(sz);
		if (!tx)
			return -ENOMEM;
	}
	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_devices are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	dev->iflink = -1;

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;
	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK)) {
		dev->hw_features |= NETIF_F_NOCACHE_COPY;
	}

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 * Default initial state at registry is that the
	 * device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has a permanent device address, the driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls.
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);

/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
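
/* Illustrative sketch, not part of the original file: the canonical
 * driver pattern is alloc_netdev() with a format-string name, fill in
 * the ops, then register_netdev(), which grabs the RTNL and expands
 * "eth%d" itself. ether_setup() is the standard Ethernet initializer
 * from <linux/etherdevice.h>; the probe function is hypothetical.
 */
static struct net_device *example_probe(void)
{
	struct net_device *dev;

	dev = alloc_netdev(0, "eth%d", NET_NAME_UNKNOWN, ether_setup);
	if (!dev)
		return NULL;

	/* dev->netdev_ops = &example_netdev_ops;  (driver-specific) */

	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}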

int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		rtnl_lock();
		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
		__rtnl_unlock();

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Report a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
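
/* Illustrative sketch, not part of the original file: the caller
 * supplies the storage, so the usual pattern is a stack-allocated
 * rtnl_link_stats64 that dev_get_stats() fills from whichever of the
 * three sources above the driver provides.
 */
static u64 example_total_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	return stats.rx_packets + stats.tx_packets;
}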

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_SYSFS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!p)
		p = vzalloc(alloc_size);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;
	dev->gso_min_segs = 0;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->all_adj_list.upper);
	INIT_LIST_HEAD(&dev->all_adj_list.lower);
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_SYSFS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
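
/* Illustrative sketch, not part of the original file: a hypothetical
 * multiqueue driver allocating eight TX and eight RX subqueue structs
 * in one call, with a private area sized for its own state. The
 * private struct is a stand-in; ether_setup() is the standard
 * Ethernet initializer.
 */
struct example_priv {
	int example_field;
};

static struct net_device *example_alloc_mq(void)
{
	return alloc_netdev_mqs(sizeof(struct example_priv), "mq%d",
				NET_NAME_UNKNOWN, ether_setup, 8, 8);
}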

/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	netif_free_tx_queues(dev);
#ifdef CONFIG_SYSFS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 *
 * Note: As most callers use a stack allocated list_head,
 * we force a list_del() to make sure the stack won't be
 * corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
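
/* Illustrative sketch, not part of the original file: tearing down
 * several devices under one RTNL acquisition batches the expensive
 * synchronize_net() calls inside rollback_registered_many(). The two
 * device pointers are hypothetical.
 */
static void example_batch_unregister(struct net_device *a,
				     struct net_device *b)
{
	LIST_HEAD(kill_list);

	rtnl_lock();
	unregister_netdevice_queue(a, &kill_list);
	unregister_netdevice_queue(b, &kill_list);
	/* list_del()s kill_list for stack safety, per the note above */
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}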

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and
	 * unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
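
/* CPU hotplug callback: when a CPU goes offline, splice its softnet
 * completion, output and NAPI poll queues onto the current CPU and
 * requeue any packets left in the dead CPU's input queues.
 */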
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

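	/* Kick the TX softirq so the spliced completion and output
	 * queues get processed on this CPU.
	 */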
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}

/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

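	/* ONE_FOR_ALL features are enabled if any underlying device has
	 * them; ALL_FOR_ALL features remain enabled only while every
	 * underlying device has them.
	 */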
	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

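/* Allocate a device hash table with NETDEV_HASHENTRIES buckets. */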
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

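/* Common helper for netdev_printk() and the level wrappers below:
 * prefix the message with driver and device names via dev_printk_emit()
 * when a parent device exists, otherwise fall back to plain printk().
 */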
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

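/* Generate one wrapper per log level, each forwarding to
 * __netdev_printk() with that level, e.g.:
 *	netdev_err(dev, "link is down\n");
 */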
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

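/* Per-namespace hooks that create and tear down the hash tables above. */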
static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network device
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
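		/* With RPS, csd lets remote CPUs kick this CPU's NET_RX
		 * softirq when they steer packets to its backlog.
		 */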
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too.  Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);