/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

cf508b12 259#ifdef CONFIG_LOCKDEP
723e98b7 260/*
c773e847 261 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
723e98b7
JP
262 * according to dev->type
263 */
264static const unsigned short netdev_lock_type[] =
265 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
266 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
267 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
268 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
269 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
270 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
271 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
272 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
273 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
274 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
275 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
276 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
211ed865
PG
277 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
278 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
279 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
723e98b7 280
36cbd3dc 281static const char *const netdev_lock_name[] =
723e98b7
JP
282 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
283 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
284 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
285 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
286 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
287 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
288 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
289 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
290 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
291 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
292 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
293 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
211ed865
PG
294 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
295 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
296 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
723e98b7
JP
297
298static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
cf508b12 299static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
723e98b7
JP
300
301static inline unsigned short netdev_lock_pos(unsigned short dev_type)
302{
303 int i;
304
305 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
306 if (netdev_lock_type[i] == dev_type)
307 return i;
308 /* the last key is used by default */
309 return ARRAY_SIZE(netdev_lock_type) - 1;
310}
311
cf508b12
DM
312static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
313 unsigned short dev_type)
723e98b7
JP
314{
315 int i;
316
317 i = netdev_lock_pos(dev_type);
318 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
319 netdev_lock_name[i]);
320}
cf508b12
DM
321
322static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
323{
324 int i;
325
326 i = netdev_lock_pos(dev->type);
327 lockdep_set_class_and_name(&dev->addr_list_lock,
328 &netdev_addr_lock_key[i],
329 netdev_lock_name[i]);
330}
723e98b7 331#else
cf508b12
DM
332static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
333 unsigned short dev_type)
334{
335}
336static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
723e98b7
JP
337{
338}
339#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
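
/*
 * Example usage (an illustrative sketch, not part of this file): a module
 * tapping IPv4 frames could register a &packet_type as below; the handler
 * name my_ipv4_rcv and the variable name my_ipv4_ptype are hypothetical.
 * Pair dev_add_pack() with dev_remove_pack() on unload.
 *
 *	static int my_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		(the handler owns this reference)
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ipv4_ptype __read_mostly = {
 *		.type	= htons(ETH_P_IP),
 *		.func	= my_ipv4_rcv,
 *	};
 *
 *	dev_add_pack(&my_ipv4_ptype);
 */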
1da177e4 393
1da177e4
LT
394/**
395 * __dev_remove_pack - remove packet handler
396 * @pt: packet type declaration
397 *
398 * Remove a protocol handler that was previously added to the kernel
399 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
400 * from the kernel lists and can be freed or reused once this function
4ec93edb 401 * returns.
1da177e4
LT
402 *
403 * The packet type might still be in use by receivers
404 * and must not be freed until after all the CPU's have gone
405 * through a quiescent state.
406 */
407void __dev_remove_pack(struct packet_type *pt)
408{
c07b68e8 409 struct list_head *head = ptype_head(pt);
1da177e4
LT
410 struct packet_type *pt1;
411
c07b68e8 412 spin_lock(&ptype_lock);
1da177e4
LT
413
414 list_for_each_entry(pt1, head, list) {
415 if (pt == pt1) {
416 list_del_rcu(&pt->list);
417 goto out;
418 }
419 }
420
7b6cd1ce 421 pr_warn("dev_remove_pack: %p not found\n", pt);
1da177e4 422out:
c07b68e8 423 spin_unlock(&ptype_lock);
1da177e4 424}
d1b19dff
ED
425EXPORT_SYMBOL(__dev_remove_pack);
426
1da177e4
LT
427/**
428 * dev_remove_pack - remove packet handler
429 * @pt: packet type declaration
430 *
431 * Remove a protocol handler that was previously added to the kernel
432 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
433 * from the kernel lists and can be freed or reused once this function
434 * returns.
435 *
436 * This call sleeps to guarantee that no CPU is looking at the packet
437 * type after return.
438 */
439void dev_remove_pack(struct packet_type *pt)
440{
441 __dev_remove_pack(pt);
4ec93edb 442
1da177e4
LT
443 synchronize_net();
444}
d1b19dff 445EXPORT_SYMBOL(dev_remove_pack);
1da177e4 446
62532da9
VY
447
448/**
449 * dev_add_offload - register offload handlers
450 * @po: protocol offload declaration
451 *
452 * Add protocol offload handlers to the networking stack. The passed
453 * &proto_offload is linked into kernel lists and may not be freed until
454 * it has been removed from the kernel lists.
455 *
456 * This call does not sleep therefore it can not
457 * guarantee all CPU's that are in middle of receiving packets
458 * will see the new offload handlers (until the next received packet).
459 */
460void dev_add_offload(struct packet_offload *po)
461{
462 struct list_head *head = &offload_base;
463
464 spin_lock(&offload_lock);
465 list_add_rcu(&po->list, head);
466 spin_unlock(&offload_lock);
467}
468EXPORT_SYMBOL(dev_add_offload);
469
470/**
471 * __dev_remove_offload - remove offload handler
472 * @po: packet offload declaration
473 *
474 * Remove a protocol offload handler that was previously added to the
475 * kernel offload handlers by dev_add_offload(). The passed &offload_type
476 * is removed from the kernel lists and can be freed or reused once this
477 * function returns.
478 *
479 * The packet type might still be in use by receivers
480 * and must not be freed until after all the CPU's have gone
481 * through a quiescent state.
482 */
1d143d9f 483static void __dev_remove_offload(struct packet_offload *po)
62532da9
VY
484{
485 struct list_head *head = &offload_base;
486 struct packet_offload *po1;
487
c53aa505 488 spin_lock(&offload_lock);
62532da9
VY
489
490 list_for_each_entry(po1, head, list) {
491 if (po == po1) {
492 list_del_rcu(&po->list);
493 goto out;
494 }
495 }
496
497 pr_warn("dev_remove_offload: %p not found\n", po);
498out:
c53aa505 499 spin_unlock(&offload_lock);
62532da9 500}
62532da9
VY
501
502/**
503 * dev_remove_offload - remove packet offload handler
504 * @po: packet offload declaration
505 *
506 * Remove a packet offload handler that was previously added to the kernel
507 * offload handlers by dev_add_offload(). The passed &offload_type is
508 * removed from the kernel lists and can be freed or reused once this
509 * function returns.
510 *
511 * This call sleeps to guarantee that no CPU is looking at the packet
512 * type after return.
513 */
514void dev_remove_offload(struct packet_offload *po)
515{
516 __dev_remove_offload(po);
517
518 synchronize_net();
519}
520EXPORT_SYMBOL(dev_remove_offload);
521
1da177e4
LT
522/******************************************************************************
523
524 Device Boot-time Settings Routines
525
526*******************************************************************************/
527
528/* Boot time configuration table */
529static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
530
531/**
532 * netdev_boot_setup_add - add new setup entry
533 * @name: name of the device
534 * @map: configured settings for the device
535 *
536 * Adds new setup entry to the dev_boot_setup list. The function
537 * returns 0 on error and 1 on success. This is a generic routine to
538 * all netdevices.
539 */
540static int netdev_boot_setup_add(char *name, struct ifmap *map)
541{
542 struct netdev_boot_setup *s;
543 int i;
544
545 s = dev_boot_setup;
546 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
547 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
548 memset(s[i].name, 0, sizeof(s[i].name));
93b3cff9 549 strlcpy(s[i].name, name, IFNAMSIZ);
1da177e4
LT
550 memcpy(&s[i].map, map, sizeof(s[i].map));
551 break;
552 }
553 }
554
555 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
556}
557
558/**
559 * netdev_boot_setup_check - check boot time settings
560 * @dev: the netdevice
561 *
562 * Check boot time settings for the device.
563 * The found settings are set for the device to be used
564 * later in the device probing.
565 * Returns 0 if no settings found, 1 if they are.
566 */
567int netdev_boot_setup_check(struct net_device *dev)
568{
569 struct netdev_boot_setup *s = dev_boot_setup;
570 int i;
571
572 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
573 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
93b3cff9 574 !strcmp(dev->name, s[i].name)) {
1da177e4
LT
575 dev->irq = s[i].map.irq;
576 dev->base_addr = s[i].map.base_addr;
577 dev->mem_start = s[i].map.mem_start;
578 dev->mem_end = s[i].map.mem_end;
579 return 1;
580 }
581 }
582 return 0;
583}
d1b19dff 584EXPORT_SYMBOL(netdev_boot_setup_check);
1da177e4
LT
585
586
587/**
588 * netdev_boot_base - get address from boot time settings
589 * @prefix: prefix for network device
590 * @unit: id for network device
591 *
592 * Check boot time settings for the base address of device.
593 * The found settings are set for the device to be used
594 * later in the device probing.
595 * Returns 0 if no settings found.
596 */
597unsigned long netdev_boot_base(const char *prefix, int unit)
598{
599 const struct netdev_boot_setup *s = dev_boot_setup;
600 char name[IFNAMSIZ];
601 int i;
602
603 sprintf(name, "%s%d", prefix, unit);
604
605 /*
606 * If device already registered then return base of 1
607 * to indicate not to probe for this interface
608 */
881d966b 609 if (__dev_get_by_name(&init_net, name))
1da177e4
LT
610 return 1;
611
612 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
613 if (!strcmp(name, s[i].name))
614 return s[i].map.base_addr;
615 return 0;
616}
617
618/*
619 * Saves at boot time configured settings for any netdevice.
620 */
621int __init netdev_boot_setup(char *str)
622{
623 int ints[5];
624 struct ifmap map;
625
626 str = get_options(str, ARRAY_SIZE(ints), ints);
627 if (!str || !*str)
628 return 0;
629
630 /* Save settings */
631 memset(&map, 0, sizeof(map));
632 if (ints[0] > 0)
633 map.irq = ints[1];
634 if (ints[0] > 1)
635 map.base_addr = ints[2];
636 if (ints[0] > 2)
637 map.mem_start = ints[3];
638 if (ints[0] > 3)
639 map.mem_end = ints[4];
640
641 /* Add new entry to the list */
642 return netdev_boot_setup_add(str, &map);
643}
644
645__setup("netdev=", netdev_boot_setup);
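
/*
 * Example (illustrative, not from this file): with the parsing above, a
 * kernel command line containing
 *
 *	netdev=7,0x300,0,0,eth1
 *
 * records irq 7 and base_addr 0x300 (mem_start/mem_end 0) for the device
 * that will later probe as "eth1"; netdev_boot_setup_check() then copies
 * these values into the matching net_device.  "eth1" and the numbers are
 * only example values.
 */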
646
647/*******************************************************************************
648
649 Device Interface Subroutines
650
651*******************************************************************************/
652
653/**
654 * __dev_get_by_name - find a device by its name
c4ea43c5 655 * @net: the applicable net namespace
1da177e4
LT
656 * @name: name to find
657 *
658 * Find an interface by name. Must be called under RTNL semaphore
659 * or @dev_base_lock. If the name is found a pointer to the device
660 * is returned. If the name is not found then %NULL is returned. The
661 * reference counters are not incremented so the caller must be
662 * careful with locks.
663 */
664
881d966b 665struct net_device *__dev_get_by_name(struct net *net, const char *name)
1da177e4 666{
0bd8d536
ED
667 struct net_device *dev;
668 struct hlist_head *head = dev_name_hash(net, name);
1da177e4 669
b67bfe0d 670 hlist_for_each_entry(dev, head, name_hlist)
1da177e4
LT
671 if (!strncmp(dev->name, name, IFNAMSIZ))
672 return dev;
0bd8d536 673
1da177e4
LT
674 return NULL;
675}
d1b19dff 676EXPORT_SYMBOL(__dev_get_by_name);
1da177e4 677
72c9528b
ED
678/**
679 * dev_get_by_name_rcu - find a device by its name
680 * @net: the applicable net namespace
681 * @name: name to find
682 *
683 * Find an interface by name.
684 * If the name is found a pointer to the device is returned.
685 * If the name is not found then %NULL is returned.
686 * The reference counters are not incremented so the caller must be
687 * careful with locks. The caller must hold RCU lock.
688 */
689
690struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
691{
72c9528b
ED
692 struct net_device *dev;
693 struct hlist_head *head = dev_name_hash(net, name);
694
b67bfe0d 695 hlist_for_each_entry_rcu(dev, head, name_hlist)
72c9528b
ED
696 if (!strncmp(dev->name, name, IFNAMSIZ))
697 return dev;
698
699 return NULL;
700}
701EXPORT_SYMBOL(dev_get_by_name_rcu);
702
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
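
/*
 * Example usage (an illustrative sketch, not part of this file): a lookup
 * by name from process context; "eth0" is only an example name.
 *
 *	struct net_device *dev;
 *
 *	dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
 *		dev_put(dev);
 *	}
 */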
1da177e4
LT
727
728/**
729 * __dev_get_by_index - find a device by its ifindex
c4ea43c5 730 * @net: the applicable net namespace
1da177e4
LT
731 * @ifindex: index of device
732 *
733 * Search for an interface by index. Returns %NULL if the device
734 * is not found or a pointer to the device. The device has not
735 * had its reference counter increased so the caller must be careful
736 * about locking. The caller must hold either the RTNL semaphore
737 * or @dev_base_lock.
738 */
739
881d966b 740struct net_device *__dev_get_by_index(struct net *net, int ifindex)
1da177e4 741{
0bd8d536
ED
742 struct net_device *dev;
743 struct hlist_head *head = dev_index_hash(net, ifindex);
1da177e4 744
b67bfe0d 745 hlist_for_each_entry(dev, head, index_hlist)
1da177e4
LT
746 if (dev->ifindex == ifindex)
747 return dev;
0bd8d536 748
1da177e4
LT
749 return NULL;
750}
d1b19dff 751EXPORT_SYMBOL(__dev_get_by_index);
1da177e4 752
fb699dfd
ED
753/**
754 * dev_get_by_index_rcu - find a device by its ifindex
755 * @net: the applicable net namespace
756 * @ifindex: index of device
757 *
758 * Search for an interface by index. Returns %NULL if the device
759 * is not found or a pointer to the device. The device has not
760 * had its reference counter increased so the caller must be careful
761 * about locking. The caller must hold RCU lock.
762 */
763
764struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
765{
fb699dfd
ED
766 struct net_device *dev;
767 struct hlist_head *head = dev_index_hash(net, ifindex);
768
b67bfe0d 769 hlist_for_each_entry_rcu(dev, head, index_hlist)
fb699dfd
ED
770 if (dev->ifindex == ifindex)
771 return dev;
772
773 return NULL;
774}
775EXPORT_SYMBOL(dev_get_by_index_rcu);
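
/*
 * Example usage (an illustrative sketch, not part of this file): a lockless
 * lookup under RCU; net and ifindex are assumed to be in scope, and the
 * result may only be dereferenced while rcu_read_lock() is held unless a
 * reference is taken with dev_hold().
 *
 *	struct net_device *dev;
 *	unsigned int mtu = 0;
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		mtu = dev->mtu;
 *	rcu_read_unlock();
 */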
776
1da177e4
LT
777
778/**
779 * dev_get_by_index - find a device by its ifindex
c4ea43c5 780 * @net: the applicable net namespace
1da177e4
LT
781 * @ifindex: index of device
782 *
783 * Search for an interface by index. Returns NULL if the device
784 * is not found or a pointer to the device. The device returned has
785 * had a reference added and the pointer is safe until the user calls
786 * dev_put to indicate they have finished with it.
787 */
788
881d966b 789struct net_device *dev_get_by_index(struct net *net, int ifindex)
1da177e4
LT
790{
791 struct net_device *dev;
792
fb699dfd
ED
793 rcu_read_lock();
794 dev = dev_get_by_index_rcu(net, ifindex);
1da177e4
LT
795 if (dev)
796 dev_hold(dev);
fb699dfd 797 rcu_read_unlock();
1da177e4
LT
798 return dev;
799}
d1b19dff 800EXPORT_SYMBOL(dev_get_by_index);
1da177e4 801
5dbe7c17
NS
802/**
803 * netdev_get_name - get a netdevice name, knowing its ifindex.
804 * @net: network namespace
805 * @name: a pointer to the buffer where the name will be stored.
806 * @ifindex: the ifindex of the interface to get the name from.
807 *
808 * The use of raw_seqcount_begin() and cond_resched() before
809 * retrying is required as we want to give the writers a chance
810 * to complete when CONFIG_PREEMPT is not set.
811 */
812int netdev_get_name(struct net *net, char *name, int ifindex)
813{
814 struct net_device *dev;
815 unsigned int seq;
816
817retry:
818 seq = raw_seqcount_begin(&devnet_rename_seq);
819 rcu_read_lock();
820 dev = dev_get_by_index_rcu(net, ifindex);
821 if (!dev) {
822 rcu_read_unlock();
823 return -ENODEV;
824 }
825
826 strcpy(name, dev->name);
827 rcu_read_unlock();
828 if (read_seqcount_retry(&devnet_rename_seq, seq)) {
829 cond_resched();
830 goto retry;
831 }
832
833 return 0;
834}
835
1da177e4 836/**
941666c2 837 * dev_getbyhwaddr_rcu - find a device by its hardware address
c4ea43c5 838 * @net: the applicable net namespace
1da177e4
LT
839 * @type: media type of device
840 * @ha: hardware address
841 *
842 * Search for an interface by MAC address. Returns NULL if the device
c506653d
ED
843 * is not found or a pointer to the device.
844 * The caller must hold RCU or RTNL.
941666c2 845 * The returned device has not had its ref count increased
1da177e4
LT
846 * and the caller must therefore be careful about locking
847 *
1da177e4
LT
848 */
849
941666c2
ED
850struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
851 const char *ha)
1da177e4
LT
852{
853 struct net_device *dev;
854
941666c2 855 for_each_netdev_rcu(net, dev)
1da177e4
LT
856 if (dev->type == type &&
857 !memcmp(dev->dev_addr, ha, dev->addr_len))
7562f876
PE
858 return dev;
859
860 return NULL;
1da177e4 861}
941666c2 862EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
cf309e3f 863
881d966b 864struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
1da177e4
LT
865{
866 struct net_device *dev;
867
4e9cac2b 868 ASSERT_RTNL();
881d966b 869 for_each_netdev(net, dev)
4e9cac2b 870 if (dev->type == type)
7562f876
PE
871 return dev;
872
873 return NULL;
4e9cac2b 874}
4e9cac2b
PM
875EXPORT_SYMBOL(__dev_getfirstbyhwtype);
876
881d966b 877struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
4e9cac2b 878{
99fe3c39 879 struct net_device *dev, *ret = NULL;
4e9cac2b 880
99fe3c39
ED
881 rcu_read_lock();
882 for_each_netdev_rcu(net, dev)
883 if (dev->type == type) {
884 dev_hold(dev);
885 ret = dev;
886 break;
887 }
888 rcu_read_unlock();
889 return ret;
1da177e4 890}
1da177e4
LT
891EXPORT_SYMBOL(dev_getfirstbyhwtype);
892
893/**
bb69ae04 894 * dev_get_by_flags_rcu - find any device with given flags
c4ea43c5 895 * @net: the applicable net namespace
1da177e4
LT
896 * @if_flags: IFF_* values
897 * @mask: bitmask of bits in if_flags to check
898 *
899 * Search for any interface with the given flags. Returns NULL if a device
bb69ae04
ED
900 * is not found or a pointer to the device. Must be called inside
901 * rcu_read_lock(), and result refcount is unchanged.
1da177e4
LT
902 */
903
bb69ae04 904struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
d1b19dff 905 unsigned short mask)
1da177e4 906{
7562f876 907 struct net_device *dev, *ret;
1da177e4 908
7562f876 909 ret = NULL;
c6d14c84 910 for_each_netdev_rcu(net, dev) {
1da177e4 911 if (((dev->flags ^ if_flags) & mask) == 0) {
7562f876 912 ret = dev;
1da177e4
LT
913 break;
914 }
915 }
7562f876 916 return ret;
1da177e4 917}
bb69ae04 918EXPORT_SYMBOL(dev_get_by_flags_rcu);
1da177e4
LT
919
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
95f050bf 928bool dev_valid_name(const char *name)
1da177e4 929{
c7fa9d18 930 if (*name == '\0')
95f050bf 931 return false;
b6fe17d6 932 if (strlen(name) >= IFNAMSIZ)
95f050bf 933 return false;
c7fa9d18 934 if (!strcmp(name, ".") || !strcmp(name, ".."))
95f050bf 935 return false;
c7fa9d18
DM
936
937 while (*name) {
938 if (*name == '/' || isspace(*name))
95f050bf 939 return false;
c7fa9d18
DM
940 name++;
941 }
95f050bf 942 return true;
1da177e4 943}
d1b19dff 944EXPORT_SYMBOL(dev_valid_name);
1da177e4
LT
945
946/**
b267b179
EB
947 * __dev_alloc_name - allocate a name for a device
948 * @net: network namespace to allocate the device name in
1da177e4 949 * @name: name format string
b267b179 950 * @buf: scratch buffer and result name string
1da177e4
LT
951 *
952 * Passed a format string - eg "lt%d" it will try and find a suitable
3041a069
SH
953 * id. It scans list of devices to build up a free map, then chooses
954 * the first empty slot. The caller must hold the dev_base or rtnl lock
955 * while allocating the name and adding the device in order to avoid
956 * duplicates.
957 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
958 * Returns the number of the unit assigned or a negative errno code.
1da177e4
LT
959 */
960
b267b179 961static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1da177e4
LT
962{
963 int i = 0;
1da177e4
LT
964 const char *p;
965 const int max_netdevices = 8*PAGE_SIZE;
cfcabdcc 966 unsigned long *inuse;
1da177e4
LT
967 struct net_device *d;
968
969 p = strnchr(name, IFNAMSIZ-1, '%');
970 if (p) {
971 /*
972 * Verify the string as this thing may have come from
973 * the user. There must be either one "%d" and no other "%"
974 * characters.
975 */
976 if (p[1] != 'd' || strchr(p + 2, '%'))
977 return -EINVAL;
978
979 /* Use one page as a bit array of possible slots */
cfcabdcc 980 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1da177e4
LT
981 if (!inuse)
982 return -ENOMEM;
983
881d966b 984 for_each_netdev(net, d) {
1da177e4
LT
985 if (!sscanf(d->name, name, &i))
986 continue;
987 if (i < 0 || i >= max_netdevices)
988 continue;
989
990 /* avoid cases where sscanf is not exact inverse of printf */
b267b179 991 snprintf(buf, IFNAMSIZ, name, i);
1da177e4
LT
992 if (!strncmp(buf, d->name, IFNAMSIZ))
993 set_bit(i, inuse);
994 }
995
996 i = find_first_zero_bit(inuse, max_netdevices);
997 free_page((unsigned long) inuse);
998 }
999
d9031024
OP
1000 if (buf != name)
1001 snprintf(buf, IFNAMSIZ, name, i);
b267b179 1002 if (!__dev_get_by_name(net, buf))
1da177e4 1003 return i;
1da177e4
LT
1004
1005 /* It is possible to run out of possible slots
1006 * when the name is long and there isn't enough space left
1007 * for the digits, or if all bits are used.
1008 */
1009 return -ENFILE;
1010}
1011

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
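
/*
 * Example usage (an illustrative sketch, not part of this file): a driver
 * that wants kernel-assigned numbering can pass a format string before
 * registering, with the rtnl lock already held; "mydev%d" and err are
 * hypothetical.
 *
 *	err = dev_alloc_name(dev, "mydev%d");
 *	if (err < 0)
 *		return err;
 *	err = register_netdevice(dev);
 */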
b267b179 1040
828de4f6
G
1041static int dev_alloc_name_ns(struct net *net,
1042 struct net_device *dev,
1043 const char *name)
d9031024 1044{
828de4f6
G
1045 char buf[IFNAMSIZ];
1046 int ret;
8ce6cebc 1047
828de4f6
G
1048 ret = __dev_alloc_name(net, name, buf);
1049 if (ret >= 0)
1050 strlcpy(dev->name, buf, IFNAMSIZ);
1051 return ret;
1052}
1053
1054static int dev_get_valid_name(struct net *net,
1055 struct net_device *dev,
1056 const char *name)
1057{
1058 BUG_ON(!net);
8ce6cebc 1059
d9031024
OP
1060 if (!dev_valid_name(name))
1061 return -EINVAL;
1062
1c5cae81 1063 if (strchr(name, '%'))
828de4f6 1064 return dev_alloc_name_ns(net, dev, name);
d9031024
OP
1065 else if (__dev_get_by_name(net, name))
1066 return -EEXIST;
8ce6cebc
DL
1067 else if (dev->name != name)
1068 strlcpy(dev->name, name, IFNAMSIZ);
d9031024
OP
1069
1070 return 0;
1071}
1da177e4
LT
1072
1073/**
1074 * dev_change_name - change name of a device
1075 * @dev: device
1076 * @newname: name (or format string) must be at least IFNAMSIZ
1077 *
1078 * Change name of a device, can pass format strings "eth%d".
1079 * for wildcarding.
1080 */
cf04a4c7 1081int dev_change_name(struct net_device *dev, const char *newname)
1da177e4 1082{
fcc5a03a 1083 char oldname[IFNAMSIZ];
1da177e4 1084 int err = 0;
fcc5a03a 1085 int ret;
881d966b 1086 struct net *net;
1da177e4
LT
1087
1088 ASSERT_RTNL();
c346dca1 1089 BUG_ON(!dev_net(dev));
1da177e4 1090
c346dca1 1091 net = dev_net(dev);
1da177e4
LT
1092 if (dev->flags & IFF_UP)
1093 return -EBUSY;
1094
30e6c9fa 1095 write_seqcount_begin(&devnet_rename_seq);
c91f6df2
BH
1096
1097 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
30e6c9fa 1098 write_seqcount_end(&devnet_rename_seq);
c8d90dca 1099 return 0;
c91f6df2 1100 }
c8d90dca 1101
fcc5a03a
HX
1102 memcpy(oldname, dev->name, IFNAMSIZ);
1103
828de4f6 1104 err = dev_get_valid_name(net, dev, newname);
c91f6df2 1105 if (err < 0) {
30e6c9fa 1106 write_seqcount_end(&devnet_rename_seq);
d9031024 1107 return err;
c91f6df2 1108 }
1da177e4 1109
fcc5a03a 1110rollback:
a1b3f594
EB
1111 ret = device_rename(&dev->dev, dev->name);
1112 if (ret) {
1113 memcpy(dev->name, oldname, IFNAMSIZ);
30e6c9fa 1114 write_seqcount_end(&devnet_rename_seq);
a1b3f594 1115 return ret;
dcc99773 1116 }
7f988eab 1117
30e6c9fa 1118 write_seqcount_end(&devnet_rename_seq);
c91f6df2 1119
7f988eab 1120 write_lock_bh(&dev_base_lock);
372b2312 1121 hlist_del_rcu(&dev->name_hlist);
72c9528b
ED
1122 write_unlock_bh(&dev_base_lock);
1123
1124 synchronize_rcu();
1125
1126 write_lock_bh(&dev_base_lock);
1127 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
7f988eab
HX
1128 write_unlock_bh(&dev_base_lock);
1129
056925ab 1130 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
fcc5a03a
HX
1131 ret = notifier_to_errno(ret);
1132
1133 if (ret) {
91e9c07b
ED
1134 /* err >= 0 after dev_alloc_name() or stores the first errno */
1135 if (err >= 0) {
fcc5a03a 1136 err = ret;
30e6c9fa 1137 write_seqcount_begin(&devnet_rename_seq);
fcc5a03a
HX
1138 memcpy(dev->name, oldname, IFNAMSIZ);
1139 goto rollback;
91e9c07b 1140 } else {
7b6cd1ce 1141 pr_err("%s: name change rollback failed: %d\n",
91e9c07b 1142 dev->name, ret);
fcc5a03a
HX
1143 }
1144 }
1da177e4
LT
1145
1146 return err;
1147}
1148
0b815a1a
SH
1149/**
1150 * dev_set_alias - change ifalias of a device
1151 * @dev: device
1152 * @alias: name up to IFALIASZ
f0db275a 1153 * @len: limit of bytes to copy from info
0b815a1a
SH
1154 *
1155 * Set ifalias for a device,
1156 */
1157int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1158{
7364e445
AK
1159 char *new_ifalias;
1160
0b815a1a
SH
1161 ASSERT_RTNL();
1162
1163 if (len >= IFALIASZ)
1164 return -EINVAL;
1165
96ca4a2c 1166 if (!len) {
388dfc2d
SK
1167 kfree(dev->ifalias);
1168 dev->ifalias = NULL;
96ca4a2c
OH
1169 return 0;
1170 }
1171
7364e445
AK
1172 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1173 if (!new_ifalias)
0b815a1a 1174 return -ENOMEM;
7364e445 1175 dev->ifalias = new_ifalias;
0b815a1a
SH
1176
1177 strlcpy(dev->ifalias, alias, len+1);
1178 return len;
1179}
1180
1181
d8a33ac4 1182/**
3041a069 1183 * netdev_features_change - device changes features
d8a33ac4
SH
1184 * @dev: device to cause notification
1185 *
1186 * Called to indicate a device has changed features.
1187 */
1188void netdev_features_change(struct net_device *dev)
1189{
056925ab 1190 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
d8a33ac4
SH
1191}
1192EXPORT_SYMBOL(netdev_features_change);
1193
1da177e4
LT
1194/**
1195 * netdev_state_change - device changes state
1196 * @dev: device to cause notification
1197 *
1198 * Called to indicate a device has changed state. This function calls
1199 * the notifier chains for netdev_chain and sends a NEWLINK message
1200 * to the routing socket.
1201 */
1202void netdev_state_change(struct net_device *dev)
1203{
1204 if (dev->flags & IFF_UP) {
056925ab 1205 call_netdevice_notifiers(NETDEV_CHANGE, dev);
7f294054 1206 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1da177e4
LT
1207 }
1208}
d1b19dff 1209EXPORT_SYMBOL(netdev_state_change);
1da177e4 1210
ee89bab1
AW
1211/**
1212 * netdev_notify_peers - notify network peers about existence of @dev
1213 * @dev: network device
1214 *
1215 * Generate traffic such that interested network peers are aware of
1216 * @dev, such as by generating a gratuitous ARP. This may be used when
1217 * a device wants to inform the rest of the network about some sort of
1218 * reconfiguration such as a failover event or virtual machine
1219 * migration.
1220 */
1221void netdev_notify_peers(struct net_device *dev)
c1da4ac7 1222{
ee89bab1
AW
1223 rtnl_lock();
1224 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1225 rtnl_unlock();
c1da4ac7 1226}
ee89bab1 1227EXPORT_SYMBOL(netdev_notify_peers);
c1da4ac7 1228
bd380811 1229static int __dev_open(struct net_device *dev)
1da177e4 1230{
d314774c 1231 const struct net_device_ops *ops = dev->netdev_ops;
3b8bcfd5 1232 int ret;
1da177e4 1233
e46b66bc
BH
1234 ASSERT_RTNL();
1235
1da177e4
LT
1236 if (!netif_device_present(dev))
1237 return -ENODEV;
1238
ca99ca14
NH
1239 /* Block netpoll from trying to do any rx path servicing.
1240 * If we don't do this there is a chance ndo_poll_controller
1241 * or ndo_poll may be running while we open the device
1242 */
da6e378b 1243 netpoll_rx_disable(dev);
ca99ca14 1244
3b8bcfd5
JB
1245 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1246 ret = notifier_to_errno(ret);
1247 if (ret)
1248 return ret;
1249
1da177e4 1250 set_bit(__LINK_STATE_START, &dev->state);
bada339b 1251
d314774c
SH
1252 if (ops->ndo_validate_addr)
1253 ret = ops->ndo_validate_addr(dev);
bada339b 1254
d314774c
SH
1255 if (!ret && ops->ndo_open)
1256 ret = ops->ndo_open(dev);
1da177e4 1257
ca99ca14
NH
1258 netpoll_rx_enable(dev);
1259
bada339b
JG
1260 if (ret)
1261 clear_bit(__LINK_STATE_START, &dev->state);
1262 else {
1da177e4 1263 dev->flags |= IFF_UP;
b4bd07c2 1264 net_dmaengine_get();
4417da66 1265 dev_set_rx_mode(dev);
1da177e4 1266 dev_activate(dev);
7bf23575 1267 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 1268 }
bada339b 1269
1da177e4
LT
1270 return ret;
1271}
1272
1273/**
bd380811
PM
1274 * dev_open - prepare an interface for use.
1275 * @dev: device to open
1da177e4 1276 *
bd380811
PM
1277 * Takes a device from down to up state. The device's private open
1278 * function is invoked and then the multicast lists are loaded. Finally
1279 * the device is moved into the up state and a %NETDEV_UP message is
1280 * sent to the netdev notifier chain.
1281 *
1282 * Calling this function on an active interface is a nop. On a failure
1283 * a negative errno code is returned.
1da177e4 1284 */
bd380811
PM
1285int dev_open(struct net_device *dev)
1286{
1287 int ret;
1288
bd380811
PM
1289 if (dev->flags & IFF_UP)
1290 return 0;
1291
bd380811
PM
1292 ret = __dev_open(dev);
1293 if (ret < 0)
1294 return ret;
1295
7f294054 1296 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
bd380811
PM
1297 call_netdevice_notifiers(NETDEV_UP, dev);
1298
1299 return ret;
1300}
1301EXPORT_SYMBOL(dev_open);
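
/*
 * Example usage (an illustrative sketch, not part of this file): bringing
 * an interface up from kernel code; dev_open() must run under RTNL.
 *
 *	int err;
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 *	if (err)
 *		pr_warn("%s: failed to open: %d\n", dev->name, err);
 */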
1302
44345724 1303static int __dev_close_many(struct list_head *head)
1da177e4 1304{
44345724 1305 struct net_device *dev;
e46b66bc 1306
bd380811 1307 ASSERT_RTNL();
9d5010db
DM
1308 might_sleep();
1309
5cde2829 1310 list_for_each_entry(dev, head, close_list) {
44345724 1311 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1da177e4 1312
44345724 1313 clear_bit(__LINK_STATE_START, &dev->state);
1da177e4 1314
44345724
OP
1315 /* Synchronize to scheduled poll. We cannot touch poll list, it
1316 * can be even on different cpu. So just clear netif_running().
1317 *
1318 * dev->stop() will invoke napi_disable() on all of it's
1319 * napi_struct instances on this device.
1320 */
1321 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1322 }
1da177e4 1323
44345724 1324 dev_deactivate_many(head);
d8b2a4d2 1325
5cde2829 1326 list_for_each_entry(dev, head, close_list) {
44345724 1327 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4 1328
44345724
OP
1329 /*
1330 * Call the device specific close. This cannot fail.
1331 * Only if device is UP
1332 *
1333 * We allow it to be called even after a DETACH hot-plug
1334 * event.
1335 */
1336 if (ops->ndo_stop)
1337 ops->ndo_stop(dev);
1338
44345724 1339 dev->flags &= ~IFF_UP;
44345724
OP
1340 net_dmaengine_put();
1341 }
1342
1343 return 0;
1344}
1345
1346static int __dev_close(struct net_device *dev)
1347{
f87e6f47 1348 int retval;
44345724
OP
1349 LIST_HEAD(single);
1350
ca99ca14 1351 /* Temporarily disable netpoll until the interface is down */
da6e378b 1352 netpoll_rx_disable(dev);
ca99ca14 1353
5cde2829 1354 list_add(&dev->close_list, &single);
f87e6f47
LT
1355 retval = __dev_close_many(&single);
1356 list_del(&single);
ca99ca14
NH
1357
1358 netpoll_rx_enable(dev);
f87e6f47 1359 return retval;
44345724
OP
1360}
1361
3fbd8758 1362static int dev_close_many(struct list_head *head)
44345724
OP
1363{
1364 struct net_device *dev, *tmp;
1da177e4 1365
5cde2829
EB
1366 /* Remove the devices that don't need to be closed */
1367 list_for_each_entry_safe(dev, tmp, head, close_list)
44345724 1368 if (!(dev->flags & IFF_UP))
5cde2829 1369 list_del_init(&dev->close_list);
44345724
OP
1370
1371 __dev_close_many(head);
1da177e4 1372
5cde2829 1373 list_for_each_entry_safe(dev, tmp, head, close_list) {
7f294054 1374 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
44345724 1375 call_netdevice_notifiers(NETDEV_DOWN, dev);
5cde2829 1376 list_del_init(&dev->close_list);
44345724 1377 }
bd380811
PM
1378
1379 return 0;
1380}
1381
1382/**
1383 * dev_close - shutdown an interface.
1384 * @dev: device to shutdown
1385 *
1386 * This function moves an active device into down state. A
1387 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1388 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1389 * chain.
1390 */
1391int dev_close(struct net_device *dev)
1392{
e14a5993
ED
1393 if (dev->flags & IFF_UP) {
1394 LIST_HEAD(single);
1da177e4 1395
ca99ca14 1396 /* Block netpoll rx while the interface is going down */
da6e378b 1397 netpoll_rx_disable(dev);
ca99ca14 1398
5cde2829 1399 list_add(&dev->close_list, &single);
e14a5993
ED
1400 dev_close_many(&single);
1401 list_del(&single);
ca99ca14
NH
1402
1403 netpoll_rx_enable(dev);
e14a5993 1404 }
da6e378b 1405 return 0;
1da177e4 1406}
d1b19dff 1407EXPORT_SYMBOL(dev_close);
1da177e4
LT
1408
1409
0187bdfb
BH
1410/**
1411 * dev_disable_lro - disable Large Receive Offload on a device
1412 * @dev: device
1413 *
1414 * Disable Large Receive Offload (LRO) on a net device. Must be
1415 * called under RTNL. This is needed if received packets may be
1416 * forwarded to another interface.
1417 */
1418void dev_disable_lro(struct net_device *dev)
1419{
f11970e3
NH
1420 /*
1421 * If we're trying to disable lro on a vlan device
1422 * use the underlying physical device instead
1423 */
1424 if (is_vlan_dev(dev))
1425 dev = vlan_dev_real_dev(dev);
1426
529d0489
MK
1427 /* the same for macvlan devices */
1428 if (netif_is_macvlan(dev))
1429 dev = macvlan_dev_real_dev(dev);
1430
bc5787c6
MM
1431 dev->wanted_features &= ~NETIF_F_LRO;
1432 netdev_update_features(dev);
27660515 1433
22d5969f
MM
1434 if (unlikely(dev->features & NETIF_F_LRO))
1435 netdev_WARN(dev, "failed to disable LRO!\n");
0187bdfb
BH
1436}
1437EXPORT_SYMBOL(dev_disable_lro);
1438
351638e7
JP
1439static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1440 struct net_device *dev)
1441{
1442 struct netdev_notifier_info info;
1443
1444 netdev_notifier_info_init(&info, dev);
1445 return nb->notifier_call(nb, val, &info);
1446}
0187bdfb 1447
881d966b
EB
1448static int dev_boot_phase = 1;
1449
1da177e4
LT
1450/**
1451 * register_netdevice_notifier - register a network notifier block
1452 * @nb: notifier
1453 *
1454 * Register a notifier to be called when network device events occur.
1455 * The notifier passed is linked into the kernel structures and must
1456 * not be reused until it has been unregistered. A negative errno code
1457 * is returned on a failure.
1458 *
1459 * When registered all registration and up events are replayed
4ec93edb 1460 * to the new notifier to allow device to have a race free
1da177e4
LT
1461 * view of the network device list.
1462 */
1463
1464int register_netdevice_notifier(struct notifier_block *nb)
1465{
1466 struct net_device *dev;
fcc5a03a 1467 struct net_device *last;
881d966b 1468 struct net *net;
1da177e4
LT
1469 int err;
1470
1471 rtnl_lock();
f07d5b94 1472 err = raw_notifier_chain_register(&netdev_chain, nb);
fcc5a03a
HX
1473 if (err)
1474 goto unlock;
881d966b
EB
1475 if (dev_boot_phase)
1476 goto unlock;
1477 for_each_net(net) {
1478 for_each_netdev(net, dev) {
351638e7 1479 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
881d966b
EB
1480 err = notifier_to_errno(err);
1481 if (err)
1482 goto rollback;
1483
1484 if (!(dev->flags & IFF_UP))
1485 continue;
1da177e4 1486
351638e7 1487 call_netdevice_notifier(nb, NETDEV_UP, dev);
881d966b 1488 }
1da177e4 1489 }
fcc5a03a
HX
1490
1491unlock:
1da177e4
LT
1492 rtnl_unlock();
1493 return err;
fcc5a03a
HX
1494
1495rollback:
1496 last = dev;
881d966b
EB
1497 for_each_net(net) {
1498 for_each_netdev(net, dev) {
1499 if (dev == last)
8f891489 1500 goto outroll;
fcc5a03a 1501
881d966b 1502 if (dev->flags & IFF_UP) {
351638e7
JP
1503 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1504 dev);
1505 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
881d966b 1506 }
351638e7 1507 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
fcc5a03a 1508 }
fcc5a03a 1509 }
c67625a1 1510
8f891489 1511outroll:
c67625a1 1512 raw_notifier_chain_unregister(&netdev_chain, nb);
fcc5a03a 1513 goto unlock;
1da177e4 1514}
d1b19dff 1515EXPORT_SYMBOL(register_netdevice_notifier);
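
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * subsystem reacting to device events; the callback and variable names
 * are hypothetical, and netdev_notifier_info_to_dev() comes from
 * <linux/netdevice.h>.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 */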
1da177e4
LT
1516
1517/**
1518 * unregister_netdevice_notifier - unregister a network notifier block
1519 * @nb: notifier
1520 *
1521 * Unregister a notifier previously registered by
1522 * register_netdevice_notifier(). The notifier is unlinked into the
1523 * kernel structures and may then be reused. A negative errno code
1524 * is returned on a failure.
7d3d43da
EB
1525 *
1526 * After unregistering unregister and down device events are synthesized
1527 * for all devices on the device list to the removed notifier to remove
1528 * the need for special case cleanup code.
1da177e4
LT
1529 */
1530
1531int unregister_netdevice_notifier(struct notifier_block *nb)
1532{
7d3d43da
EB
1533 struct net_device *dev;
1534 struct net *net;
9f514950
HX
1535 int err;
1536
1537 rtnl_lock();
f07d5b94 1538 err = raw_notifier_chain_unregister(&netdev_chain, nb);
7d3d43da
EB
1539 if (err)
1540 goto unlock;
1541
1542 for_each_net(net) {
1543 for_each_netdev(net, dev) {
1544 if (dev->flags & IFF_UP) {
351638e7
JP
1545 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1546 dev);
1547 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
7d3d43da 1548 }
351638e7 1549 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
7d3d43da
EB
1550 }
1551 }
1552unlock:
9f514950
HX
1553 rtnl_unlock();
1554 return err;
1da177e4 1555}
d1b19dff 1556EXPORT_SYMBOL(unregister_netdevice_notifier);
1da177e4 1557
351638e7
JP
1558/**
1559 * call_netdevice_notifiers_info - call all network notifier blocks
1560 * @val: value passed unmodified to notifier function
1561 * @dev: net_device pointer passed unmodified to notifier function
1562 * @info: notifier information data
1563 *
1564 * Call all network notifier blocks. Parameters and return value
1565 * are as for raw_notifier_call_chain().
1566 */
1567
1d143d9f 1568static int call_netdevice_notifiers_info(unsigned long val,
1569 struct net_device *dev,
1570 struct netdev_notifier_info *info)
351638e7
JP
1571{
1572 ASSERT_RTNL();
1573 netdev_notifier_info_init(info, dev);
1574 return raw_notifier_call_chain(&netdev_chain, val, info);
1575}
351638e7 1576
1da177e4
LT
1577/**
1578 * call_netdevice_notifiers - call all network notifier blocks
1579 * @val: value passed unmodified to notifier function
c4ea43c5 1580 * @dev: net_device pointer passed unmodified to notifier function
1da177e4
LT
1581 *
1582 * Call all network notifier blocks. Parameters and return value
f07d5b94 1583 * are as for raw_notifier_call_chain().
1da177e4
LT
1584 */
1585
ad7379d4 1586int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1da177e4 1587{
351638e7
JP
1588 struct netdev_notifier_info info;
1589
1590 return call_netdevice_notifiers_info(val, dev, &info);
1da177e4 1591}
edf947f1 1592EXPORT_SYMBOL(call_netdevice_notifiers);
1da177e4 1593
c5905afb 1594static struct static_key netstamp_needed __read_mostly;
b90e5794 1595#ifdef HAVE_JUMP_LABEL
c5905afb 1596/* We are not allowed to call static_key_slow_dec() from irq context
b90e5794 1597 * If net_disable_timestamp() is called from irq context, defer the
c5905afb 1598 * static_key_slow_dec() calls.
b90e5794
ED
1599 */
1600static atomic_t netstamp_needed_deferred;
1601#endif
1da177e4
LT
1602
1603void net_enable_timestamp(void)
1604{
b90e5794
ED
1605#ifdef HAVE_JUMP_LABEL
1606 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1607
1608 if (deferred) {
1609 while (--deferred)
c5905afb 1610 static_key_slow_dec(&netstamp_needed);
b90e5794
ED
1611 return;
1612 }
1613#endif
c5905afb 1614 static_key_slow_inc(&netstamp_needed);
1da177e4 1615}
d1b19dff 1616EXPORT_SYMBOL(net_enable_timestamp);
1da177e4
LT
1617
1618void net_disable_timestamp(void)
1619{
b90e5794
ED
1620#ifdef HAVE_JUMP_LABEL
1621 if (in_interrupt()) {
1622 atomic_inc(&netstamp_needed_deferred);
1623 return;
1624 }
1625#endif
c5905afb 1626 static_key_slow_dec(&netstamp_needed);
1da177e4 1627}
d1b19dff 1628EXPORT_SYMBOL(net_disable_timestamp);
1da177e4 1629
3b098e2d 1630static inline void net_timestamp_set(struct sk_buff *skb)
1da177e4 1631{
588f0330 1632 skb->tstamp.tv64 = 0;
c5905afb 1633 if (static_key_false(&netstamp_needed))
a61bbcf2 1634 __net_timestamp(skb);
1da177e4
LT
1635}
1636
588f0330 1637#define net_timestamp_check(COND, SKB) \
c5905afb 1638 if (static_key_false(&netstamp_needed)) { \
588f0330
ED
1639 if ((COND) && !(SKB)->tstamp.tv64) \
1640 __net_timestamp(SKB); \
1641 } \
3b098e2d 1642
79b569f0
DL
1643static inline bool is_skb_forwardable(struct net_device *dev,
1644 struct sk_buff *skb)
1645{
1646 unsigned int len;
1647
1648 if (!(dev->flags & IFF_UP))
1649 return false;
1650
1651 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1652 if (skb->len <= len)
1653 return true;
1654
1655 /* if TSO is enabled, we don't care about the length as the packet
1656 * could be forwarded without being segmented before
1657 */
1658 if (skb_is_gso(skb))
1659 return true;
1660
1661 return false;
1662}
1663
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->protocol = eth_type_trans(skb, dev);

	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
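
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * pair-style driver's ndo_start_xmit() handing frames to its peer device;
 * my_priv and its peer field are hypothetical.  dev_forward_skb() consumes
 * the skb on both success and drop, so the caller does not free it.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		dev_forward_skb(priv->peer, skb);
 *		return NETDEV_TX_OK;
 *	}
 */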
1704
71d9dec2
CG
1705static inline int deliver_skb(struct sk_buff *skb,
1706 struct packet_type *pt_prev,
1707 struct net_device *orig_dev)
1708{
1080e512
MT
1709 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1710 return -ENOMEM;
71d9dec2
CG
1711 atomic_inc(&skb->users);
1712 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1713}
1714
c0de08d0
EL
1715static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1716{
a3d744e9 1717 if (!ptype->af_packet_priv || !skb->sk)
c0de08d0
EL
1718 return false;
1719
1720 if (ptype->id_match)
1721 return ptype->id_match(ptype, skb->sk);
1722 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1723 return true;
1724
1725 return false;
1726}
1727
1da177e4
LT
1728/*
1729 * Support routine. Sends outgoing frames to any network
1730 * taps currently in use.
1731 */
1732
f6a78bfc 1733static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
1734{
1735 struct packet_type *ptype;
71d9dec2
CG
1736 struct sk_buff *skb2 = NULL;
1737 struct packet_type *pt_prev = NULL;
a61bbcf2 1738
1da177e4
LT
1739 rcu_read_lock();
1740 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1741 /* Never send packets back to the socket
1742 * they originated from - MvS (miquels@drinkel.ow.org)
1743 */
1744 if ((ptype->dev == dev || !ptype->dev) &&
c0de08d0 1745 (!skb_loop_sk(ptype, skb))) {
71d9dec2
CG
1746 if (pt_prev) {
1747 deliver_skb(skb2, pt_prev, skb->dev);
1748 pt_prev = ptype;
1749 continue;
1750 }
1751
1752 skb2 = skb_clone(skb, GFP_ATOMIC);
1da177e4
LT
1753 if (!skb2)
1754 break;
1755
70978182
ED
1756 net_timestamp_set(skb2);
1757
1da177e4
LT
1758 /* skb->nh should be correctly
1759 set by sender, so that the second statement is
1760 just protection against buggy protocols.
1761 */
459a98ed 1762 skb_reset_mac_header(skb2);
1da177e4 1763
d56f90a7 1764 if (skb_network_header(skb2) < skb2->data ||
ced14f68 1765 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
e87cc472
JP
1766 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1767 ntohs(skb2->protocol),
1768 dev->name);
c1d2bbe1 1769 skb_reset_network_header(skb2);
1da177e4
LT
1770 }
1771
b0e380b1 1772 skb2->transport_header = skb2->network_header;
1da177e4 1773 skb2->pkt_type = PACKET_OUTGOING;
71d9dec2 1774 pt_prev = ptype;
1da177e4
LT
1775 }
1776 }
71d9dec2
CG
1777 if (pt_prev)
1778 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1da177e4
LT
1779 rcu_read_unlock();
1780}
1781
2c53040f
BH
1782/**
1783 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
4f57c087
JF
1784 * @dev: Network device
1785 * @txq: number of queues available
1786 *
1787 * If real_num_tx_queues is changed the tc mappings may no longer be
 1788 * valid. To resolve this, verify that the tc mapping remains valid and,
 1789 * if it does not, NULL the mapping. With no priorities mapping to this
 1790 * offset/count pair it will no longer be used. In the worst case, if TC0
 1791 * is invalid, nothing can be done, so priority mappings are disabled. It is
 1792 * expected that drivers will fix this mapping if they can before
1793 * calling netif_set_real_num_tx_queues.
1794 */
bb134d22 1795static void netif_setup_tc(struct net_device *dev, unsigned int txq)
4f57c087
JF
1796{
1797 int i;
1798 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1799
1800 /* If TC0 is invalidated disable TC mapping */
1801 if (tc->offset + tc->count > txq) {
7b6cd1ce 1802 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
4f57c087
JF
1803 dev->num_tc = 0;
1804 return;
1805 }
1806
1807 /* Invalidated prio to tc mappings set to TC0 */
1808 for (i = 1; i < TC_BITMASK + 1; i++) {
1809 int q = netdev_get_prio_tc_map(dev, i);
1810
1811 tc = &dev->tc_to_txq[q];
1812 if (tc->offset + tc->count > txq) {
7b6cd1ce
JP
1813 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1814 i, q);
4f57c087
JF
1815 netdev_set_prio_tc_map(dev, i, 0);
1816 }
1817 }
1818}
1819
537c00de
AD
1820#ifdef CONFIG_XPS
1821static DEFINE_MUTEX(xps_map_mutex);
1822#define xmap_dereference(P) \
1823 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1824
10cdc3f3
AD
1825static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1826 int cpu, u16 index)
537c00de 1827{
10cdc3f3
AD
1828 struct xps_map *map = NULL;
1829 int pos;
537c00de 1830
10cdc3f3
AD
1831 if (dev_maps)
1832 map = xmap_dereference(dev_maps->cpu_map[cpu]);
537c00de 1833
10cdc3f3
AD
1834 for (pos = 0; map && pos < map->len; pos++) {
1835 if (map->queues[pos] == index) {
537c00de
AD
1836 if (map->len > 1) {
1837 map->queues[pos] = map->queues[--map->len];
1838 } else {
10cdc3f3 1839 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
537c00de
AD
1840 kfree_rcu(map, rcu);
1841 map = NULL;
1842 }
10cdc3f3 1843 break;
537c00de 1844 }
537c00de
AD
1845 }
1846
10cdc3f3
AD
1847 return map;
1848}
1849
024e9679 1850static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
10cdc3f3
AD
1851{
1852 struct xps_dev_maps *dev_maps;
024e9679 1853 int cpu, i;
10cdc3f3
AD
1854 bool active = false;
1855
1856 mutex_lock(&xps_map_mutex);
1857 dev_maps = xmap_dereference(dev->xps_maps);
1858
1859 if (!dev_maps)
1860 goto out_no_maps;
1861
1862 for_each_possible_cpu(cpu) {
024e9679
AD
1863 for (i = index; i < dev->num_tx_queues; i++) {
1864 if (!remove_xps_queue(dev_maps, cpu, i))
1865 break;
1866 }
1867 if (i == dev->num_tx_queues)
10cdc3f3
AD
1868 active = true;
1869 }
1870
1871 if (!active) {
537c00de
AD
1872 RCU_INIT_POINTER(dev->xps_maps, NULL);
1873 kfree_rcu(dev_maps, rcu);
1874 }
1875
024e9679
AD
1876 for (i = index; i < dev->num_tx_queues; i++)
1877 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1878 NUMA_NO_NODE);
1879
537c00de
AD
1880out_no_maps:
1881 mutex_unlock(&xps_map_mutex);
1882}
1883
01c5f864
AD
1884static struct xps_map *expand_xps_map(struct xps_map *map,
1885 int cpu, u16 index)
1886{
1887 struct xps_map *new_map;
1888 int alloc_len = XPS_MIN_MAP_ALLOC;
1889 int i, pos;
1890
1891 for (pos = 0; map && pos < map->len; pos++) {
1892 if (map->queues[pos] != index)
1893 continue;
1894 return map;
1895 }
1896
1897 /* Need to add queue to this CPU's existing map */
1898 if (map) {
1899 if (pos < map->alloc_len)
1900 return map;
1901
1902 alloc_len = map->alloc_len * 2;
1903 }
1904
1905 /* Need to allocate new map to store queue on this CPU's map */
1906 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1907 cpu_to_node(cpu));
1908 if (!new_map)
1909 return NULL;
1910
1911 for (i = 0; i < pos; i++)
1912 new_map->queues[i] = map->queues[i];
1913 new_map->alloc_len = alloc_len;
1914 new_map->len = pos;
1915
1916 return new_map;
1917}
1918
3573540c
MT
1919int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1920 u16 index)
537c00de 1921{
01c5f864 1922 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
537c00de 1923 struct xps_map *map, *new_map;
537c00de 1924 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
01c5f864
AD
1925 int cpu, numa_node_id = -2;
1926 bool active = false;
537c00de
AD
1927
1928 mutex_lock(&xps_map_mutex);
1929
1930 dev_maps = xmap_dereference(dev->xps_maps);
1931
01c5f864
AD
1932 /* allocate memory for queue storage */
1933 for_each_online_cpu(cpu) {
1934 if (!cpumask_test_cpu(cpu, mask))
1935 continue;
1936
1937 if (!new_dev_maps)
1938 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2bb60cb9
AD
1939 if (!new_dev_maps) {
1940 mutex_unlock(&xps_map_mutex);
01c5f864 1941 return -ENOMEM;
2bb60cb9 1942 }
01c5f864
AD
1943
1944 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1945 NULL;
1946
1947 map = expand_xps_map(map, cpu, index);
1948 if (!map)
1949 goto error;
1950
1951 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1952 }
1953
1954 if (!new_dev_maps)
1955 goto out_no_new_maps;
1956
537c00de 1957 for_each_possible_cpu(cpu) {
01c5f864
AD
1958 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1959 /* add queue to CPU maps */
1960 int pos = 0;
1961
1962 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1963 while ((pos < map->len) && (map->queues[pos] != index))
1964 pos++;
1965
1966 if (pos == map->len)
1967 map->queues[map->len++] = index;
537c00de 1968#ifdef CONFIG_NUMA
537c00de
AD
1969 if (numa_node_id == -2)
1970 numa_node_id = cpu_to_node(cpu);
1971 else if (numa_node_id != cpu_to_node(cpu))
1972 numa_node_id = -1;
537c00de 1973#endif
01c5f864
AD
1974 } else if (dev_maps) {
1975 /* fill in the new device map from the old device map */
1976 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1977 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
537c00de 1978 }
01c5f864 1979
537c00de
AD
1980 }
1981
01c5f864
AD
1982 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1983
537c00de 1984 /* Cleanup old maps */
01c5f864
AD
1985 if (dev_maps) {
1986 for_each_possible_cpu(cpu) {
1987 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1988 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1989 if (map && map != new_map)
1990 kfree_rcu(map, rcu);
1991 }
537c00de 1992
01c5f864 1993 kfree_rcu(dev_maps, rcu);
537c00de
AD
1994 }
1995
01c5f864
AD
1996 dev_maps = new_dev_maps;
1997 active = true;
537c00de 1998
01c5f864
AD
1999out_no_new_maps:
2000 /* update Tx queue numa node */
537c00de
AD
2001 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2002 (numa_node_id >= 0) ? numa_node_id :
2003 NUMA_NO_NODE);
2004
01c5f864
AD
2005 if (!dev_maps)
2006 goto out_no_maps;
2007
2008 /* removes queue from unused CPUs */
2009 for_each_possible_cpu(cpu) {
2010 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2011 continue;
2012
2013 if (remove_xps_queue(dev_maps, cpu, index))
2014 active = true;
2015 }
2016
2017 /* free map if not active */
2018 if (!active) {
2019 RCU_INIT_POINTER(dev->xps_maps, NULL);
2020 kfree_rcu(dev_maps, rcu);
2021 }
2022
2023out_no_maps:
537c00de
AD
2024 mutex_unlock(&xps_map_mutex);
2025
2026 return 0;
2027error:
01c5f864
AD
2028 /* remove any maps that we added */
2029 for_each_possible_cpu(cpu) {
2030 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2031 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2032 NULL;
2033 if (new_map && new_map != map)
2034 kfree(new_map);
2035 }
2036
537c00de
AD
2037 mutex_unlock(&xps_map_mutex);
2038
537c00de
AD
2039 kfree(new_dev_maps);
2040 return -ENOMEM;
2041}
2042EXPORT_SYMBOL(netif_set_xps_queue);
2043
2044#endif
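/* Editor's sketch (not part of dev.c): a multiqueue driver might spread its
 * TX queues over the online CPUs with netif_set_xps_queue(), e.g. from its
 * open routine.  "my_open" and the simple round-robin layout are made up.
 */
#include <linux/netdevice.h>
#include <linux/cpumask.h>

static int my_open(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->real_num_tx_queues; i++)
		/* steer transmissions done on this CPU set to TX queue i */
		netif_set_xps_queue(dev, cpumask_of(i % num_online_cpus()), i);

	return 0;
}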
f0796d5c
JF
2045/*
2046 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 2047 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2048 */
e6484930 2049int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
f0796d5c 2050{
1d24eb48
TH
2051 int rc;
2052
e6484930
TH
2053 if (txq < 1 || txq > dev->num_tx_queues)
2054 return -EINVAL;
f0796d5c 2055
5c56580b
BH
2056 if (dev->reg_state == NETREG_REGISTERED ||
2057 dev->reg_state == NETREG_UNREGISTERING) {
e6484930
TH
2058 ASSERT_RTNL();
2059
1d24eb48
TH
2060 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2061 txq);
bf264145
TH
2062 if (rc)
2063 return rc;
2064
4f57c087
JF
2065 if (dev->num_tc)
2066 netif_setup_tc(dev, txq);
2067
024e9679 2068 if (txq < dev->real_num_tx_queues) {
e6484930 2069 qdisc_reset_all_tx_gt(dev, txq);
024e9679
AD
2070#ifdef CONFIG_XPS
2071 netif_reset_xps_queues_gt(dev, txq);
2072#endif
2073 }
f0796d5c 2074 }
e6484930
TH
2075
2076 dev->real_num_tx_queues = txq;
2077 return 0;
f0796d5c
JF
2078}
2079EXPORT_SYMBOL(netif_set_real_num_tx_queues);
56079431 2080
62fe0b40
BH
2081#ifdef CONFIG_RPS
2082/**
2083 * netif_set_real_num_rx_queues - set actual number of RX queues used
2084 * @dev: Network device
2085 * @rxq: Actual number of RX queues
2086 *
2087 * This must be called either with the rtnl_lock held or before
2088 * registration of the net device. Returns 0 on success, or a
4e7f7951
BH
2089 * negative error code. If called before registration, it always
2090 * succeeds.
62fe0b40
BH
2091 */
2092int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2093{
2094 int rc;
2095
bd25fa7b
TH
2096 if (rxq < 1 || rxq > dev->num_rx_queues)
2097 return -EINVAL;
2098
62fe0b40
BH
2099 if (dev->reg_state == NETREG_REGISTERED) {
2100 ASSERT_RTNL();
2101
62fe0b40
BH
2102 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2103 rxq);
2104 if (rc)
2105 return rc;
62fe0b40
BH
2106 }
2107
2108 dev->real_num_rx_queues = rxq;
2109 return 0;
2110}
2111EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2112#endif
2113
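/* Editor's sketch (not part of dev.c): once a driver learns how many queue
 * pairs the hardware actually enabled, it tells the stack under rtnl_lock.
 * "my_set_queues" and the hw_tx/hw_rx counts are hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int my_set_queues(struct net_device *dev,
			 unsigned int hw_tx, unsigned int hw_rx)
{
	int err;

	rtnl_lock();
	err = netif_set_real_num_tx_queues(dev, hw_tx);
	if (!err)
		err = netif_set_real_num_rx_queues(dev, hw_rx);
	rtnl_unlock();
	return err;
}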
2c53040f
BH
2114/**
2115 * netif_get_num_default_rss_queues - default number of RSS queues
16917b87
YM
2116 *
2117 * This routine should set an upper limit on the number of RSS queues
2118 * used by default by multiqueue devices.
2119 */
a55b138b 2120int netif_get_num_default_rss_queues(void)
16917b87
YM
2121{
2122 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2123}
2124EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2125
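/* Editor's sketch (not part of dev.c): drivers typically clamp their RSS
 * queue count with this helper; "MY_HW_MAX_QUEUES" is hypothetical.
 *
 *	nr = min_t(int, MY_HW_MAX_QUEUES,
 *		   netif_get_num_default_rss_queues());
 */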
def82a1d 2126static inline void __netif_reschedule(struct Qdisc *q)
56079431 2127{
def82a1d
JP
2128 struct softnet_data *sd;
2129 unsigned long flags;
56079431 2130
def82a1d
JP
2131 local_irq_save(flags);
2132 sd = &__get_cpu_var(softnet_data);
a9cbd588
CG
2133 q->next_sched = NULL;
2134 *sd->output_queue_tailp = q;
2135 sd->output_queue_tailp = &q->next_sched;
def82a1d
JP
2136 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2137 local_irq_restore(flags);
2138}
2139
2140void __netif_schedule(struct Qdisc *q)
2141{
2142 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2143 __netif_reschedule(q);
56079431
DV
2144}
2145EXPORT_SYMBOL(__netif_schedule);
2146
e6247027
ED
2147struct dev_kfree_skb_cb {
2148 enum skb_free_reason reason;
2149};
2150
2151static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2152{
2153 return (struct dev_kfree_skb_cb *)skb->cb;
2154}
2155
2156void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
56079431 2157{
e6247027 2158 unsigned long flags;
56079431 2159
e6247027
ED
2160 if (likely(atomic_read(&skb->users) == 1)) {
2161 smp_rmb();
2162 atomic_set(&skb->users, 0);
2163 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2164 return;
bea3348e 2165 }
e6247027
ED
2166 get_kfree_skb_cb(skb)->reason = reason;
2167 local_irq_save(flags);
2168 skb->next = __this_cpu_read(softnet_data.completion_queue);
2169 __this_cpu_write(softnet_data.completion_queue, skb);
2170 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2171 local_irq_restore(flags);
56079431 2172}
e6247027 2173EXPORT_SYMBOL(__dev_kfree_skb_irq);
56079431 2174
e6247027 2175void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
56079431
DV
2176{
2177 if (in_irq() || irqs_disabled())
e6247027 2178 __dev_kfree_skb_irq(skb, reason);
56079431
DV
2179 else
2180 dev_kfree_skb(skb);
2181}
e6247027 2182EXPORT_SYMBOL(__dev_kfree_skb_any);
56079431
DV
2183
2184
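/* Editor's sketch (not part of dev.c): a TX-completion routine that may run
 * in hard-IRQ, softirq or process context frees finished skbs with the _any
 * variant above, which defers to the TX softirq when IRQs are off.
 * "my_fetch_completed" is a hypothetical ring helper.
 */
#include <linux/netdevice.h>

static struct sk_buff *my_fetch_completed(struct net_device *dev); /* hypothetical */

static void my_tx_clean(struct net_device *dev)
{
	struct sk_buff *skb;

	while ((skb = my_fetch_completed(dev)) != NULL) {
		dev->stats.tx_packets++;
		/* successfully sent: count it as consumed, not dropped */
		__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
	}
}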
bea3348e
SH
2185/**
2186 * netif_device_detach - mark device as removed
2187 * @dev: network device
2188 *
2189 * Mark device as removed from system and therefore no longer available.
2190 */
56079431
DV
2191void netif_device_detach(struct net_device *dev)
2192{
2193 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2194 netif_running(dev)) {
d543103a 2195 netif_tx_stop_all_queues(dev);
56079431
DV
2196 }
2197}
2198EXPORT_SYMBOL(netif_device_detach);
2199
bea3348e
SH
2200/**
2201 * netif_device_attach - mark device as attached
2202 * @dev: network device
2203 *
 2204 * Mark device as attached to the system and restart it if needed.
2205 */
56079431
DV
2206void netif_device_attach(struct net_device *dev)
2207{
2208 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2209 netif_running(dev)) {
d543103a 2210 netif_tx_wake_all_queues(dev);
4ec93edb 2211 __netdev_watchdog_up(dev);
56079431
DV
2212 }
2213}
2214EXPORT_SYMBOL(netif_device_attach);
2215
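/* Editor's sketch (not part of dev.c): PCI drivers commonly pair these two
 * helpers in their suspend/resume callbacks; error handling is trimmed.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>

static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* mark absent, stop all TX queues */
	return 0;
}

static int my_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_attach(dev);	/* mark present, wake queues + watchdog */
	return 0;
}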
36c92474
BH
2216static void skb_warn_bad_offload(const struct sk_buff *skb)
2217{
65e9d2fa 2218 static const netdev_features_t null_features = 0;
36c92474
BH
2219 struct net_device *dev = skb->dev;
2220 const char *driver = "";
2221
c846ad9b
BG
2222 if (!net_ratelimit())
2223 return;
2224
36c92474
BH
2225 if (dev && dev->dev.parent)
2226 driver = dev_driver_string(dev->dev.parent);
2227
2228 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2229 "gso_type=%d ip_summed=%d\n",
65e9d2fa
MM
2230 driver, dev ? &dev->features : &null_features,
2231 skb->sk ? &skb->sk->sk_route_caps : &null_features,
36c92474
BH
2232 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2233 skb_shinfo(skb)->gso_type, skb->ip_summed);
2234}
2235
1da177e4
LT
2236/*
2237 * Invalidate hardware checksum when packet is to be mangled, and
2238 * complete checksum manually on outgoing path.
2239 */
84fa7933 2240int skb_checksum_help(struct sk_buff *skb)
1da177e4 2241{
d3bc23e7 2242 __wsum csum;
663ead3b 2243 int ret = 0, offset;
1da177e4 2244
84fa7933 2245 if (skb->ip_summed == CHECKSUM_COMPLETE)
a430a43d
HX
2246 goto out_set_summed;
2247
2248 if (unlikely(skb_shinfo(skb)->gso_size)) {
36c92474
BH
2249 skb_warn_bad_offload(skb);
2250 return -EINVAL;
1da177e4
LT
2251 }
2252
cef401de
ED
2253 /* Before computing a checksum, we should make sure no frag could
2254 * be modified by an external entity : checksum could be wrong.
2255 */
2256 if (skb_has_shared_frag(skb)) {
2257 ret = __skb_linearize(skb);
2258 if (ret)
2259 goto out;
2260 }
2261
55508d60 2262 offset = skb_checksum_start_offset(skb);
a030847e
HX
2263 BUG_ON(offset >= skb_headlen(skb));
2264 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2265
2266 offset += skb->csum_offset;
2267 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2268
2269 if (skb_cloned(skb) &&
2270 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1da177e4
LT
2271 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2272 if (ret)
2273 goto out;
2274 }
2275
a030847e 2276 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
a430a43d 2277out_set_summed:
1da177e4 2278 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 2279out:
1da177e4
LT
2280 return ret;
2281}
d1b19dff 2282EXPORT_SYMBOL(skb_checksum_help);
1da177e4 2283
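/* Editor's sketch (not part of dev.c): a driver whose hardware can only
 * checksum IPv4 packets might resolve everything else in software with
 * skb_checksum_help() before handing the frame to the NIC.
 * "my_hw_queue_frame" is hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static netdev_tx_t my_hw_queue_frame(struct sk_buff *skb,
				     struct net_device *dev);	/* hypothetical */

static netdev_tx_t my_csum_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    skb->protocol != htons(ETH_P_IP) &&
	    skb_checksum_help(skb)) {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	return my_hw_queue_frame(skb, dev);
}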
ec5f0615 2284__be16 skb_network_protocol(struct sk_buff *skb)
f6a78bfc 2285{
252e3346 2286 __be16 type = skb->protocol;
c80a8512 2287 int vlan_depth = ETH_HLEN;
f6a78bfc 2288
19acc327
PS
2289 /* Tunnel gso handlers can set protocol to ethernet. */
2290 if (type == htons(ETH_P_TEB)) {
2291 struct ethhdr *eth;
2292
2293 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2294 return 0;
2295
2296 eth = (struct ethhdr *)skb_mac_header(skb);
2297 type = eth->h_proto;
2298 }
2299
8ad227ff 2300 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
c8d5bcd1 2301 struct vlan_hdr *vh;
7b9c6090 2302
c8d5bcd1 2303 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
ec5f0615 2304 return 0;
7b9c6090 2305
c8d5bcd1
JG
2306 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2307 type = vh->h_vlan_encapsulated_proto;
2308 vlan_depth += VLAN_HLEN;
7b9c6090
JG
2309 }
2310
ec5f0615
PS
2311 return type;
2312}
2313
2314/**
2315 * skb_mac_gso_segment - mac layer segmentation handler.
2316 * @skb: buffer to segment
2317 * @features: features for the output path (see dev->features)
2318 */
2319struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2320 netdev_features_t features)
2321{
2322 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2323 struct packet_offload *ptype;
2324 __be16 type = skb_network_protocol(skb);
2325
2326 if (unlikely(!type))
2327 return ERR_PTR(-EINVAL);
2328
f6a78bfc
HX
2329 __skb_pull(skb, skb->mac_len);
2330
2331 rcu_read_lock();
22061d80 2332 list_for_each_entry_rcu(ptype, &offload_base, list) {
f191a1d1 2333 if (ptype->type == type && ptype->callbacks.gso_segment) {
84fa7933 2334 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
05e8ef4a
PS
2335 int err;
2336
f191a1d1 2337 err = ptype->callbacks.gso_send_check(skb);
a430a43d
HX
2338 segs = ERR_PTR(err);
2339 if (err || skb_gso_ok(skb, features))
2340 break;
d56f90a7
ACM
2341 __skb_push(skb, (skb->data -
2342 skb_network_header(skb)));
a430a43d 2343 }
f191a1d1 2344 segs = ptype->callbacks.gso_segment(skb, features);
f6a78bfc
HX
2345 break;
2346 }
2347 }
2348 rcu_read_unlock();
2349
98e399f8 2350 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 2351
f6a78bfc
HX
2352 return segs;
2353}
05e8ef4a
PS
2354EXPORT_SYMBOL(skb_mac_gso_segment);
2355
2356
2357/* openvswitch calls this on rx path, so we need a different check.
2358 */
2359static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2360{
2361 if (tx_path)
2362 return skb->ip_summed != CHECKSUM_PARTIAL;
2363 else
2364 return skb->ip_summed == CHECKSUM_NONE;
2365}
2366
2367/**
2368 * __skb_gso_segment - Perform segmentation on skb.
2369 * @skb: buffer to segment
2370 * @features: features for the output path (see dev->features)
2371 * @tx_path: whether it is called in TX path
2372 *
2373 * This function segments the given skb and returns a list of segments.
2374 *
2375 * It may return NULL if the skb requires no segmentation. This is
2376 * only possible when GSO is used for verifying header integrity.
2377 */
2378struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2379 netdev_features_t features, bool tx_path)
2380{
2381 if (unlikely(skb_needs_check(skb, tx_path))) {
2382 int err;
2383
2384 skb_warn_bad_offload(skb);
2385
2386 if (skb_header_cloned(skb) &&
2387 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2388 return ERR_PTR(err);
2389 }
2390
68c33163 2391 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3347c960
ED
2392 SKB_GSO_CB(skb)->encap_level = 0;
2393
05e8ef4a
PS
2394 skb_reset_mac_header(skb);
2395 skb_reset_mac_len(skb);
2396
2397 return skb_mac_gso_segment(skb, features);
2398}
12b0004d 2399EXPORT_SYMBOL(__skb_gso_segment);
f6a78bfc 2400
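/* Editor's sketch (not part of dev.c): how a caller might segment a GSO skb
 * and hand each resulting segment to a hypothetical "my_xmit_one" helper.
 */
#include <linux/netdevice.h>
#include <linux/err.h>

static int my_xmit_one(struct sk_buff *skb);	/* hypothetical */

static int my_gso_xmit(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs, *nskb;

	segs = __skb_gso_segment(skb, features, true);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)
		return my_xmit_one(skb);	/* no segmentation was needed */

	consume_skb(skb);			/* original is no longer needed */
	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		my_xmit_one(nskb);
	}
	return 0;
}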
fb286bb2
HX
2401/* Take action when hardware reception checksum errors are detected. */
2402#ifdef CONFIG_BUG
2403void netdev_rx_csum_fault(struct net_device *dev)
2404{
2405 if (net_ratelimit()) {
7b6cd1ce 2406 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
fb286bb2
HX
2407 dump_stack();
2408 }
2409}
2410EXPORT_SYMBOL(netdev_rx_csum_fault);
2411#endif
2412
1da177e4
LT
 2413/* Actually, we should eliminate this check as soon as we know that:
 2414 * 1. IOMMU is present and can map all the memory.
2415 * 2. No high memory really exists on this machine.
2416 */
2417
9092c658 2418static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 2419{
3d3a8533 2420#ifdef CONFIG_HIGHMEM
1da177e4 2421 int i;
5acbbd42 2422 if (!(dev->features & NETIF_F_HIGHDMA)) {
ea2ab693
IC
2423 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2424 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2425 if (PageHighMem(skb_frag_page(frag)))
5acbbd42 2426 return 1;
ea2ab693 2427 }
5acbbd42 2428 }
1da177e4 2429
5acbbd42
FT
2430 if (PCI_DMA_BUS_IS_PHYS) {
2431 struct device *pdev = dev->dev.parent;
1da177e4 2432
9092c658
ED
2433 if (!pdev)
2434 return 0;
5acbbd42 2435 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
ea2ab693
IC
2436 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2437 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
5acbbd42
FT
2438 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2439 return 1;
2440 }
2441 }
3d3a8533 2442#endif
1da177e4
LT
2443 return 0;
2444}
1da177e4 2445
f6a78bfc
HX
2446struct dev_gso_cb {
2447 void (*destructor)(struct sk_buff *skb);
2448};
2449
2450#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2451
2452static void dev_gso_skb_destructor(struct sk_buff *skb)
2453{
2454 struct dev_gso_cb *cb;
2455
289dccbe
ED
2456 kfree_skb_list(skb->next);
2457 skb->next = NULL;
f6a78bfc
HX
2458
2459 cb = DEV_GSO_CB(skb);
2460 if (cb->destructor)
2461 cb->destructor(skb);
2462}
2463
2464/**
2465 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2466 * @skb: buffer to segment
91ecb63c 2467 * @features: device features as applicable to this skb
f6a78bfc
HX
2468 *
2469 * This function segments the given skb and stores the list of segments
2470 * in skb->next.
2471 */
c8f44aff 2472static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
f6a78bfc 2473{
f6a78bfc 2474 struct sk_buff *segs;
576a30eb
HX
2475
2476 segs = skb_gso_segment(skb, features);
2477
2478 /* Verifying header integrity only. */
2479 if (!segs)
2480 return 0;
f6a78bfc 2481
801678c5 2482 if (IS_ERR(segs))
f6a78bfc
HX
2483 return PTR_ERR(segs);
2484
2485 skb->next = segs;
2486 DEV_GSO_CB(skb)->destructor = skb->destructor;
2487 skb->destructor = dev_gso_skb_destructor;
2488
2489 return 0;
2490}
2491
c8f44aff 2492static netdev_features_t harmonize_features(struct sk_buff *skb,
cdbaa0bb 2493 netdev_features_t features)
f01a5236 2494{
c0d680e5 2495 if (skb->ip_summed != CHECKSUM_NONE &&
cdbaa0bb 2496 !can_checksum_protocol(features, skb_network_protocol(skb))) {
f01a5236 2497 features &= ~NETIF_F_ALL_CSUM;
f01a5236
JG
2498 } else if (illegal_highdma(skb->dev, skb)) {
2499 features &= ~NETIF_F_SG;
2500 }
2501
2502 return features;
2503}
2504
c8f44aff 2505netdev_features_t netif_skb_features(struct sk_buff *skb)
58e998c6
JG
2506{
2507 __be16 protocol = skb->protocol;
c8f44aff 2508 netdev_features_t features = skb->dev->features;
58e998c6 2509
30b678d8
BH
2510 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2511 features &= ~NETIF_F_GSO_MASK;
2512
8ad227ff 2513 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
58e998c6
JG
2514 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2515 protocol = veh->h_vlan_encapsulated_proto;
f01a5236 2516 } else if (!vlan_tx_tag_present(skb)) {
cdbaa0bb 2517 return harmonize_features(skb, features);
f01a5236 2518 }
58e998c6 2519
8ad227ff
PM
2520 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2521 NETIF_F_HW_VLAN_STAG_TX);
f01a5236 2522
cdbaa0bb 2523 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
f01a5236 2524 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
8ad227ff
PM
2525 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2526 NETIF_F_HW_VLAN_STAG_TX;
cdbaa0bb
AD
2527
2528 return harmonize_features(skb, features);
58e998c6 2529}
f01a5236 2530EXPORT_SYMBOL(netif_skb_features);
58e998c6 2531
fd2ea0a7 2532int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
a6cc0cfa 2533 struct netdev_queue *txq, void *accel_priv)
f6a78bfc 2534{
00829823 2535 const struct net_device_ops *ops = dev->netdev_ops;
572a9d7b 2536 int rc = NETDEV_TX_OK;
ec764bf0 2537 unsigned int skb_len;
00829823 2538
f6a78bfc 2539 if (likely(!skb->next)) {
c8f44aff 2540 netdev_features_t features;
fc741216 2541
93f154b5 2542 /*
25985edc 2543 * If device doesn't need skb->dst, release it right now while
93f154b5
ED
 2544 * it's hot in this cpu cache
2545 */
adf30907
ED
2546 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2547 skb_dst_drop(skb);
2548
fc741216
JG
2549 features = netif_skb_features(skb);
2550
7b9c6090 2551 if (vlan_tx_tag_present(skb) &&
86a9bad3
PM
2552 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2553 skb = __vlan_put_tag(skb, skb->vlan_proto,
2554 vlan_tx_tag_get(skb));
7b9c6090
JG
2555 if (unlikely(!skb))
2556 goto out;
2557
2558 skb->vlan_tci = 0;
2559 }
2560
fc70fb64
AD
2561 /* If encapsulation offload request, verify we are testing
2562 * hardware encapsulation features instead of standard
2563 * features for the netdev
2564 */
2565 if (skb->encapsulation)
2566 features &= dev->hw_enc_features;
2567
fc741216 2568 if (netif_needs_gso(skb, features)) {
91ecb63c 2569 if (unlikely(dev_gso_segment(skb, features)))
9ccb8975
DM
2570 goto out_kfree_skb;
2571 if (skb->next)
2572 goto gso;
6afff0ca 2573 } else {
02932ce9 2574 if (skb_needs_linearize(skb, features) &&
6afff0ca
JF
2575 __skb_linearize(skb))
2576 goto out_kfree_skb;
2577
2578 /* If packet is not checksummed and device does not
2579 * support checksumming for this protocol, complete
2580 * checksumming here.
2581 */
2582 if (skb->ip_summed == CHECKSUM_PARTIAL) {
fc70fb64
AD
2583 if (skb->encapsulation)
2584 skb_set_inner_transport_header(skb,
2585 skb_checksum_start_offset(skb));
2586 else
2587 skb_set_transport_header(skb,
2588 skb_checksum_start_offset(skb));
03634668 2589 if (!(features & NETIF_F_ALL_CSUM) &&
6afff0ca
JF
2590 skb_checksum_help(skb))
2591 goto out_kfree_skb;
2592 }
9ccb8975
DM
2593 }
2594
b40863c6
ED
2595 if (!list_empty(&ptype_all))
2596 dev_queue_xmit_nit(skb, dev);
2597
ec764bf0 2598 skb_len = skb->len;
a6cc0cfa
JF
2599 if (accel_priv)
2600 rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
2601 else
2602 rc = ops->ndo_start_xmit(skb, dev);
2603
ec764bf0 2604 trace_net_dev_xmit(skb, rc, dev, skb_len);
a6cc0cfa 2605 if (rc == NETDEV_TX_OK && txq)
08baf561 2606 txq_trans_update(txq);
ac45f602 2607 return rc;
f6a78bfc
HX
2608 }
2609
576a30eb 2610gso:
f6a78bfc
HX
2611 do {
2612 struct sk_buff *nskb = skb->next;
f6a78bfc
HX
2613
2614 skb->next = nskb->next;
2615 nskb->next = NULL;
068a2de5 2616
b40863c6
ED
2617 if (!list_empty(&ptype_all))
2618 dev_queue_xmit_nit(nskb, dev);
2619
ec764bf0 2620 skb_len = nskb->len;
a6cc0cfa
JF
2621 if (accel_priv)
2622 rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
2623 else
2624 rc = ops->ndo_start_xmit(nskb, dev);
ec764bf0 2625 trace_net_dev_xmit(nskb, rc, dev, skb_len);
ec634fe3 2626 if (unlikely(rc != NETDEV_TX_OK)) {
572a9d7b
PM
2627 if (rc & ~NETDEV_TX_MASK)
2628 goto out_kfree_gso_skb;
f54d9e8d 2629 nskb->next = skb->next;
f6a78bfc
HX
2630 skb->next = nskb;
2631 return rc;
2632 }
08baf561 2633 txq_trans_update(txq);
73466498 2634 if (unlikely(netif_xmit_stopped(txq) && skb->next))
f54d9e8d 2635 return NETDEV_TX_BUSY;
f6a78bfc 2636 } while (skb->next);
4ec93edb 2637
572a9d7b 2638out_kfree_gso_skb:
0c772159 2639 if (likely(skb->next == NULL)) {
572a9d7b 2640 skb->destructor = DEV_GSO_CB(skb)->destructor;
0c772159
SS
2641 consume_skb(skb);
2642 return rc;
2643 }
f6a78bfc
HX
2644out_kfree_skb:
2645 kfree_skb(skb);
7b9c6090 2646out:
572a9d7b 2647 return rc;
f6a78bfc 2648}
a6cc0cfa 2649EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
f6a78bfc 2650
1def9238
ED
2651static void qdisc_pkt_len_init(struct sk_buff *skb)
2652{
2653 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2654
2655 qdisc_skb_cb(skb)->pkt_len = skb->len;
2656
2657 /* To get more precise estimation of bytes sent on wire,
2658 * we add to pkt_len the headers size of all segments
2659 */
2660 if (shinfo->gso_size) {
757b8b1d 2661 unsigned int hdr_len;
15e5a030 2662 u16 gso_segs = shinfo->gso_segs;
1def9238 2663
757b8b1d
ED
2664 /* mac layer + network layer */
2665 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2666
2667 /* + transport layer */
1def9238
ED
2668 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2669 hdr_len += tcp_hdrlen(skb);
2670 else
2671 hdr_len += sizeof(struct udphdr);
15e5a030
JW
2672
2673 if (shinfo->gso_type & SKB_GSO_DODGY)
2674 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2675 shinfo->gso_size);
2676
2677 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
1def9238
ED
2678 }
2679}
2680
bbd8a0d3
KK
2681static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2682 struct net_device *dev,
2683 struct netdev_queue *txq)
2684{
2685 spinlock_t *root_lock = qdisc_lock(q);
a2da570d 2686 bool contended;
bbd8a0d3
KK
2687 int rc;
2688
1def9238 2689 qdisc_pkt_len_init(skb);
a2da570d 2690 qdisc_calculate_pkt_len(skb, q);
79640a4c
ED
2691 /*
2692 * Heuristic to force contended enqueues to serialize on a
2693 * separate lock before trying to get qdisc main lock.
2694 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2695 * and dequeue packets faster.
2696 */
a2da570d 2697 contended = qdisc_is_running(q);
79640a4c
ED
2698 if (unlikely(contended))
2699 spin_lock(&q->busylock);
2700
bbd8a0d3
KK
2701 spin_lock(root_lock);
2702 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2703 kfree_skb(skb);
2704 rc = NET_XMIT_DROP;
2705 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 2706 qdisc_run_begin(q)) {
bbd8a0d3
KK
2707 /*
2708 * This is a work-conserving queue; there are no old skbs
2709 * waiting to be sent out; and the qdisc is not running -
2710 * xmit the skb directly.
2711 */
7fee226a
ED
2712 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2713 skb_dst_force(skb);
bfe0d029 2714
bfe0d029
ED
2715 qdisc_bstats_update(q, skb);
2716
79640a4c
ED
2717 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2718 if (unlikely(contended)) {
2719 spin_unlock(&q->busylock);
2720 contended = false;
2721 }
bbd8a0d3 2722 __qdisc_run(q);
79640a4c 2723 } else
bc135b23 2724 qdisc_run_end(q);
bbd8a0d3
KK
2725
2726 rc = NET_XMIT_SUCCESS;
2727 } else {
7fee226a 2728 skb_dst_force(skb);
a2da570d 2729 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
79640a4c
ED
2730 if (qdisc_run_begin(q)) {
2731 if (unlikely(contended)) {
2732 spin_unlock(&q->busylock);
2733 contended = false;
2734 }
2735 __qdisc_run(q);
2736 }
bbd8a0d3
KK
2737 }
2738 spin_unlock(root_lock);
79640a4c
ED
2739 if (unlikely(contended))
2740 spin_unlock(&q->busylock);
bbd8a0d3
KK
2741 return rc;
2742}
2743
86f8515f 2744#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
5bc1421e
NH
2745static void skb_update_prio(struct sk_buff *skb)
2746{
6977a79d 2747 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
5bc1421e 2748
91c68ce2
ED
2749 if (!skb->priority && skb->sk && map) {
2750 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2751
2752 if (prioidx < map->priomap_len)
2753 skb->priority = map->priomap[prioidx];
2754 }
5bc1421e
NH
2755}
2756#else
2757#define skb_update_prio(skb)
2758#endif
2759
745e20f1 2760static DEFINE_PER_CPU(int, xmit_recursion);
11a766ce 2761#define RECURSION_LIMIT 10
745e20f1 2762
95603e22
MM
2763/**
2764 * dev_loopback_xmit - loop back @skb
2765 * @skb: buffer to transmit
2766 */
2767int dev_loopback_xmit(struct sk_buff *skb)
2768{
2769 skb_reset_mac_header(skb);
2770 __skb_pull(skb, skb_network_offset(skb));
2771 skb->pkt_type = PACKET_LOOPBACK;
2772 skb->ip_summed = CHECKSUM_UNNECESSARY;
2773 WARN_ON(!skb_dst(skb));
2774 skb_dst_force(skb);
2775 netif_rx_ni(skb);
2776 return 0;
2777}
2778EXPORT_SYMBOL(dev_loopback_xmit);
2779
d29f749e
DJ
2780/**
2781 * dev_queue_xmit - transmit a buffer
2782 * @skb: buffer to transmit
2783 *
2784 * Queue a buffer for transmission to a network device. The caller must
2785 * have set the device and priority and built the buffer before calling
2786 * this function. The function can be called from an interrupt.
2787 *
2788 * A negative errno code is returned on a failure. A success does not
2789 * guarantee the frame will be transmitted as it may be dropped due
2790 * to congestion or traffic shaping.
2791 *
2792 * -----------------------------------------------------------------------------------
2793 * I notice this method can also return errors from the queue disciplines,
2794 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2795 * be positive.
2796 *
2797 * Regardless of the return value, the skb is consumed, so it is currently
2798 * difficult to retry a send to this method. (You can bump the ref count
2799 * before sending to hold a reference for retry if you are careful.)
2800 *
2801 * When calling this method, interrupts MUST be enabled. This is because
2802 * the BH enable code must have IRQs enabled so that it will not deadlock.
2803 * --BLG
2804 */
1da177e4
LT
2805int dev_queue_xmit(struct sk_buff *skb)
2806{
2807 struct net_device *dev = skb->dev;
dc2b4847 2808 struct netdev_queue *txq;
1da177e4
LT
2809 struct Qdisc *q;
2810 int rc = -ENOMEM;
2811
6d1ccff6
ED
2812 skb_reset_mac_header(skb);
2813
4ec93edb
YH
2814 /* Disable soft irqs for various locks below. Also
2815 * stops preemption for RCU.
1da177e4 2816 */
4ec93edb 2817 rcu_read_lock_bh();
1da177e4 2818
5bc1421e
NH
2819 skb_update_prio(skb);
2820
8c4c49df 2821 txq = netdev_pick_tx(dev, skb);
a898def2 2822 q = rcu_dereference_bh(txq->qdisc);
37437bb2 2823
1da177e4 2824#ifdef CONFIG_NET_CLS_ACT
d1b19dff 2825 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1da177e4 2826#endif
cf66ba58 2827 trace_net_dev_queue(skb);
1da177e4 2828 if (q->enqueue) {
bbd8a0d3 2829 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 2830 goto out;
1da177e4
LT
2831 }
2832
2833 /* The device has no queue. Common case for software devices:
2834 loopback, all the sorts of tunnels...
2835
932ff279
HX
2836 Really, it is unlikely that netif_tx_lock protection is necessary
 2837 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
1da177e4
LT
2838 counters.)
 2839 However, it is possible that they rely on the protection
2840 made by us here.
2841
 2842 Check this and take the lock. It is not prone to deadlocks.
 2843 Alternatively, shoot the noqueue qdisc entirely; that is even simpler 8)
2844 */
2845 if (dev->flags & IFF_UP) {
2846 int cpu = smp_processor_id(); /* ok because BHs are off */
2847
c773e847 2848 if (txq->xmit_lock_owner != cpu) {
1da177e4 2849
745e20f1
ED
2850 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2851 goto recursion_alert;
2852
c773e847 2853 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 2854
73466498 2855 if (!netif_xmit_stopped(txq)) {
745e20f1 2856 __this_cpu_inc(xmit_recursion);
a6cc0cfa 2857 rc = dev_hard_start_xmit(skb, dev, txq, NULL);
745e20f1 2858 __this_cpu_dec(xmit_recursion);
572a9d7b 2859 if (dev_xmit_complete(rc)) {
c773e847 2860 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
2861 goto out;
2862 }
2863 }
c773e847 2864 HARD_TX_UNLOCK(dev, txq);
e87cc472
JP
2865 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2866 dev->name);
1da177e4
LT
2867 } else {
2868 /* Recursion is detected! It is possible,
745e20f1
ED
2869 * unfortunately
2870 */
2871recursion_alert:
e87cc472
JP
2872 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2873 dev->name);
1da177e4
LT
2874 }
2875 }
2876
2877 rc = -ENETDOWN;
d4828d85 2878 rcu_read_unlock_bh();
1da177e4 2879
1da177e4
LT
2880 kfree_skb(skb);
2881 return rc;
2882out:
d4828d85 2883 rcu_read_unlock_bh();
1da177e4
LT
2884 return rc;
2885}
d1b19dff 2886EXPORT_SYMBOL(dev_queue_xmit);
1da177e4
LT
2887
2888
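/* Editor's sketch (not part of dev.c): minimal pattern for transmitting a
 * pre-built frame; "my_build_frame" is hypothetical and must have filled in
 * all headers.  Note the skb is consumed whatever dev_queue_xmit() returns.
 */
#include <linux/netdevice.h>

static struct sk_buff *my_build_frame(struct net_device *dev);	/* hypothetical */

static int my_send_frame(struct net_device *dev)
{
	struct sk_buff *skb = my_build_frame(dev);

	if (!skb)
		return -ENOMEM;
	skb->dev = dev;
	return dev_queue_xmit(skb);
}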
2889/*=======================================================================
2890 Receiver routines
2891 =======================================================================*/
2892
6b2bedc3 2893int netdev_max_backlog __read_mostly = 1000;
c9e6bc64
ED
2894EXPORT_SYMBOL(netdev_max_backlog);
2895
3b098e2d 2896int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3
SH
2897int netdev_budget __read_mostly = 300;
2898int weight_p __read_mostly = 64; /* old backlog weight */
1da177e4 2899
eecfd7c4
ED
2900/* Called with irq disabled */
2901static inline void ____napi_schedule(struct softnet_data *sd,
2902 struct napi_struct *napi)
2903{
2904 list_add_tail(&napi->poll_list, &sd->poll_list);
2905 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2906}
2907
bfb564e7
KK
2908#ifdef CONFIG_RPS
2909
2910/* One global table that all flow-based protocols share. */
6e3f7faf 2911struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
bfb564e7
KK
2912EXPORT_SYMBOL(rps_sock_flow_table);
2913
c5905afb 2914struct static_key rps_needed __read_mostly;
adc9300e 2915
c445477d
BH
2916static struct rps_dev_flow *
2917set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2918 struct rps_dev_flow *rflow, u16 next_cpu)
2919{
09994d1b 2920 if (next_cpu != RPS_NO_CPU) {
c445477d
BH
2921#ifdef CONFIG_RFS_ACCEL
2922 struct netdev_rx_queue *rxqueue;
2923 struct rps_dev_flow_table *flow_table;
2924 struct rps_dev_flow *old_rflow;
2925 u32 flow_id;
2926 u16 rxq_index;
2927 int rc;
2928
2929 /* Should we steer this flow to a different hardware queue? */
69a19ee6
BH
2930 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2931 !(dev->features & NETIF_F_NTUPLE))
c445477d
BH
2932 goto out;
2933 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2934 if (rxq_index == skb_get_rx_queue(skb))
2935 goto out;
2936
2937 rxqueue = dev->_rx + rxq_index;
2938 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2939 if (!flow_table)
2940 goto out;
2941 flow_id = skb->rxhash & flow_table->mask;
2942 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2943 rxq_index, flow_id);
2944 if (rc < 0)
2945 goto out;
2946 old_rflow = rflow;
2947 rflow = &flow_table->flows[flow_id];
c445477d
BH
2948 rflow->filter = rc;
2949 if (old_rflow->filter == rflow->filter)
2950 old_rflow->filter = RPS_NO_FILTER;
2951 out:
2952#endif
2953 rflow->last_qtail =
09994d1b 2954 per_cpu(softnet_data, next_cpu).input_queue_head;
c445477d
BH
2955 }
2956
09994d1b 2957 rflow->cpu = next_cpu;
c445477d
BH
2958 return rflow;
2959}
2960
bfb564e7
KK
2961/*
2962 * get_rps_cpu is called from netif_receive_skb and returns the target
2963 * CPU from the RPS map of the receiving queue for a given skb.
2964 * rcu_read_lock must be held on entry.
2965 */
2966static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2967 struct rps_dev_flow **rflowp)
2968{
2969 struct netdev_rx_queue *rxqueue;
6e3f7faf 2970 struct rps_map *map;
bfb564e7
KK
2971 struct rps_dev_flow_table *flow_table;
2972 struct rps_sock_flow_table *sock_flow_table;
2973 int cpu = -1;
2974 u16 tcpu;
2975
2976 if (skb_rx_queue_recorded(skb)) {
2977 u16 index = skb_get_rx_queue(skb);
62fe0b40
BH
2978 if (unlikely(index >= dev->real_num_rx_queues)) {
2979 WARN_ONCE(dev->real_num_rx_queues > 1,
2980 "%s received packet on queue %u, but number "
2981 "of RX queues is %u\n",
2982 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
2983 goto done;
2984 }
2985 rxqueue = dev->_rx + index;
2986 } else
2987 rxqueue = dev->_rx;
2988
6e3f7faf
ED
2989 map = rcu_dereference(rxqueue->rps_map);
2990 if (map) {
85875236 2991 if (map->len == 1 &&
33d480ce 2992 !rcu_access_pointer(rxqueue->rps_flow_table)) {
6febfca9
CG
2993 tcpu = map->cpus[0];
2994 if (cpu_online(tcpu))
2995 cpu = tcpu;
2996 goto done;
2997 }
33d480ce 2998 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
bfb564e7 2999 goto done;
6febfca9 3000 }
bfb564e7 3001
2d47b459 3002 skb_reset_network_header(skb);
3958afa1 3003 if (!skb_get_hash(skb))
bfb564e7
KK
3004 goto done;
3005
fec5e652
TH
3006 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3007 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3008 if (flow_table && sock_flow_table) {
3009 u16 next_cpu;
3010 struct rps_dev_flow *rflow;
3011
3012 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
3013 tcpu = rflow->cpu;
3014
3015 next_cpu = sock_flow_table->ents[skb->rxhash &
3016 sock_flow_table->mask];
3017
3018 /*
3019 * If the desired CPU (where last recvmsg was done) is
3020 * different from current CPU (one in the rx-queue flow
3021 * table entry), switch if one of the following holds:
3022 * - Current CPU is unset (equal to RPS_NO_CPU).
3023 * - Current CPU is offline.
3024 * - The current CPU's queue tail has advanced beyond the
3025 * last packet that was enqueued using this table entry.
3026 * This guarantees that all previous packets for the flow
3027 * have been dequeued, thus preserving in order delivery.
3028 */
3029 if (unlikely(tcpu != next_cpu) &&
3030 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3031 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
baefa31d
TH
3032 rflow->last_qtail)) >= 0)) {
3033 tcpu = next_cpu;
c445477d 3034 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
baefa31d 3035 }
c445477d 3036
fec5e652
TH
3037 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3038 *rflowp = rflow;
3039 cpu = tcpu;
3040 goto done;
3041 }
3042 }
3043
0a9627f2 3044 if (map) {
fec5e652 3045 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
0a9627f2
TH
3046
3047 if (cpu_online(tcpu)) {
3048 cpu = tcpu;
3049 goto done;
3050 }
3051 }
3052
3053done:
0a9627f2
TH
3054 return cpu;
3055}
3056
c445477d
BH
3057#ifdef CONFIG_RFS_ACCEL
3058
3059/**
3060 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3061 * @dev: Device on which the filter was set
3062 * @rxq_index: RX queue index
3063 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3064 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3065 *
3066 * Drivers that implement ndo_rx_flow_steer() should periodically call
3067 * this function for each installed filter and remove the filters for
3068 * which it returns %true.
3069 */
3070bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3071 u32 flow_id, u16 filter_id)
3072{
3073 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3074 struct rps_dev_flow_table *flow_table;
3075 struct rps_dev_flow *rflow;
3076 bool expire = true;
3077 int cpu;
3078
3079 rcu_read_lock();
3080 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3081 if (flow_table && flow_id <= flow_table->mask) {
3082 rflow = &flow_table->flows[flow_id];
3083 cpu = ACCESS_ONCE(rflow->cpu);
3084 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3085 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3086 rflow->last_qtail) <
3087 (int)(10 * flow_table->mask)))
3088 expire = false;
3089 }
3090 rcu_read_unlock();
3091 return expire;
3092}
3093EXPORT_SYMBOL(rps_may_expire_flow);
3094
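/* Editor's sketch (not part of dev.c): a driver implementing
 * ndo_rx_flow_steer() would scan its installed filters periodically and
 * drop the stale ones.  "struct my_filter" and "my_remove_filter" are
 * hypothetical bookkeeping pieces.
 */
#include <linux/netdevice.h>
#include <linux/list.h>

struct my_filter {			/* hypothetical per-filter entry */
	struct list_head list;
	u16 rxq_index;
	u32 flow_id;
	u16 filter_id;
};

static void my_remove_filter(struct net_device *dev,
			     struct my_filter *f);	/* hypothetical */

static void my_expire_filters(struct net_device *dev, struct list_head *filters)
{
	struct my_filter *f, *tmp;

	list_for_each_entry_safe(f, tmp, filters, list)
		if (rps_may_expire_flow(dev, f->rxq_index,
					f->flow_id, f->filter_id))
			my_remove_filter(dev, f);
}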
3095#endif /* CONFIG_RFS_ACCEL */
3096
0a9627f2 3097/* Called from hardirq (IPI) context */
e36fa2f7 3098static void rps_trigger_softirq(void *data)
0a9627f2 3099{
e36fa2f7
ED
3100 struct softnet_data *sd = data;
3101
eecfd7c4 3102 ____napi_schedule(sd, &sd->backlog);
dee42870 3103 sd->received_rps++;
0a9627f2 3104}
e36fa2f7 3105
fec5e652 3106#endif /* CONFIG_RPS */
0a9627f2 3107
e36fa2f7
ED
3108/*
 3109 * Check if this softnet_data structure belongs to another cpu.
3110 * If yes, queue it to our IPI list and return 1
3111 * If no, return 0
3112 */
3113static int rps_ipi_queued(struct softnet_data *sd)
3114{
3115#ifdef CONFIG_RPS
3116 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3117
3118 if (sd != mysd) {
3119 sd->rps_ipi_next = mysd->rps_ipi_list;
3120 mysd->rps_ipi_list = sd;
3121
3122 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3123 return 1;
3124 }
3125#endif /* CONFIG_RPS */
3126 return 0;
3127}
3128
99bbc707
WB
3129#ifdef CONFIG_NET_FLOW_LIMIT
3130int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3131#endif
3132
3133static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3134{
3135#ifdef CONFIG_NET_FLOW_LIMIT
3136 struct sd_flow_limit *fl;
3137 struct softnet_data *sd;
3138 unsigned int old_flow, new_flow;
3139
3140 if (qlen < (netdev_max_backlog >> 1))
3141 return false;
3142
3143 sd = &__get_cpu_var(softnet_data);
3144
3145 rcu_read_lock();
3146 fl = rcu_dereference(sd->flow_limit);
3147 if (fl) {
3958afa1 3148 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
99bbc707
WB
3149 old_flow = fl->history[fl->history_head];
3150 fl->history[fl->history_head] = new_flow;
3151
3152 fl->history_head++;
3153 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3154
3155 if (likely(fl->buckets[old_flow]))
3156 fl->buckets[old_flow]--;
3157
3158 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3159 fl->count++;
3160 rcu_read_unlock();
3161 return true;
3162 }
3163 }
3164 rcu_read_unlock();
3165#endif
3166 return false;
3167}
3168
0a9627f2
TH
3169/*
3170 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3171 * queue (may be a remote CPU queue).
3172 */
fec5e652
TH
3173static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3174 unsigned int *qtail)
0a9627f2 3175{
e36fa2f7 3176 struct softnet_data *sd;
0a9627f2 3177 unsigned long flags;
99bbc707 3178 unsigned int qlen;
0a9627f2 3179
e36fa2f7 3180 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
3181
3182 local_irq_save(flags);
0a9627f2 3183
e36fa2f7 3184 rps_lock(sd);
99bbc707
WB
3185 qlen = skb_queue_len(&sd->input_pkt_queue);
3186 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
6e7676c1 3187 if (skb_queue_len(&sd->input_pkt_queue)) {
0a9627f2 3188enqueue:
e36fa2f7 3189 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 3190 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 3191 rps_unlock(sd);
152102c7 3192 local_irq_restore(flags);
0a9627f2
TH
3193 return NET_RX_SUCCESS;
3194 }
3195
ebda37c2
ED
3196 /* Schedule NAPI for backlog device
3197 * We can use non atomic operation since we own the queue lock
3198 */
3199 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 3200 if (!rps_ipi_queued(sd))
eecfd7c4 3201 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
3202 }
3203 goto enqueue;
3204 }
3205
dee42870 3206 sd->dropped++;
e36fa2f7 3207 rps_unlock(sd);
0a9627f2 3208
0a9627f2
TH
3209 local_irq_restore(flags);
3210
caf586e5 3211 atomic_long_inc(&skb->dev->rx_dropped);
0a9627f2
TH
3212 kfree_skb(skb);
3213 return NET_RX_DROP;
3214}
1da177e4 3215
1da177e4
LT
3216/**
3217 * netif_rx - post buffer to the network code
3218 * @skb: buffer to post
3219 *
3220 * This function receives a packet from a device driver and queues it for
3221 * the upper (protocol) levels to process. It always succeeds. The buffer
3222 * may be dropped during processing for congestion control or by the
3223 * protocol layers.
3224 *
3225 * return values:
3226 * NET_RX_SUCCESS (no congestion)
1da177e4
LT
3227 * NET_RX_DROP (packet was dropped)
3228 *
3229 */
3230
3231int netif_rx(struct sk_buff *skb)
3232{
b0e28f1e 3233 int ret;
1da177e4
LT
3234
3235 /* if netpoll wants it, pretend we never saw it */
3236 if (netpoll_rx(skb))
3237 return NET_RX_DROP;
3238
588f0330 3239 net_timestamp_check(netdev_tstamp_prequeue, skb);
1da177e4 3240
cf66ba58 3241 trace_netif_rx(skb);
df334545 3242#ifdef CONFIG_RPS
c5905afb 3243 if (static_key_false(&rps_needed)) {
fec5e652 3244 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
3245 int cpu;
3246
cece1945 3247 preempt_disable();
b0e28f1e 3248 rcu_read_lock();
fec5e652
TH
3249
3250 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
3251 if (cpu < 0)
3252 cpu = smp_processor_id();
fec5e652
TH
3253
3254 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3255
b0e28f1e 3256 rcu_read_unlock();
cece1945 3257 preempt_enable();
adc9300e
ED
3258 } else
3259#endif
fec5e652
TH
3260 {
3261 unsigned int qtail;
3262 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3263 put_cpu();
3264 }
b0e28f1e 3265 return ret;
1da177e4 3266}
d1b19dff 3267EXPORT_SYMBOL(netif_rx);
1da177e4
LT
3268
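/* Editor's sketch (not part of dev.c): the classic non-NAPI receive path of
 * a driver: allocate an skb, copy the frame in, set the protocol and hand it
 * to netif_rx().  "data"/"len" come from hypothetical hardware buffers.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static void my_rx_frame(struct net_device *dev, const void *data,
			unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	netif_rx(skb);
}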
3269int netif_rx_ni(struct sk_buff *skb)
3270{
3271 int err;
3272
3273 preempt_disable();
3274 err = netif_rx(skb);
3275 if (local_softirq_pending())
3276 do_softirq();
3277 preempt_enable();
3278
3279 return err;
3280}
1da177e4
LT
3281EXPORT_SYMBOL(netif_rx_ni);
3282
1da177e4
LT
3283static void net_tx_action(struct softirq_action *h)
3284{
3285 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3286
3287 if (sd->completion_queue) {
3288 struct sk_buff *clist;
3289
3290 local_irq_disable();
3291 clist = sd->completion_queue;
3292 sd->completion_queue = NULL;
3293 local_irq_enable();
3294
3295 while (clist) {
3296 struct sk_buff *skb = clist;
3297 clist = clist->next;
3298
547b792c 3299 WARN_ON(atomic_read(&skb->users));
e6247027
ED
3300 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3301 trace_consume_skb(skb);
3302 else
3303 trace_kfree_skb(skb, net_tx_action);
1da177e4
LT
3304 __kfree_skb(skb);
3305 }
3306 }
3307
3308 if (sd->output_queue) {
37437bb2 3309 struct Qdisc *head;
1da177e4
LT
3310
3311 local_irq_disable();
3312 head = sd->output_queue;
3313 sd->output_queue = NULL;
a9cbd588 3314 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
3315 local_irq_enable();
3316
3317 while (head) {
37437bb2
DM
3318 struct Qdisc *q = head;
3319 spinlock_t *root_lock;
3320
1da177e4
LT
3321 head = head->next_sched;
3322
5fb66229 3323 root_lock = qdisc_lock(q);
37437bb2 3324 if (spin_trylock(root_lock)) {
def82a1d
JP
3325 smp_mb__before_clear_bit();
3326 clear_bit(__QDISC_STATE_SCHED,
3327 &q->state);
37437bb2
DM
3328 qdisc_run(q);
3329 spin_unlock(root_lock);
1da177e4 3330 } else {
195648bb 3331 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 3332 &q->state)) {
195648bb 3333 __netif_reschedule(q);
e8a83e10
JP
3334 } else {
3335 smp_mb__before_clear_bit();
3336 clear_bit(__QDISC_STATE_SCHED,
3337 &q->state);
3338 }
1da177e4
LT
3339 }
3340 }
3341 }
3342}
3343
ab95bfe0
JP
3344#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3345 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
da678292
MM
3346/* This hook is defined here for ATM LANE */
3347int (*br_fdb_test_addr_hook)(struct net_device *dev,
3348 unsigned char *addr) __read_mostly;
4fb019a0 3349EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 3350#endif
1da177e4 3351
1da177e4
LT
3352#ifdef CONFIG_NET_CLS_ACT
3353/* TODO: Maybe we should just force sch_ingress to be compiled in
 3354 * when CONFIG_NET_CLS_ACT is? Otherwise we pay some useless instructions
 3355 * (a compare and 2 extra stores) right now if we don't have it on
 3356 * but do have CONFIG_NET_CLS_ACT
25985edc
LDM
 3357 * NOTE: This doesn't stop any functionality; if you don't have
3358 * the ingress scheduler, you just can't add policies on ingress.
1da177e4
LT
3359 *
3360 */
24824a09 3361static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
1da177e4 3362{
1da177e4 3363 struct net_device *dev = skb->dev;
f697c3e8 3364 u32 ttl = G_TC_RTTL(skb->tc_verd);
555353cf
DM
3365 int result = TC_ACT_OK;
3366 struct Qdisc *q;
4ec93edb 3367
de384830 3368 if (unlikely(MAX_RED_LOOP < ttl++)) {
e87cc472
JP
3369 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3370 skb->skb_iif, dev->ifindex);
f697c3e8
HX
3371 return TC_ACT_SHOT;
3372 }
1da177e4 3373
f697c3e8
HX
3374 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3375 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
1da177e4 3376
83874000 3377 q = rxq->qdisc;
8d50b53d 3378 if (q != &noop_qdisc) {
83874000 3379 spin_lock(qdisc_lock(q));
a9312ae8
DM
3380 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3381 result = qdisc_enqueue_root(skb, q);
83874000
DM
3382 spin_unlock(qdisc_lock(q));
3383 }
f697c3e8
HX
3384
3385 return result;
3386}
86e65da9 3387
f697c3e8
HX
3388static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3389 struct packet_type **pt_prev,
3390 int *ret, struct net_device *orig_dev)
3391{
24824a09
ED
3392 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3393
3394 if (!rxq || rxq->qdisc == &noop_qdisc)
f697c3e8 3395 goto out;
1da177e4 3396
f697c3e8
HX
3397 if (*pt_prev) {
3398 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3399 *pt_prev = NULL;
1da177e4
LT
3400 }
3401
24824a09 3402 switch (ing_filter(skb, rxq)) {
f697c3e8
HX
3403 case TC_ACT_SHOT:
3404 case TC_ACT_STOLEN:
3405 kfree_skb(skb);
3406 return NULL;
3407 }
3408
3409out:
3410 skb->tc_verd = 0;
3411 return skb;
1da177e4
LT
3412}
3413#endif
3414
ab95bfe0
JP
3415/**
3416 * netdev_rx_handler_register - register receive handler
3417 * @dev: device to register a handler for
3418 * @rx_handler: receive handler to register
93e2c32b 3419 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0
JP
3420 *
 3421 * Register a receive handler for a device. This handler will then be
3422 * called from __netif_receive_skb. A negative errno code is returned
3423 * on a failure.
3424 *
3425 * The caller must hold the rtnl_mutex.
8a4eb573
JP
3426 *
3427 * For a general description of rx_handler, see enum rx_handler_result.
ab95bfe0
JP
3428 */
3429int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
3430 rx_handler_func_t *rx_handler,
3431 void *rx_handler_data)
ab95bfe0
JP
3432{
3433 ASSERT_RTNL();
3434
3435 if (dev->rx_handler)
3436 return -EBUSY;
3437
00cfec37 3438 /* Note: rx_handler_data must be set before rx_handler */
93e2c32b 3439 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
ab95bfe0
JP
3440 rcu_assign_pointer(dev->rx_handler, rx_handler);
3441
3442 return 0;
3443}
3444EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3445
3446/**
3447 * netdev_rx_handler_unregister - unregister receive handler
3448 * @dev: device to unregister a handler from
3449 *
166ec369 3450 * Unregister a receive handler from a device.
ab95bfe0
JP
3451 *
3452 * The caller must hold the rtnl_mutex.
3453 */
3454void netdev_rx_handler_unregister(struct net_device *dev)
3455{
3456
3457 ASSERT_RTNL();
a9b3cd7f 3458 RCU_INIT_POINTER(dev->rx_handler, NULL);
00cfec37
ED
3459 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
3460 * section has a guarantee to see a non NULL rx_handler_data
3461 * as well.
3462 */
3463 synchronize_net();
a9b3cd7f 3464 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
ab95bfe0
JP
3465}
3466EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3467
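/* Editor's sketch (not part of dev.c): how a stacking driver (bridge or
 * macvlan style) might claim and release a lower device.  "my_handle_frame"
 * and "struct my_port" are hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct my_port;				/* hypothetical per-port state */

static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
{
	struct my_port *port = rcu_dereference((*pskb)->dev->rx_handler_data);

	if (!port)
		return RX_HANDLER_PASS;
	/* ... consume, redirect or pass the frame ... */
	return RX_HANDLER_PASS;
}

static int my_port_attach(struct net_device *lower, struct my_port *port)
{
	ASSERT_RTNL();
	return netdev_rx_handler_register(lower, my_handle_frame, port);
}

static void my_port_detach(struct net_device *lower)
{
	ASSERT_RTNL();
	netdev_rx_handler_unregister(lower);
}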
b4b9e355
MG
3468/*
3469 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3470 * the special handling of PFMEMALLOC skbs.
3471 */
3472static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3473{
3474 switch (skb->protocol) {
3475 case __constant_htons(ETH_P_ARP):
3476 case __constant_htons(ETH_P_IP):
3477 case __constant_htons(ETH_P_IPV6):
3478 case __constant_htons(ETH_P_8021Q):
8ad227ff 3479 case __constant_htons(ETH_P_8021AD):
b4b9e355
MG
3480 return true;
3481 default:
3482 return false;
3483 }
3484}
3485
9754e293 3486static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
1da177e4
LT
3487{
3488 struct packet_type *ptype, *pt_prev;
ab95bfe0 3489 rx_handler_func_t *rx_handler;
f2ccd8fa 3490 struct net_device *orig_dev;
63d8ea7f 3491 struct net_device *null_or_dev;
8a4eb573 3492 bool deliver_exact = false;
1da177e4 3493 int ret = NET_RX_DROP;
252e3346 3494 __be16 type;
1da177e4 3495
588f0330 3496 net_timestamp_check(!netdev_tstamp_prequeue, skb);
81bbb3d4 3497
cf66ba58 3498 trace_netif_receive_skb(skb);
9b22ea56 3499
1da177e4 3500 /* if we've gotten here through NAPI, check netpoll */
bea3348e 3501 if (netpoll_receive_skb(skb))
b4b9e355 3502 goto out;
1da177e4 3503
cc9bd5ce 3504 orig_dev = skb->dev;
8f903c70 3505
c1d2bbe1 3506 skb_reset_network_header(skb);
fda55eca
ED
3507 if (!skb_transport_header_was_set(skb))
3508 skb_reset_transport_header(skb);
0b5c9db1 3509 skb_reset_mac_len(skb);
1da177e4
LT
3510
3511 pt_prev = NULL;
3512
3513 rcu_read_lock();
3514
63d8ea7f 3515another_round:
b6858177 3516 skb->skb_iif = skb->dev->ifindex;
63d8ea7f
DM
3517
3518 __this_cpu_inc(softnet_data.processed);
3519
8ad227ff
PM
3520 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3521 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
bcc6d479
JP
3522 skb = vlan_untag(skb);
3523 if (unlikely(!skb))
b4b9e355 3524 goto unlock;
bcc6d479
JP
3525 }
3526
1da177e4
LT
3527#ifdef CONFIG_NET_CLS_ACT
3528 if (skb->tc_verd & TC_NCLS) {
3529 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3530 goto ncls;
3531 }
3532#endif
3533
9754e293 3534 if (pfmemalloc)
b4b9e355
MG
3535 goto skip_taps;
3536
1da177e4 3537 list_for_each_entry_rcu(ptype, &ptype_all, list) {
63d8ea7f 3538 if (!ptype->dev || ptype->dev == skb->dev) {
4ec93edb 3539 if (pt_prev)
f2ccd8fa 3540 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
3541 pt_prev = ptype;
3542 }
3543 }
3544
b4b9e355 3545skip_taps:
1da177e4 3546#ifdef CONFIG_NET_CLS_ACT
f697c3e8
HX
3547 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3548 if (!skb)
b4b9e355 3549 goto unlock;
1da177e4
LT
3550ncls:
3551#endif
3552
9754e293 3553 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
b4b9e355
MG
3554 goto drop;
3555
2425717b
JF
3556 if (vlan_tx_tag_present(skb)) {
3557 if (pt_prev) {
3558 ret = deliver_skb(skb, pt_prev, orig_dev);
3559 pt_prev = NULL;
3560 }
48cc32d3 3561 if (vlan_do_receive(&skb))
2425717b
JF
3562 goto another_round;
3563 else if (unlikely(!skb))
b4b9e355 3564 goto unlock;
2425717b
JF
3565 }
3566
48cc32d3 3567 rx_handler = rcu_dereference(skb->dev->rx_handler);
ab95bfe0
JP
3568 if (rx_handler) {
3569 if (pt_prev) {
3570 ret = deliver_skb(skb, pt_prev, orig_dev);
3571 pt_prev = NULL;
3572 }
8a4eb573
JP
3573 switch (rx_handler(&skb)) {
3574 case RX_HANDLER_CONSUMED:
3bc1b1ad 3575 ret = NET_RX_SUCCESS;
b4b9e355 3576 goto unlock;
8a4eb573 3577 case RX_HANDLER_ANOTHER:
63d8ea7f 3578 goto another_round;
8a4eb573
JP
3579 case RX_HANDLER_EXACT:
3580 deliver_exact = true;
3581 case RX_HANDLER_PASS:
3582 break;
3583 default:
3584 BUG();
3585 }
ab95bfe0 3586 }
1da177e4 3587
d4b812de
ED
3588 if (unlikely(vlan_tx_tag_present(skb))) {
3589 if (vlan_tx_tag_get_id(skb))
3590 skb->pkt_type = PACKET_OTHERHOST;
3591 /* Note: we might in the future use prio bits
3592 * and set skb->priority like in vlan_do_receive()
3593 * For the time being, just ignore Priority Code Point
3594 */
3595 skb->vlan_tci = 0;
3596 }
48cc32d3 3597
63d8ea7f 3598 /* deliver only exact match when indicated */
8a4eb573 3599 null_or_dev = deliver_exact ? skb->dev : NULL;
1f3c8804 3600
1da177e4 3601 type = skb->protocol;
82d8a867
PE
3602 list_for_each_entry_rcu(ptype,
3603 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
63d8ea7f 3604 if (ptype->type == type &&
e3f48d37
JP
3605 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3606 ptype->dev == orig_dev)) {
4ec93edb 3607 if (pt_prev)
f2ccd8fa 3608 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
3609 pt_prev = ptype;
3610 }
3611 }
3612
3613 if (pt_prev) {
1080e512 3614 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
0e698bf6 3615 goto drop;
1080e512
MT
3616 else
3617 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4 3618 } else {
b4b9e355 3619drop:
caf586e5 3620 atomic_long_inc(&skb->dev->rx_dropped);
1da177e4
LT
3621 kfree_skb(skb);
3622 /* Jamal, now you will not be able to escape explaining
3623 * to me how you were going to use this. :-)
3624 */
3625 ret = NET_RX_DROP;
3626 }
3627
b4b9e355 3628unlock:
1da177e4 3629 rcu_read_unlock();
b4b9e355 3630out:
9754e293
DM
3631 return ret;
3632}
3633
3634static int __netif_receive_skb(struct sk_buff *skb)
3635{
3636 int ret;
3637
3638 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3639 unsigned long pflags = current->flags;
3640
3641 /*
3642 * PFMEMALLOC skbs are special, they should
3643 * - be delivered to SOCK_MEMALLOC sockets only
3644 * - stay away from userspace
3645 * - have bounded memory usage
3646 *
3647 * Use PF_MEMALLOC as this saves us from propagating the allocation
3648 * context down to all allocation sites.
3649 */
3650 current->flags |= PF_MEMALLOC;
3651 ret = __netif_receive_skb_core(skb, true);
3652 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3653 } else
3654 ret = __netif_receive_skb_core(skb, false);
3655
1da177e4
LT
3656 return ret;
3657}
0a9627f2
TH
3658
3659/**
3660 * netif_receive_skb - process receive buffer from network
3661 * @skb: buffer to process
3662 *
3663 * netif_receive_skb() is the main receive data processing function.
3664 * It always succeeds. The buffer may be dropped during processing
3665 * for congestion control or by the protocol layers.
3666 *
3667 * This function may only be called from softirq context and interrupts
3668 * should be enabled.
3669 *
3670 * Return values (usually ignored):
3671 * NET_RX_SUCCESS: no congestion
3672 * NET_RX_DROP: packet was dropped
3673 */
3674int netif_receive_skb(struct sk_buff *skb)
3675{
588f0330 3676 net_timestamp_check(netdev_tstamp_prequeue, skb);
3b098e2d 3677
c1f19b51
RC
3678 if (skb_defer_rx_timestamp(skb))
3679 return NET_RX_SUCCESS;
3680
df334545 3681#ifdef CONFIG_RPS
c5905afb 3682 if (static_key_false(&rps_needed)) {
3b098e2d
ED
3683 struct rps_dev_flow voidflow, *rflow = &voidflow;
3684 int cpu, ret;
fec5e652 3685
3b098e2d
ED
3686 rcu_read_lock();
3687
3688 cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 3689
3b098e2d
ED
3690 if (cpu >= 0) {
3691 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3692 rcu_read_unlock();
adc9300e 3693 return ret;
3b098e2d 3694 }
adc9300e 3695 rcu_read_unlock();
fec5e652 3696 }
1e94d72f 3697#endif
adc9300e 3698 return __netif_receive_skb(skb);
0a9627f2 3699}
d1b19dff 3700EXPORT_SYMBOL(netif_receive_skb);
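As an illustration of the calling convention (a sketch, not taken from any real driver): a minimal receive routine that builds an skb, sets the protocol with eth_type_trans() and hands it to netif_receive_skb() from softirq context. my_rx_one() and its arguments are invented for the example.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_rx_one(struct net_device *dev, const void *data, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), data, len);	/* copy the received frame */
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_receive_skb(skb);			/* softirq context only */
}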
1da177e4 3701
88751275
ED
3702/* Network device is going away, flush any packets still pending
3703 * Called with irqs disabled.
3704 */
152102c7 3705static void flush_backlog(void *arg)
6e583ce5 3706{
152102c7 3707 struct net_device *dev = arg;
e36fa2f7 3708 struct softnet_data *sd = &__get_cpu_var(softnet_data);
6e583ce5
SH
3709 struct sk_buff *skb, *tmp;
3710
e36fa2f7 3711 rps_lock(sd);
6e7676c1 3712 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
6e583ce5 3713 if (skb->dev == dev) {
e36fa2f7 3714 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 3715 kfree_skb(skb);
76cc8b13 3716 input_queue_head_incr(sd);
6e583ce5 3717 }
6e7676c1 3718 }
e36fa2f7 3719 rps_unlock(sd);
6e7676c1
CG
3720
3721 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3722 if (skb->dev == dev) {
3723 __skb_unlink(skb, &sd->process_queue);
3724 kfree_skb(skb);
76cc8b13 3725 input_queue_head_incr(sd);
6e7676c1
CG
3726 }
3727 }
6e583ce5
SH
3728}
3729
d565b0a1
HX
3730static int napi_gro_complete(struct sk_buff *skb)
3731{
22061d80 3732 struct packet_offload *ptype;
d565b0a1 3733 __be16 type = skb->protocol;
22061d80 3734 struct list_head *head = &offload_base;
d565b0a1
HX
3735 int err = -ENOENT;
3736
c3c7c254
ED
3737 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3738
fc59f9a3
HX
3739 if (NAPI_GRO_CB(skb)->count == 1) {
3740 skb_shinfo(skb)->gso_size = 0;
d565b0a1 3741 goto out;
fc59f9a3 3742 }
d565b0a1
HX
3743
3744 rcu_read_lock();
3745 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 3746 if (ptype->type != type || !ptype->callbacks.gro_complete)
d565b0a1
HX
3747 continue;
3748
299603e8 3749 err = ptype->callbacks.gro_complete(skb, 0);
d565b0a1
HX
3750 break;
3751 }
3752 rcu_read_unlock();
3753
3754 if (err) {
3755 WARN_ON(&ptype->list == head);
3756 kfree_skb(skb);
3757 return NET_RX_SUCCESS;
3758 }
3759
3760out:
d565b0a1
HX
3761 return netif_receive_skb(skb);
3762}
3763
2e71a6f8
ED
3764/* napi->gro_list contains packets ordered by age.
3765 * youngest packets at the head of it.
3766 * Complete skbs in reverse order to reduce latencies.
3767 */
3768void napi_gro_flush(struct napi_struct *napi, bool flush_old)
d565b0a1 3769{
2e71a6f8 3770 struct sk_buff *skb, *prev = NULL;
d565b0a1 3771
2e71a6f8
ED
3772 /* scan list and build reverse chain */
3773 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3774 skb->prev = prev;
3775 prev = skb;
3776 }
3777
3778 for (skb = prev; skb; skb = prev) {
d565b0a1 3779 skb->next = NULL;
2e71a6f8
ED
3780
3781 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3782 return;
3783
3784 prev = skb->prev;
d565b0a1 3785 napi_gro_complete(skb);
2e71a6f8 3786 napi->gro_count--;
d565b0a1
HX
3787 }
3788
3789 napi->gro_list = NULL;
3790}
86cac58b 3791EXPORT_SYMBOL(napi_gro_flush);
d565b0a1 3792
89c5fa33
ED
3793static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3794{
3795 struct sk_buff *p;
3796 unsigned int maclen = skb->dev->hard_header_len;
3797
3798 for (p = napi->gro_list; p; p = p->next) {
3799 unsigned long diffs;
3800
3801 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3802 diffs |= p->vlan_tci ^ skb->vlan_tci;
3803 if (maclen == ETH_HLEN)
3804 diffs |= compare_ether_header(skb_mac_header(p),
3805 skb_gro_mac_header(skb));
3806 else if (!diffs)
3807 diffs = memcmp(skb_mac_header(p),
3808 skb_gro_mac_header(skb),
3809 maclen);
3810 NAPI_GRO_CB(p)->same_flow = !diffs;
3811 NAPI_GRO_CB(p)->flush = 0;
3812 }
3813}
3814
299603e8
JC
3815static void skb_gro_reset_offset(struct sk_buff *skb)
3816{
3817 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3818 const skb_frag_t *frag0 = &pinfo->frags[0];
3819
3820 NAPI_GRO_CB(skb)->data_offset = 0;
3821 NAPI_GRO_CB(skb)->frag0 = NULL;
3822 NAPI_GRO_CB(skb)->frag0_len = 0;
3823
3824 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
3825 pinfo->nr_frags &&
3826 !PageHighMem(skb_frag_page(frag0))) {
3827 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3828 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3829 }
3830}
3831
bb728820 3832static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1
HX
3833{
3834 struct sk_buff **pp = NULL;
22061d80 3835 struct packet_offload *ptype;
d565b0a1 3836 __be16 type = skb->protocol;
22061d80 3837 struct list_head *head = &offload_base;
0da2afd5 3838 int same_flow;
5b252f0c 3839 enum gro_result ret;
d565b0a1 3840
ce9e76c8 3841 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
d565b0a1
HX
3842 goto normal;
3843
21dc3301 3844 if (skb_is_gso(skb) || skb_has_frag_list(skb))
f17f5c91
HX
3845 goto normal;
3846
299603e8 3847 skb_gro_reset_offset(skb);
89c5fa33 3848 gro_list_prepare(napi, skb);
bf5a755f 3849 NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
89c5fa33 3850
d565b0a1
HX
3851 rcu_read_lock();
3852 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 3853 if (ptype->type != type || !ptype->callbacks.gro_receive)
d565b0a1
HX
3854 continue;
3855
86911732 3856 skb_set_network_header(skb, skb_gro_offset(skb));
efd9450e 3857 skb_reset_mac_len(skb);
d565b0a1
HX
3858 NAPI_GRO_CB(skb)->same_flow = 0;
3859 NAPI_GRO_CB(skb)->flush = 0;
5d38a079 3860 NAPI_GRO_CB(skb)->free = 0;
d565b0a1 3861
f191a1d1 3862 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
d565b0a1
HX
3863 break;
3864 }
3865 rcu_read_unlock();
3866
3867 if (&ptype->list == head)
3868 goto normal;
3869
0da2afd5 3870 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 3871 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 3872
d565b0a1
HX
3873 if (pp) {
3874 struct sk_buff *nskb = *pp;
3875
3876 *pp = nskb->next;
3877 nskb->next = NULL;
3878 napi_gro_complete(nskb);
4ae5544f 3879 napi->gro_count--;
d565b0a1
HX
3880 }
3881
0da2afd5 3882 if (same_flow)
d565b0a1
HX
3883 goto ok;
3884
600adc18 3885 if (NAPI_GRO_CB(skb)->flush)
d565b0a1 3886 goto normal;
d565b0a1 3887
600adc18
ED
3888 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
3889 struct sk_buff *nskb = napi->gro_list;
3890
3891 /* locate the end of the list to select the 'oldest' flow */
3892 while (nskb->next) {
3893 pp = &nskb->next;
3894 nskb = *pp;
3895 }
3896 *pp = NULL;
3897 nskb->next = NULL;
3898 napi_gro_complete(nskb);
3899 } else {
3900 napi->gro_count++;
3901 }
d565b0a1 3902 NAPI_GRO_CB(skb)->count = 1;
2e71a6f8 3903 NAPI_GRO_CB(skb)->age = jiffies;
86911732 3904 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
d565b0a1
HX
3905 skb->next = napi->gro_list;
3906 napi->gro_list = skb;
5d0d9be8 3907 ret = GRO_HELD;
d565b0a1 3908
ad0f9904 3909pull:
cb18978c
HX
3910 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3911 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3912
3913 BUG_ON(skb->end - skb->tail < grow);
3914
3915 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3916
3917 skb->tail += grow;
3918 skb->data_len -= grow;
3919
3920 skb_shinfo(skb)->frags[0].page_offset += grow;
9e903e08 3921 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
cb18978c 3922
9e903e08 3923 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
ea2ab693 3924 skb_frag_unref(skb, 0);
cb18978c
HX
3925 memmove(skb_shinfo(skb)->frags,
3926 skb_shinfo(skb)->frags + 1,
e5093aec 3927 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
cb18978c 3928 }
ad0f9904
HX
3929 }
3930
d565b0a1 3931ok:
5d0d9be8 3932 return ret;
d565b0a1
HX
3933
3934normal:
ad0f9904
HX
3935 ret = GRO_NORMAL;
3936 goto pull;
5d38a079 3937}
96e93eab 3938
bf5a755f
JC
3939struct packet_offload *gro_find_receive_by_type(__be16 type)
3940{
3941 struct list_head *offload_head = &offload_base;
3942 struct packet_offload *ptype;
3943
3944 list_for_each_entry_rcu(ptype, offload_head, list) {
3945 if (ptype->type != type || !ptype->callbacks.gro_receive)
3946 continue;
3947 return ptype;
3948 }
3949 return NULL;
3950}
3951
3952struct packet_offload *gro_find_complete_by_type(__be16 type)
3953{
3954 struct list_head *offload_head = &offload_base;
3955 struct packet_offload *ptype;
3956
3957 list_for_each_entry_rcu(ptype, offload_head, list) {
3958 if (ptype->type != type || !ptype->callbacks.gro_complete)
3959 continue;
3960 return ptype;
3961 }
3962 return NULL;
3963}
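For context, a hedged sketch of how a protocol ends up on the offload_base list walked by dev_gro_receive() and napi_gro_complete(): it fills a struct packet_offload and registers it with dev_add_offload() (assumed available from <linux/netdevice.h>). The myproto_* names and ETH_P_MYPROTO are hypothetical, and the callback bodies are elided.

static struct sk_buff **myproto_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb);
static int myproto_gro_complete(struct sk_buff *skb, int nhoff);

static struct packet_offload myproto_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_MYPROTO),	/* hypothetical ethertype */
	.callbacks = {
		.gro_receive	= myproto_gro_receive,
		.gro_complete	= myproto_gro_complete,
	},
};

static int __init myproto_offload_init(void)
{
	dev_add_offload(&myproto_offload);
	return 0;
}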
5d38a079 3964
bb728820 3965static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 3966{
5d0d9be8
HX
3967 switch (ret) {
3968 case GRO_NORMAL:
c7c4b3b6
BH
3969 if (netif_receive_skb(skb))
3970 ret = GRO_DROP;
3971 break;
5d38a079 3972
5d0d9be8 3973 case GRO_DROP:
5d38a079
HX
3974 kfree_skb(skb);
3975 break;
5b252f0c 3976
daa86548 3977 case GRO_MERGED_FREE:
d7e8883c
ED
3978 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3979 kmem_cache_free(skbuff_head_cache, skb);
3980 else
3981 __kfree_skb(skb);
daa86548
ED
3982 break;
3983
5b252f0c
BH
3984 case GRO_HELD:
3985 case GRO_MERGED:
3986 break;
5d38a079
HX
3987 }
3988
c7c4b3b6 3989 return ret;
5d0d9be8 3990}
5d0d9be8 3991
c7c4b3b6 3992gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 3993{
89c5fa33 3994 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
d565b0a1
HX
3995}
3996EXPORT_SYMBOL(napi_gro_receive);
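A sketch of the poll-side caller of napi_gro_receive(): struct my_ring, my_fetch_skb() and my_enable_rx_irq() are invented driver details. Frames are pushed through GRO, and NAPI is completed once the ring runs dry within the budget.

struct my_ring {
	struct napi_struct napi;
	struct net_device *netdev;
	/* ... descriptor ring state ... */
};

static struct sk_buff *my_fetch_skb(struct my_ring *ring);	/* hypothetical */
static void my_enable_rx_irq(struct my_ring *ring);		/* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = my_fetch_skb(ring);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, ring->netdev);
		napi_gro_receive(napi, skb);
		work++;
	}

	if (work < budget) {
		napi_complete(napi);	/* flushes gro_list, clears SCHED */
		my_enable_rx_irq(ring);
	}
	return work;
}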
3997
d0c2b0d2 3998static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
96e93eab 3999{
96e93eab 4000 __skb_pull(skb, skb_headlen(skb));
2a2a459e
ED
4001 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4002 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3701e513 4003 skb->vlan_tci = 0;
66c46d74 4004 skb->dev = napi->dev;
6d152e23 4005 skb->skb_iif = 0;
96e93eab
HX
4006
4007 napi->skb = skb;
4008}
96e93eab 4009
76620aaf 4010struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 4011{
5d38a079 4012 struct sk_buff *skb = napi->skb;
5d38a079
HX
4013
4014 if (!skb) {
89d71a66 4015 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
84b9cd63 4016 napi->skb = skb;
80595d59 4017 }
96e93eab
HX
4018 return skb;
4019}
76620aaf 4020EXPORT_SYMBOL(napi_get_frags);
96e93eab 4021
bb728820 4022static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
c7c4b3b6 4023 gro_result_t ret)
96e93eab 4024{
5d0d9be8
HX
4025 switch (ret) {
4026 case GRO_NORMAL:
299603e8 4027 if (netif_receive_skb(skb))
c7c4b3b6 4028 ret = GRO_DROP;
86911732 4029 break;
5d38a079 4030
5d0d9be8 4031 case GRO_DROP:
5d0d9be8
HX
4032 case GRO_MERGED_FREE:
4033 napi_reuse_skb(napi, skb);
4034 break;
5b252f0c 4035
299603e8 4036 case GRO_HELD:
5b252f0c
BH
4037 case GRO_MERGED:
4038 break;
5d0d9be8 4039 }
5d38a079 4040
c7c4b3b6 4041 return ret;
5d38a079 4042}
5d0d9be8 4043
4adb9c4a 4044static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
76620aaf
HX
4045{
4046 struct sk_buff *skb = napi->skb;
76620aaf
HX
4047
4048 napi->skb = NULL;
4049
299603e8
JC
4050 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
4051 napi_reuse_skb(napi, skb);
4052 return NULL;
76620aaf 4053 }
299603e8 4054 skb->protocol = eth_type_trans(skb, skb->dev);
76620aaf 4055
76620aaf
HX
4056 return skb;
4057}
76620aaf 4058
c7c4b3b6 4059gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 4060{
76620aaf 4061 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
4062
4063 if (!skb)
c7c4b3b6 4064 return GRO_DROP;
5d0d9be8 4065
89c5fa33 4066 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5d0d9be8 4067}
5d38a079
HX
4068EXPORT_SYMBOL(napi_gro_frags);
4069
e326bed2 4070/*
855abcf0 4071 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
e326bed2
ED
4072 * Note: called with local irq disabled, but exits with local irq enabled.
4073 */
4074static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4075{
4076#ifdef CONFIG_RPS
4077 struct softnet_data *remsd = sd->rps_ipi_list;
4078
4079 if (remsd) {
4080 sd->rps_ipi_list = NULL;
4081
4082 local_irq_enable();
4083
4084 /* Send pending IPI's to kick RPS processing on remote cpus. */
4085 while (remsd) {
4086 struct softnet_data *next = remsd->rps_ipi_next;
4087
4088 if (cpu_online(remsd->cpu))
4089 __smp_call_function_single(remsd->cpu,
4090 &remsd->csd, 0);
4091 remsd = next;
4092 }
4093 } else
4094#endif
4095 local_irq_enable();
4096}
4097
bea3348e 4098static int process_backlog(struct napi_struct *napi, int quota)
1da177e4
LT
4099{
4100 int work = 0;
eecfd7c4 4101 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
1da177e4 4102
e326bed2
ED
4103#ifdef CONFIG_RPS
4104 /* Check if we have pending IPIs; it's better to send them now
4105 * than to wait for net_rx_action() to end.
4106 */
4107 if (sd->rps_ipi_list) {
4108 local_irq_disable();
4109 net_rps_action_and_irq_enable(sd);
4110 }
4111#endif
bea3348e 4112 napi->weight = weight_p;
6e7676c1
CG
4113 local_irq_disable();
4114 while (work < quota) {
1da177e4 4115 struct sk_buff *skb;
6e7676c1
CG
4116 unsigned int qlen;
4117
4118 while ((skb = __skb_dequeue(&sd->process_queue))) {
4119 local_irq_enable();
4120 __netif_receive_skb(skb);
6e7676c1 4121 local_irq_disable();
76cc8b13
TH
4122 input_queue_head_incr(sd);
4123 if (++work >= quota) {
4124 local_irq_enable();
4125 return work;
4126 }
6e7676c1 4127 }
1da177e4 4128
e36fa2f7 4129 rps_lock(sd);
6e7676c1 4130 qlen = skb_queue_len(&sd->input_pkt_queue);
76cc8b13 4131 if (qlen)
6e7676c1
CG
4132 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4133 &sd->process_queue);
76cc8b13 4134
6e7676c1 4135 if (qlen < quota - work) {
eecfd7c4
ED
4136 /*
4137 * Inline a custom version of __napi_complete().
4138 * Only the current cpu owns and manipulates this napi,
4139 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
4140 * We can use a plain write instead of clear_bit(),
4141 * and we don't need an smp_mb() memory barrier.
4142 */
4143 list_del(&napi->poll_list);
4144 napi->state = 0;
4145
6e7676c1 4146 quota = work + qlen;
bea3348e 4147 }
e36fa2f7 4148 rps_unlock(sd);
6e7676c1
CG
4149 }
4150 local_irq_enable();
1da177e4 4151
bea3348e
SH
4152 return work;
4153}
1da177e4 4154
bea3348e
SH
4155/**
4156 * __napi_schedule - schedule for receive
c4ea43c5 4157 * @n: entry to schedule
bea3348e
SH
4158 *
4159 * The entry's receive function will be scheduled to run
4160 */
b5606c2d 4161void __napi_schedule(struct napi_struct *n)
bea3348e
SH
4162{
4163 unsigned long flags;
1da177e4 4164
bea3348e 4165 local_irq_save(flags);
eecfd7c4 4166 ____napi_schedule(&__get_cpu_var(softnet_data), n);
bea3348e 4167 local_irq_restore(flags);
1da177e4 4168}
bea3348e
SH
4169EXPORT_SYMBOL(__napi_schedule);
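The interrupt-side counterpart of the poll sketch above (again hedged): mask further rx interrupts and hand the work to NAPI. my_disable_rx_irq() is invented and struct my_ring is the one sketched earlier; napi_schedule_prep() is the usual helper from <linux/netdevice.h>.

#include <linux/interrupt.h>

static void my_disable_rx_irq(struct my_ring *ring);	/* hypothetical */

static irqreturn_t my_rx_irq(int irq, void *dev_id)
{
	struct my_ring *ring = dev_id;

	if (napi_schedule_prep(&ring->napi)) {
		my_disable_rx_irq(ring);	/* poll callback re-enables it */
		__napi_schedule(&ring->napi);
	}
	return IRQ_HANDLED;
}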
4170
d565b0a1
HX
4171void __napi_complete(struct napi_struct *n)
4172{
4173 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4174 BUG_ON(n->gro_list);
4175
4176 list_del(&n->poll_list);
4177 smp_mb__before_clear_bit();
4178 clear_bit(NAPI_STATE_SCHED, &n->state);
4179}
4180EXPORT_SYMBOL(__napi_complete);
4181
4182void napi_complete(struct napi_struct *n)
4183{
4184 unsigned long flags;
4185
4186 /*
4187 * don't let napi dequeue from the cpu poll list
4188 * just in case it's running on a different cpu
4189 */
4190 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4191 return;
4192
2e71a6f8 4193 napi_gro_flush(n, false);
d565b0a1
HX
4194 local_irq_save(flags);
4195 __napi_complete(n);
4196 local_irq_restore(flags);
4197}
4198EXPORT_SYMBOL(napi_complete);
4199
af12fa6e
ET
4200/* must be called under rcu_read_lock(), as we dont take a reference */
4201struct napi_struct *napi_by_id(unsigned int napi_id)
4202{
4203 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4204 struct napi_struct *napi;
4205
4206 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4207 if (napi->napi_id == napi_id)
4208 return napi;
4209
4210 return NULL;
4211}
4212EXPORT_SYMBOL_GPL(napi_by_id);
4213
4214void napi_hash_add(struct napi_struct *napi)
4215{
4216 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4217
4218 spin_lock(&napi_hash_lock);
4219
4220 /* 0 is not a valid id; we also skip an id that is already taken.
4221 * We expect both events to be extremely rare.
4222 */
4223 napi->napi_id = 0;
4224 while (!napi->napi_id) {
4225 napi->napi_id = ++napi_gen_id;
4226 if (napi_by_id(napi->napi_id))
4227 napi->napi_id = 0;
4228 }
4229
4230 hlist_add_head_rcu(&napi->napi_hash_node,
4231 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4232
4233 spin_unlock(&napi_hash_lock);
4234 }
4235}
4236EXPORT_SYMBOL_GPL(napi_hash_add);
4237
4238/* Warning : the caller is responsible for making sure an rcu grace period
4239 * is respected before freeing the memory containing @napi
4240 */
4241void napi_hash_del(struct napi_struct *napi)
4242{
4243 spin_lock(&napi_hash_lock);
4244
4245 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4246 hlist_del_rcu(&napi->napi_hash_node);
4247
4248 spin_unlock(&napi_hash_lock);
4249}
4250EXPORT_SYMBOL_GPL(napi_hash_del);
4251
d565b0a1
HX
4252void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4253 int (*poll)(struct napi_struct *, int), int weight)
4254{
4255 INIT_LIST_HEAD(&napi->poll_list);
4ae5544f 4256 napi->gro_count = 0;
d565b0a1 4257 napi->gro_list = NULL;
5d38a079 4258 napi->skb = NULL;
d565b0a1 4259 napi->poll = poll;
82dc3c63
ED
4260 if (weight > NAPI_POLL_WEIGHT)
4261 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4262 weight, dev->name);
d565b0a1
HX
4263 napi->weight = weight;
4264 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 4265 napi->dev = dev;
5d38a079 4266#ifdef CONFIG_NETPOLL
d565b0a1
HX
4267 spin_lock_init(&napi->poll_lock);
4268 napi->poll_owner = -1;
4269#endif
4270 set_bit(NAPI_STATE_SCHED, &napi->state);
4271}
4272EXPORT_SYMBOL(netif_napi_add);
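Setup-side sketch tying the pieces together (assuming my_poll and struct my_ring from the earlier sketches, stored as the netdev's private data): register the poll callback at probe time and enable it when the device is opened.

static int my_setup_napi(struct net_device *netdev, struct my_ring *ring)
{
	ring->netdev = netdev;
	netif_napi_add(netdev, &ring->napi, my_poll, NAPI_POLL_WEIGHT);
	return 0;
}

static int my_open(struct net_device *netdev)
{
	struct my_ring *ring = netdev_priv(netdev);

	napi_enable(&ring->napi);	/* clears the SCHED bit set at add time */
	return 0;
}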
4273
4274void netif_napi_del(struct napi_struct *napi)
4275{
d7b06636 4276 list_del_init(&napi->dev_list);
76620aaf 4277 napi_free_frags(napi);
d565b0a1 4278
289dccbe 4279 kfree_skb_list(napi->gro_list);
d565b0a1 4280 napi->gro_list = NULL;
4ae5544f 4281 napi->gro_count = 0;
d565b0a1
HX
4282}
4283EXPORT_SYMBOL(netif_napi_del);
4284
1da177e4
LT
4285static void net_rx_action(struct softirq_action *h)
4286{
e326bed2 4287 struct softnet_data *sd = &__get_cpu_var(softnet_data);
24f8b238 4288 unsigned long time_limit = jiffies + 2;
51b0bded 4289 int budget = netdev_budget;
53fb95d3
MM
4290 void *have;
4291
1da177e4
LT
4292 local_irq_disable();
4293
e326bed2 4294 while (!list_empty(&sd->poll_list)) {
bea3348e
SH
4295 struct napi_struct *n;
4296 int work, weight;
1da177e4 4297
bea3348e 4298 /* If the softirq window is exhausted then punt.
24f8b238
SH
4299 * Allow this to run for 2 jiffies, since that allows
4300 * an average latency of 1.5/HZ.
bea3348e 4301 */
d1f41b67 4302 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
1da177e4
LT
4303 goto softnet_break;
4304
4305 local_irq_enable();
4306
bea3348e
SH
4307 /* Even though interrupts have been re-enabled, this
4308 * access is safe because interrupts can only add new
4309 * entries to the tail of this list, and only ->poll()
4310 * calls can remove this head entry from the list.
4311 */
e326bed2 4312 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
1da177e4 4313
bea3348e
SH
4314 have = netpoll_poll_lock(n);
4315
4316 weight = n->weight;
4317
0a7606c1
DM
4318 /* This NAPI_STATE_SCHED test is for avoiding a race
4319 * with netpoll's poll_napi(). Only the entity which
4320 * obtains the lock and sees NAPI_STATE_SCHED set will
4321 * actually make the ->poll() call. Therefore we avoid
25985edc 4322 * accidentally calling ->poll() when NAPI is not scheduled.
0a7606c1
DM
4323 */
4324 work = 0;
4ea7e386 4325 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
0a7606c1 4326 work = n->poll(n, weight);
4ea7e386
NH
4327 trace_napi_poll(n);
4328 }
bea3348e
SH
4329
4330 WARN_ON_ONCE(work > weight);
4331
4332 budget -= work;
4333
4334 local_irq_disable();
4335
4336 /* Drivers must not modify the NAPI state if they
4337 * consume the entire weight. In such cases this code
4338 * still "owns" the NAPI instance and therefore can
4339 * move the instance around on the list at-will.
4340 */
fed17f30 4341 if (unlikely(work == weight)) {
ff780cd8
HX
4342 if (unlikely(napi_disable_pending(n))) {
4343 local_irq_enable();
4344 napi_complete(n);
4345 local_irq_disable();
2e71a6f8
ED
4346 } else {
4347 if (n->gro_list) {
4348 /* flush too old packets
4349 * If HZ < 1000, flush all packets.
4350 */
4351 local_irq_enable();
4352 napi_gro_flush(n, HZ >= 1000);
4353 local_irq_disable();
4354 }
e326bed2 4355 list_move_tail(&n->poll_list, &sd->poll_list);
2e71a6f8 4356 }
fed17f30 4357 }
bea3348e
SH
4358
4359 netpoll_poll_unlock(have);
1da177e4
LT
4360 }
4361out:
e326bed2 4362 net_rps_action_and_irq_enable(sd);
0a9627f2 4363
db217334
CL
4364#ifdef CONFIG_NET_DMA
4365 /*
4366 * There may not be any more sk_buffs coming right now, so push
4367 * any pending DMA copies to hardware
4368 */
2ba05622 4369 dma_issue_pending_all();
db217334 4370#endif
bea3348e 4371
1da177e4
LT
4372 return;
4373
4374softnet_break:
dee42870 4375 sd->time_squeeze++;
1da177e4
LT
4376 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4377 goto out;
4378}
4379
aa9d8560 4380struct netdev_adjacent {
9ff162a8 4381 struct net_device *dev;
5d261913
VF
4382
4383 /* upper master flag, there can only be one master device per list */
9ff162a8 4384 bool master;
5d261913 4385
5d261913
VF
4386 /* counter for the number of times this device was added to us */
4387 u16 ref_nr;
4388
402dae96
VF
4389 /* private field for the users */
4390 void *private;
4391
9ff162a8
JP
4392 struct list_head list;
4393 struct rcu_head rcu;
9ff162a8
JP
4394};
4395
5d261913
VF
4396static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4397 struct net_device *adj_dev,
2f268f12 4398 struct list_head *adj_list)
9ff162a8 4399{
5d261913 4400 struct netdev_adjacent *adj;
5d261913 4401
2f268f12 4402 list_for_each_entry(adj, adj_list, list) {
5d261913
VF
4403 if (adj->dev == adj_dev)
4404 return adj;
9ff162a8
JP
4405 }
4406 return NULL;
4407}
4408
4409/**
4410 * netdev_has_upper_dev - Check if device is linked to an upper device
4411 * @dev: device
4412 * @upper_dev: upper device to check
4413 *
4414 * Find out if a device is linked to specified upper device and return true
4415 * in case it is. Note that this checks only immediate upper device,
4416 * not through a complete stack of devices. The caller must hold the RTNL lock.
4417 */
4418bool netdev_has_upper_dev(struct net_device *dev,
4419 struct net_device *upper_dev)
4420{
4421 ASSERT_RTNL();
4422
2f268f12 4423 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
9ff162a8
JP
4424}
4425EXPORT_SYMBOL(netdev_has_upper_dev);
4426
4427/**
4428 * netdev_has_any_upper_dev - Check if device is linked to some device
4429 * @dev: device
4430 *
4431 * Find out if a device is linked to an upper device and return true in case
4432 * it is. The caller must hold the RTNL lock.
4433 */
1d143d9f 4434static bool netdev_has_any_upper_dev(struct net_device *dev)
9ff162a8
JP
4435{
4436 ASSERT_RTNL();
4437
2f268f12 4438 return !list_empty(&dev->all_adj_list.upper);
9ff162a8 4439}
9ff162a8
JP
4440
4441/**
4442 * netdev_master_upper_dev_get - Get master upper device
4443 * @dev: device
4444 *
4445 * Find a master upper device and return pointer to it or NULL in case
4446 * it's not there. The caller must hold the RTNL lock.
4447 */
4448struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4449{
aa9d8560 4450 struct netdev_adjacent *upper;
9ff162a8
JP
4451
4452 ASSERT_RTNL();
4453
2f268f12 4454 if (list_empty(&dev->adj_list.upper))
9ff162a8
JP
4455 return NULL;
4456
2f268f12 4457 upper = list_first_entry(&dev->adj_list.upper,
aa9d8560 4458 struct netdev_adjacent, list);
9ff162a8
JP
4459 if (likely(upper->master))
4460 return upper->dev;
4461 return NULL;
4462}
4463EXPORT_SYMBOL(netdev_master_upper_dev_get);
4464
b6ccba4c
VF
4465void *netdev_adjacent_get_private(struct list_head *adj_list)
4466{
4467 struct netdev_adjacent *adj;
4468
4469 adj = list_entry(adj_list, struct netdev_adjacent, list);
4470
4471 return adj->private;
4472}
4473EXPORT_SYMBOL(netdev_adjacent_get_private);
4474
31088a11
VF
4475/**
4476 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
48311f46
VF
4477 * @dev: device
4478 * @iter: list_head ** of the current position
4479 *
4480 * Gets the next device from the dev's upper list, starting from iter
4481 * position. The caller must hold RCU read lock.
4482 */
2f268f12
VF
4483struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4484 struct list_head **iter)
48311f46
VF
4485{
4486 struct netdev_adjacent *upper;
4487
85328240 4488 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
48311f46
VF
4489
4490 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4491
2f268f12 4492 if (&upper->list == &dev->all_adj_list.upper)
48311f46
VF
4493 return NULL;
4494
4495 *iter = &upper->list;
4496
4497 return upper->dev;
4498}
2f268f12 4499EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
48311f46 4500
31088a11
VF
4501/**
4502 * netdev_lower_get_next_private - Get the next ->private from the
4503 * lower neighbour list
4504 * @dev: device
4505 * @iter: list_head ** of the current position
4506 *
4507 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4508 * list, starting from iter position. The caller must either hold the
4509 * RTNL lock or its own locking that guarantees that the neighbour lower
4510 * list will remain unchanged.
4511 */
4512void *netdev_lower_get_next_private(struct net_device *dev,
4513 struct list_head **iter)
4514{
4515 struct netdev_adjacent *lower;
4516
4517 lower = list_entry(*iter, struct netdev_adjacent, list);
4518
4519 if (&lower->list == &dev->adj_list.lower)
4520 return NULL;
4521
4522 if (iter)
4523 *iter = lower->list.next;
4524
4525 return lower->private;
4526}
4527EXPORT_SYMBOL(netdev_lower_get_next_private);
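A minimal sketch of walking every lower device's private pointer with the helper above, under the stated assumption that the iterator starts at the first entry of adj_list.lower (matching the list handling inside netdev_lower_get_next_private()) and that RTNL or equivalent locking keeps the list stable. struct my_priv and my_handle_lower() are invented.

struct my_priv;					/* opaque example type */
static void my_handle_lower(struct my_priv *p);	/* hypothetical */

static void my_walk_lowers(struct net_device *dev)
{
	struct list_head *iter = dev->adj_list.lower.next;
	struct my_priv *priv;

	/* RTNL (or the caller's own locking) must keep the list unchanged */
	while ((priv = netdev_lower_get_next_private(dev, &iter)) != NULL)
		my_handle_lower(priv);
}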
4528
4529/**
4530 * netdev_lower_get_next_private_rcu - Get the next ->private from the
4531 * lower neighbour list, RCU
4532 * variant
4533 * @dev: device
4534 * @iter: list_head ** of the current position
4535 *
4536 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4537 * list, starting from iter position. The caller must hold RCU read lock.
4538 */
4539void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4540 struct list_head **iter)
4541{
4542 struct netdev_adjacent *lower;
4543
4544 WARN_ON_ONCE(!rcu_read_lock_held());
4545
4546 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4547
4548 if (&lower->list == &dev->adj_list.lower)
4549 return NULL;
4550
4551 if (iter)
4552 *iter = &lower->list;
4553
4554 return lower->private;
4555}
4556EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4557
e001bfad 4558/**
4559 * netdev_lower_get_first_private_rcu - Get the first ->private from the
4560 * lower neighbour list, RCU
4561 * variant
4562 * @dev: device
4563 *
4564 * Gets the first netdev_adjacent->private from the dev's lower neighbour
4565 * list. The caller must hold RCU read lock.
4566 */
4567void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4568{
4569 struct netdev_adjacent *lower;
4570
4571 lower = list_first_or_null_rcu(&dev->adj_list.lower,
4572 struct netdev_adjacent, list);
4573 if (lower)
4574 return lower->private;
4575 return NULL;
4576}
4577EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4578
9ff162a8
JP
4579/**
4580 * netdev_master_upper_dev_get_rcu - Get master upper device
4581 * @dev: device
4582 *
4583 * Find a master upper device and return pointer to it or NULL in case
4584 * it's not there. The caller must hold the RCU read lock.
4585 */
4586struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4587{
aa9d8560 4588 struct netdev_adjacent *upper;
9ff162a8 4589
2f268f12 4590 upper = list_first_or_null_rcu(&dev->adj_list.upper,
aa9d8560 4591 struct netdev_adjacent, list);
9ff162a8
JP
4592 if (upper && likely(upper->master))
4593 return upper->dev;
4594 return NULL;
4595}
4596EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4597
5d261913
VF
4598static int __netdev_adjacent_dev_insert(struct net_device *dev,
4599 struct net_device *adj_dev,
7863c054 4600 struct list_head *dev_list,
402dae96 4601 void *private, bool master)
5d261913
VF
4602{
4603 struct netdev_adjacent *adj;
5831d66e 4604 char linkname[IFNAMSIZ+7];
842d67a7 4605 int ret;
5d261913 4606
7863c054 4607 adj = __netdev_find_adj(dev, adj_dev, dev_list);
5d261913
VF
4608
4609 if (adj) {
5d261913
VF
4610 adj->ref_nr++;
4611 return 0;
4612 }
4613
4614 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
4615 if (!adj)
4616 return -ENOMEM;
4617
4618 adj->dev = adj_dev;
4619 adj->master = master;
5d261913 4620 adj->ref_nr = 1;
402dae96 4621 adj->private = private;
5d261913 4622 dev_hold(adj_dev);
2f268f12
VF
4623
4624 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
4625 adj_dev->name, dev->name, adj_dev->name);
5d261913 4626
5831d66e
VF
4627 if (dev_list == &dev->adj_list.lower) {
4628 sprintf(linkname, "lower_%s", adj_dev->name);
4629 ret = sysfs_create_link(&(dev->dev.kobj),
4630 &(adj_dev->dev.kobj), linkname);
4631 if (ret)
4632 goto free_adj;
4633 } else if (dev_list == &dev->adj_list.upper) {
4634 sprintf(linkname, "upper_%s", adj_dev->name);
4635 ret = sysfs_create_link(&(dev->dev.kobj),
4636 &(adj_dev->dev.kobj), linkname);
4637 if (ret)
4638 goto free_adj;
4639 }
4640
7863c054 4641 /* Ensure that master link is always the first item in list. */
842d67a7
VF
4642 if (master) {
4643 ret = sysfs_create_link(&(dev->dev.kobj),
4644 &(adj_dev->dev.kobj), "master");
4645 if (ret)
5831d66e 4646 goto remove_symlinks;
842d67a7 4647
7863c054 4648 list_add_rcu(&adj->list, dev_list);
842d67a7 4649 } else {
7863c054 4650 list_add_tail_rcu(&adj->list, dev_list);
842d67a7 4651 }
5d261913
VF
4652
4653 return 0;
842d67a7 4654
5831d66e
VF
4655remove_symlinks:
4656 if (dev_list == &dev->adj_list.lower) {
4657 sprintf(linkname, "lower_%s", adj_dev->name);
4658 sysfs_remove_link(&(dev->dev.kobj), linkname);
4659 } else if (dev_list == &dev->adj_list.upper) {
4660 sprintf(linkname, "upper_%s", adj_dev->name);
4661 sysfs_remove_link(&(dev->dev.kobj), linkname);
4662 }
4663
842d67a7
VF
4664free_adj:
4665 kfree(adj);
974daef7 4666 dev_put(adj_dev);
842d67a7
VF
4667
4668 return ret;
5d261913
VF
4669}
4670
1d143d9f 4671static void __netdev_adjacent_dev_remove(struct net_device *dev,
4672 struct net_device *adj_dev,
4673 struct list_head *dev_list)
5d261913
VF
4674{
4675 struct netdev_adjacent *adj;
5831d66e 4676 char linkname[IFNAMSIZ+7];
5d261913 4677
7863c054 4678 adj = __netdev_find_adj(dev, adj_dev, dev_list);
5d261913 4679
2f268f12
VF
4680 if (!adj) {
4681 pr_err("tried to remove device %s from %s\n",
4682 dev->name, adj_dev->name);
5d261913 4683 BUG();
2f268f12 4684 }
5d261913
VF
4685
4686 if (adj->ref_nr > 1) {
2f268f12
VF
4687 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
4688 adj->ref_nr-1);
5d261913
VF
4689 adj->ref_nr--;
4690 return;
4691 }
4692
842d67a7
VF
4693 if (adj->master)
4694 sysfs_remove_link(&(dev->dev.kobj), "master");
4695
5831d66e
VF
4696 if (dev_list == &dev->adj_list.lower) {
4697 sprintf(linkname, "lower_%s", adj_dev->name);
4698 sysfs_remove_link(&(dev->dev.kobj), linkname);
4699 } else if (dev_list == &dev->adj_list.upper) {
4700 sprintf(linkname, "upper_%s", adj_dev->name);
4701 sysfs_remove_link(&(dev->dev.kobj), linkname);
4702 }
4703
5d261913 4704 list_del_rcu(&adj->list);
2f268f12
VF
4705 pr_debug("dev_put for %s, because link removed from %s to %s\n",
4706 adj_dev->name, dev->name, adj_dev->name);
5d261913
VF
4707 dev_put(adj_dev);
4708 kfree_rcu(adj, rcu);
4709}
4710
1d143d9f 4711static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
4712 struct net_device *upper_dev,
4713 struct list_head *up_list,
4714 struct list_head *down_list,
4715 void *private, bool master)
5d261913
VF
4716{
4717 int ret;
4718
402dae96
VF
4719 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
4720 master);
5d261913
VF
4721 if (ret)
4722 return ret;
4723
402dae96
VF
4724 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
4725 false);
5d261913 4726 if (ret) {
2f268f12 4727 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5d261913
VF
4728 return ret;
4729 }
4730
4731 return 0;
4732}
4733
1d143d9f 4734static int __netdev_adjacent_dev_link(struct net_device *dev,
4735 struct net_device *upper_dev)
5d261913 4736{
2f268f12
VF
4737 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
4738 &dev->all_adj_list.upper,
4739 &upper_dev->all_adj_list.lower,
402dae96 4740 NULL, false);
5d261913
VF
4741}
4742
1d143d9f 4743static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
4744 struct net_device *upper_dev,
4745 struct list_head *up_list,
4746 struct list_head *down_list)
5d261913 4747{
2f268f12
VF
4748 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
4749 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5d261913
VF
4750}
4751
1d143d9f 4752static void __netdev_adjacent_dev_unlink(struct net_device *dev,
4753 struct net_device *upper_dev)
5d261913 4754{
2f268f12
VF
4755 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4756 &dev->all_adj_list.upper,
4757 &upper_dev->all_adj_list.lower);
4758}
4759
1d143d9f 4760static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
4761 struct net_device *upper_dev,
4762 void *private, bool master)
2f268f12
VF
4763{
4764 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
4765
4766 if (ret)
4767 return ret;
4768
4769 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
4770 &dev->adj_list.upper,
4771 &upper_dev->adj_list.lower,
402dae96 4772 private, master);
2f268f12
VF
4773 if (ret) {
4774 __netdev_adjacent_dev_unlink(dev, upper_dev);
4775 return ret;
4776 }
4777
4778 return 0;
5d261913
VF
4779}
4780
1d143d9f 4781static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
4782 struct net_device *upper_dev)
2f268f12
VF
4783{
4784 __netdev_adjacent_dev_unlink(dev, upper_dev);
4785 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4786 &dev->adj_list.upper,
4787 &upper_dev->adj_list.lower);
4788}
5d261913 4789
9ff162a8 4790static int __netdev_upper_dev_link(struct net_device *dev,
402dae96
VF
4791 struct net_device *upper_dev, bool master,
4792 void *private)
9ff162a8 4793{
5d261913
VF
4794 struct netdev_adjacent *i, *j, *to_i, *to_j;
4795 int ret = 0;
9ff162a8
JP
4796
4797 ASSERT_RTNL();
4798
4799 if (dev == upper_dev)
4800 return -EBUSY;
4801
4802 /* To prevent loops, check if dev is not upper device to upper_dev. */
2f268f12 4803 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
9ff162a8
JP
4804 return -EBUSY;
4805
2f268f12 4806 if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
9ff162a8
JP
4807 return -EEXIST;
4808
4809 if (master && netdev_master_upper_dev_get(dev))
4810 return -EBUSY;
4811
402dae96
VF
4812 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
4813 master);
5d261913
VF
4814 if (ret)
4815 return ret;
9ff162a8 4816
5d261913 4817 /* Now that we linked these devs, make all the upper_dev's
2f268f12 4818 * all_adj_list.upper visible to every dev's all_adj_list.lower and vice
5d261913
VF
4819 * versa, and don't forget the devices themselves. All of these
4820 * links are non-neighbours.
4821 */
2f268f12
VF
4822 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4823 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
4824 pr_debug("Interlinking %s with %s, non-neighbour\n",
4825 i->dev->name, j->dev->name);
5d261913
VF
4826 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
4827 if (ret)
4828 goto rollback_mesh;
4829 }
4830 }
4831
4832 /* add dev to every upper_dev's upper device */
2f268f12
VF
4833 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
4834 pr_debug("linking %s's upper device %s with %s\n",
4835 upper_dev->name, i->dev->name, dev->name);
5d261913
VF
4836 ret = __netdev_adjacent_dev_link(dev, i->dev);
4837 if (ret)
4838 goto rollback_upper_mesh;
4839 }
4840
4841 /* add upper_dev to every dev's lower device */
2f268f12
VF
4842 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4843 pr_debug("linking %s's lower device %s with %s\n", dev->name,
4844 i->dev->name, upper_dev->name);
5d261913
VF
4845 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
4846 if (ret)
4847 goto rollback_lower_mesh;
4848 }
9ff162a8 4849
42e52bf9 4850 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
9ff162a8 4851 return 0;
5d261913
VF
4852
4853rollback_lower_mesh:
4854 to_i = i;
2f268f12 4855 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5d261913
VF
4856 if (i == to_i)
4857 break;
4858 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
4859 }
4860
4861 i = NULL;
4862
4863rollback_upper_mesh:
4864 to_i = i;
2f268f12 4865 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5d261913
VF
4866 if (i == to_i)
4867 break;
4868 __netdev_adjacent_dev_unlink(dev, i->dev);
4869 }
4870
4871 i = j = NULL;
4872
4873rollback_mesh:
4874 to_i = i;
4875 to_j = j;
2f268f12
VF
4876 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4877 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5d261913
VF
4878 if (i == to_i && j == to_j)
4879 break;
4880 __netdev_adjacent_dev_unlink(i->dev, j->dev);
4881 }
4882 if (i == to_i)
4883 break;
4884 }
4885
2f268f12 4886 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
4887
4888 return ret;
9ff162a8
JP
4889}
4890
4891/**
4892 * netdev_upper_dev_link - Add a link to the upper device
4893 * @dev: device
4894 * @upper_dev: new upper device
4895 *
4896 * Adds a link to device which is upper to this one. The caller must hold
4897 * the RTNL lock. On a failure a negative errno code is returned.
4898 * On success the reference counts are adjusted and the function
4899 * returns zero.
4900 */
4901int netdev_upper_dev_link(struct net_device *dev,
4902 struct net_device *upper_dev)
4903{
402dae96 4904 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
9ff162a8
JP
4905}
4906EXPORT_SYMBOL(netdev_upper_dev_link);
4907
4908/**
4909 * netdev_master_upper_dev_link - Add a master link to the upper device
4910 * @dev: device
4911 * @upper_dev: new upper device
4912 *
4913 * Adds a link to device which is upper to this one. In this case, only
4914 * one master upper device can be linked, although other non-master devices
4915 * might be linked as well. The caller must hold the RTNL lock.
4916 * On a failure a negative errno code is returned. On success the reference
4917 * counts are adjusted and the function returns zero.
4918 */
4919int netdev_master_upper_dev_link(struct net_device *dev,
4920 struct net_device *upper_dev)
4921{
402dae96 4922 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
9ff162a8
JP
4923}
4924EXPORT_SYMBOL(netdev_master_upper_dev_link);
4925
402dae96
VF
4926int netdev_master_upper_dev_link_private(struct net_device *dev,
4927 struct net_device *upper_dev,
4928 void *private)
4929{
4930 return __netdev_upper_dev_link(dev, upper_dev, true, private);
4931}
4932EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
4933
9ff162a8
JP
4934/**
4935 * netdev_upper_dev_unlink - Removes a link to upper device
4936 * @dev: device
4937 * @upper_dev: upper device to unlink
4938 *
4939 * Removes a link to a device which is upper to this one. The caller must hold
4940 * the RTNL lock.
4941 */
4942void netdev_upper_dev_unlink(struct net_device *dev,
4943 struct net_device *upper_dev)
4944{
5d261913 4945 struct netdev_adjacent *i, *j;
9ff162a8
JP
4946 ASSERT_RTNL();
4947
2f268f12 4948 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
4949
4950 /* Here is the tricky part. We must remove all dev's lower
4951 * devices from all upper_dev's upper devices and vice
4952 * versa, to maintain the graph relationship.
4953 */
2f268f12
VF
4954 list_for_each_entry(i, &dev->all_adj_list.lower, list)
4955 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5d261913
VF
4956 __netdev_adjacent_dev_unlink(i->dev, j->dev);
4957
4958 /* also remove the devices themselves from the lower/upper device
4959 * lists
4960 */
2f268f12 4961 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5d261913
VF
4962 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
4963
2f268f12 4964 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5d261913
VF
4965 __netdev_adjacent_dev_unlink(dev, i->dev);
4966
42e52bf9 4967 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
9ff162a8
JP
4968}
4969EXPORT_SYMBOL(netdev_upper_dev_unlink);
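A sketch of a bonding/team-style master driver using the adjacency API above to tie a lower device to its master and release it again; the my_* names are invented and driver-specific setup is elided. RTNL must be held around both calls, as the kernel-doc requires.

static int my_enslave(struct net_device *master, struct net_device *slave)
{
	int err;

	ASSERT_RTNL();
	err = netdev_master_upper_dev_link(slave, master);
	if (err)
		return err;
	/* ... driver-specific slave setup ... */
	return 0;
}

static void my_release(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(slave, master);
}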
4970
402dae96
VF
4971void *netdev_lower_dev_get_private(struct net_device *dev,
4972 struct net_device *lower_dev)
4973{
4974 struct netdev_adjacent *lower;
4975
4976 if (!lower_dev)
4977 return NULL;
4978 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
4979 if (!lower)
4980 return NULL;
4981
4982 return lower->private;
4983}
4984EXPORT_SYMBOL(netdev_lower_dev_get_private);
4985
b6c40d68
PM
4986static void dev_change_rx_flags(struct net_device *dev, int flags)
4987{
d314774c
SH
4988 const struct net_device_ops *ops = dev->netdev_ops;
4989
d2615bf4 4990 if (ops->ndo_change_rx_flags)
d314774c 4991 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
4992}
4993
991fb3f7 4994static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
1da177e4 4995{
b536db93 4996 unsigned int old_flags = dev->flags;
d04a48b0
EB
4997 kuid_t uid;
4998 kgid_t gid;
1da177e4 4999
24023451
PM
5000 ASSERT_RTNL();
5001
dad9b335
WC
5002 dev->flags |= IFF_PROMISC;
5003 dev->promiscuity += inc;
5004 if (dev->promiscuity == 0) {
5005 /*
5006 * Avoid overflow.
5007 * If inc causes overflow, untouch promisc and return error.
5008 */
5009 if (inc < 0)
5010 dev->flags &= ~IFF_PROMISC;
5011 else {
5012 dev->promiscuity -= inc;
7b6cd1ce
JP
5013 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5014 dev->name);
dad9b335
WC
5015 return -EOVERFLOW;
5016 }
5017 }
52609c0b 5018 if (dev->flags != old_flags) {
7b6cd1ce
JP
5019 pr_info("device %s %s promiscuous mode\n",
5020 dev->name,
5021 dev->flags & IFF_PROMISC ? "entered" : "left");
8192b0c4
DH
5022 if (audit_enabled) {
5023 current_uid_gid(&uid, &gid);
7759db82
KHK
5024 audit_log(current->audit_context, GFP_ATOMIC,
5025 AUDIT_ANOM_PROMISCUOUS,
5026 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5027 dev->name, (dev->flags & IFF_PROMISC),
5028 (old_flags & IFF_PROMISC),
e1760bd5 5029 from_kuid(&init_user_ns, audit_get_loginuid(current)),
d04a48b0
EB
5030 from_kuid(&init_user_ns, uid),
5031 from_kgid(&init_user_ns, gid),
7759db82 5032 audit_get_sessionid(current));
8192b0c4 5033 }
24023451 5034
b6c40d68 5035 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 5036 }
991fb3f7
ND
5037 if (notify)
5038 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
dad9b335 5039 return 0;
1da177e4
LT
5040}
5041
4417da66
PM
5042/**
5043 * dev_set_promiscuity - update promiscuity count on a device
5044 * @dev: device
5045 * @inc: modifier
5046 *
5047 * Add or remove promiscuity from a device. While the count in the device
5048 * remains above zero the interface remains promiscuous. Once it hits zero
5049 * the device reverts to normal filtering operation. A negative inc
5050 * value is used to drop promiscuity on the device.
dad9b335 5051 * Return 0 if successful or a negative errno code on error.
4417da66 5052 */
dad9b335 5053int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66 5054{
b536db93 5055 unsigned int old_flags = dev->flags;
dad9b335 5056 int err;
4417da66 5057
991fb3f7 5058 err = __dev_set_promiscuity(dev, inc, true);
4b5a698e 5059 if (err < 0)
dad9b335 5060 return err;
4417da66
PM
5061 if (dev->flags != old_flags)
5062 dev_set_rx_mode(dev);
dad9b335 5063 return err;
4417da66 5064}
d1b19dff 5065EXPORT_SYMBOL(dev_set_promiscuity);
4417da66 5066
991fb3f7 5067static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
1da177e4 5068{
991fb3f7 5069 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
1da177e4 5070
24023451
PM
5071 ASSERT_RTNL();
5072
1da177e4 5073 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
5074 dev->allmulti += inc;
5075 if (dev->allmulti == 0) {
5076 /*
5077 * Avoid overflow.
5078 * If inc causes overflow, untouch allmulti and return error.
5079 */
5080 if (inc < 0)
5081 dev->flags &= ~IFF_ALLMULTI;
5082 else {
5083 dev->allmulti -= inc;
7b6cd1ce
JP
5084 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5085 dev->name);
dad9b335
WC
5086 return -EOVERFLOW;
5087 }
5088 }
24023451 5089 if (dev->flags ^ old_flags) {
b6c40d68 5090 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 5091 dev_set_rx_mode(dev);
991fb3f7
ND
5092 if (notify)
5093 __dev_notify_flags(dev, old_flags,
5094 dev->gflags ^ old_gflags);
24023451 5095 }
dad9b335 5096 return 0;
4417da66 5097}
991fb3f7
ND
5098
5099/**
5100 * dev_set_allmulti - update allmulti count on a device
5101 * @dev: device
5102 * @inc: modifier
5103 *
5104 * Add or remove reception of all multicast frames to a device. While the
5105 * count in the device remains above zero the interface remains listening
5106 * to all multicast frames. Once it hits zero the device reverts to normal
5107 * filtering operation. A negative @inc value is used to drop the counter
5108 * when releasing a resource needing all multicasts.
5109 * Return 0 if successful or a negative errno code on error.
5110 */
5111
5112int dev_set_allmulti(struct net_device *dev, int inc)
5113{
5114 return __dev_set_allmulti(dev, inc, true);
5115}
d1b19dff 5116EXPORT_SYMBOL(dev_set_allmulti);
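A sketch of the reference-counted usage pattern for the two helpers above: a capture-style user takes promiscuity with +1 and must drop it later with -1, under RTNL. The my_capture_* names are invented.

static int my_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* paired with -1 below */
	rtnl_unlock();
	return err;
}

static void my_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);
	rtnl_unlock();
}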
4417da66
PM
5117
5118/*
5119 * Upload unicast and multicast address lists to device and
5120 * configure RX filtering. When the device doesn't support unicast
53ccaae1 5121 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
5122 * are present.
5123 */
5124void __dev_set_rx_mode(struct net_device *dev)
5125{
d314774c
SH
5126 const struct net_device_ops *ops = dev->netdev_ops;
5127
4417da66
PM
5128 /* dev_open will call this function so the list will stay sane. */
5129 if (!(dev->flags&IFF_UP))
5130 return;
5131
5132 if (!netif_device_present(dev))
40b77c94 5133 return;
4417da66 5134
01789349 5135 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4417da66
PM
5136 /* Unicast addresses changes may only happen under the rtnl,
5137 * therefore calling __dev_set_promiscuity here is safe.
5138 */
32e7bfc4 5139 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
991fb3f7 5140 __dev_set_promiscuity(dev, 1, false);
2d348d1f 5141 dev->uc_promisc = true;
32e7bfc4 5142 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
991fb3f7 5143 __dev_set_promiscuity(dev, -1, false);
2d348d1f 5144 dev->uc_promisc = false;
4417da66 5145 }
4417da66 5146 }
01789349
JP
5147
5148 if (ops->ndo_set_rx_mode)
5149 ops->ndo_set_rx_mode(dev);
4417da66
PM
5150}
5151
5152void dev_set_rx_mode(struct net_device *dev)
5153{
b9e40857 5154 netif_addr_lock_bh(dev);
4417da66 5155 __dev_set_rx_mode(dev);
b9e40857 5156 netif_addr_unlock_bh(dev);
1da177e4
LT
5157}
5158
f0db275a
SH
5159/**
5160 * dev_get_flags - get flags reported to userspace
5161 * @dev: device
5162 *
5163 * Get the combination of flag bits exported through APIs to userspace.
5164 */
95c96174 5165unsigned int dev_get_flags(const struct net_device *dev)
1da177e4 5166{
95c96174 5167 unsigned int flags;
1da177e4
LT
5168
5169 flags = (dev->flags & ~(IFF_PROMISC |
5170 IFF_ALLMULTI |
b00055aa
SR
5171 IFF_RUNNING |
5172 IFF_LOWER_UP |
5173 IFF_DORMANT)) |
1da177e4
LT
5174 (dev->gflags & (IFF_PROMISC |
5175 IFF_ALLMULTI));
5176
b00055aa
SR
5177 if (netif_running(dev)) {
5178 if (netif_oper_up(dev))
5179 flags |= IFF_RUNNING;
5180 if (netif_carrier_ok(dev))
5181 flags |= IFF_LOWER_UP;
5182 if (netif_dormant(dev))
5183 flags |= IFF_DORMANT;
5184 }
1da177e4
LT
5185
5186 return flags;
5187}
d1b19dff 5188EXPORT_SYMBOL(dev_get_flags);
1da177e4 5189
bd380811 5190int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 5191{
b536db93 5192 unsigned int old_flags = dev->flags;
bd380811 5193 int ret;
1da177e4 5194
24023451
PM
5195 ASSERT_RTNL();
5196
1da177e4
LT
5197 /*
5198 * Set the flags on our device.
5199 */
5200
5201 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5202 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5203 IFF_AUTOMEDIA)) |
5204 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5205 IFF_ALLMULTI));
5206
5207 /*
5208 * Load in the correct multicast list now the flags have changed.
5209 */
5210
b6c40d68
PM
5211 if ((old_flags ^ flags) & IFF_MULTICAST)
5212 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 5213
4417da66 5214 dev_set_rx_mode(dev);
1da177e4
LT
5215
5216 /*
5217 * Have we downed the interface? We handle IFF_UP ourselves
5218 * according to user attempts to set it, rather than blindly
5219 * setting it.
5220 */
5221
5222 ret = 0;
5223 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
bd380811 5224 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4
LT
5225
5226 if (!ret)
4417da66 5227 dev_set_rx_mode(dev);
1da177e4
LT
5228 }
5229
1da177e4 5230 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff 5231 int inc = (flags & IFF_PROMISC) ? 1 : -1;
991fb3f7 5232 unsigned int old_flags = dev->flags;
d1b19dff 5233
1da177e4 5234 dev->gflags ^= IFF_PROMISC;
991fb3f7
ND
5235
5236 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5237 if (dev->flags != old_flags)
5238 dev_set_rx_mode(dev);
1da177e4
LT
5239 }
5240
5241 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5242 is important. Some (broken) drivers set IFF_PROMISC when
5243 IFF_ALLMULTI is requested, without asking us and without reporting it.
5244 */
5245 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
5246 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5247
1da177e4 5248 dev->gflags ^= IFF_ALLMULTI;
991fb3f7 5249 __dev_set_allmulti(dev, inc, false);
1da177e4
LT
5250 }
5251
bd380811
PM
5252 return ret;
5253}
5254
a528c219
ND
5255void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5256 unsigned int gchanges)
bd380811
PM
5257{
5258 unsigned int changes = dev->flags ^ old_flags;
5259
a528c219 5260 if (gchanges)
7f294054 5261 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
a528c219 5262
bd380811
PM
5263 if (changes & IFF_UP) {
5264 if (dev->flags & IFF_UP)
5265 call_netdevice_notifiers(NETDEV_UP, dev);
5266 else
5267 call_netdevice_notifiers(NETDEV_DOWN, dev);
5268 }
5269
5270 if (dev->flags & IFF_UP &&
be9efd36
JP
5271 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5272 struct netdev_notifier_change_info change_info;
5273
5274 change_info.flags_changed = changes;
5275 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5276 &change_info.info);
5277 }
bd380811
PM
5278}
5279
5280/**
5281 * dev_change_flags - change device settings
5282 * @dev: device
5283 * @flags: device state flags
5284 *
5285 * Change settings on device based state flags. The flags are
5286 * in the userspace exported format.
5287 */
b536db93 5288int dev_change_flags(struct net_device *dev, unsigned int flags)
bd380811 5289{
b536db93 5290 int ret;
991fb3f7 5291 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
bd380811
PM
5292
5293 ret = __dev_change_flags(dev, flags);
5294 if (ret < 0)
5295 return ret;
5296
991fb3f7 5297 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
a528c219 5298 __dev_notify_flags(dev, old_flags, changes);
1da177e4
LT
5299 return ret;
5300}
d1b19dff 5301EXPORT_SYMBOL(dev_change_flags);
1da177e4 5302
f0db275a
SH
5303/**
5304 * dev_set_mtu - Change maximum transfer unit
5305 * @dev: device
5306 * @new_mtu: new transfer unit
5307 *
5308 * Change the maximum transfer size of the network device.
5309 */
1da177e4
LT
5310int dev_set_mtu(struct net_device *dev, int new_mtu)
5311{
d314774c 5312 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
5313 int err;
5314
5315 if (new_mtu == dev->mtu)
5316 return 0;
5317
5318 /* MTU must be positive. */
5319 if (new_mtu < 0)
5320 return -EINVAL;
5321
5322 if (!netif_device_present(dev))
5323 return -ENODEV;
5324
5325 err = 0;
d314774c
SH
5326 if (ops->ndo_change_mtu)
5327 err = ops->ndo_change_mtu(dev, new_mtu);
1da177e4
LT
5328 else
5329 dev->mtu = new_mtu;
d314774c 5330
e3d8fabe 5331 if (!err)
056925ab 5332 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
1da177e4
LT
5333 return err;
5334}
d1b19dff 5335EXPORT_SYMBOL(dev_set_mtu);
1da177e4 5336
cbda10fa
VD
5337/**
5338 * dev_set_group - Change group this device belongs to
5339 * @dev: device
5340 * @new_group: group this device should belong to
5341 */
5342void dev_set_group(struct net_device *dev, int new_group)
5343{
5344 dev->group = new_group;
5345}
5346EXPORT_SYMBOL(dev_set_group);
5347
f0db275a
SH
5348/**
5349 * dev_set_mac_address - Change Media Access Control Address
5350 * @dev: device
5351 * @sa: new address
5352 *
5353 * Change the hardware (MAC) address of the device
5354 */
1da177e4
LT
5355int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5356{
d314774c 5357 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
5358 int err;
5359
d314774c 5360 if (!ops->ndo_set_mac_address)
1da177e4
LT
5361 return -EOPNOTSUPP;
5362 if (sa->sa_family != dev->type)
5363 return -EINVAL;
5364 if (!netif_device_present(dev))
5365 return -ENODEV;
d314774c 5366 err = ops->ndo_set_mac_address(dev, sa);
f6521516
JP
5367 if (err)
5368 return err;
fbdeca2d 5369 dev->addr_assign_type = NET_ADDR_SET;
f6521516 5370 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7bf23575 5371 add_device_randomness(dev->dev_addr, dev->addr_len);
f6521516 5372 return 0;
1da177e4 5373}
d1b19dff 5374EXPORT_SYMBOL(dev_set_mac_address);
1da177e4 5375
4bf84c35
JP
5376/**
5377 * dev_change_carrier - Change device carrier
5378 * @dev: device
691b3b7e 5379 * @new_carrier: new value
4bf84c35
JP
5380 *
5381 * Change device carrier
5382 */
5383int dev_change_carrier(struct net_device *dev, bool new_carrier)
5384{
5385 const struct net_device_ops *ops = dev->netdev_ops;
5386
5387 if (!ops->ndo_change_carrier)
5388 return -EOPNOTSUPP;
5389 if (!netif_device_present(dev))
5390 return -ENODEV;
5391 return ops->ndo_change_carrier(dev, new_carrier);
5392}
5393EXPORT_SYMBOL(dev_change_carrier);
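/* Illustrative sketch (hypothetical caller, not part of this file):
 * only devices that implement ndo_change_carrier (e.g. software
 * devices such as dummy) accept this; others return -EOPNOTSUPP.
 * Forcing carrier off could look like:
 *
 *	rtnl_lock();
 *	err = dev_change_carrier(dev, false);
 *	rtnl_unlock();
 */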
5394
66b52b0d
JP
5395/**
5396 * dev_get_phys_port_id - Get device physical port ID
5397 * @dev: device
5398 * @ppid: port ID
5399 *
5400 * Get device physical port ID
5401 */
5402int dev_get_phys_port_id(struct net_device *dev,
5403 struct netdev_phys_port_id *ppid)
5404{
5405 const struct net_device_ops *ops = dev->netdev_ops;
5406
5407 if (!ops->ndo_get_phys_port_id)
5408 return -EOPNOTSUPP;
5409 return ops->ndo_get_phys_port_id(dev, ppid);
5410}
5411EXPORT_SYMBOL(dev_get_phys_port_id);
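/* Illustrative sketch (hypothetical caller, not part of this file):
 * the returned ID is an opaque byte string of ppid.id_len bytes:
 *
 *	struct netdev_phys_port_id ppid;
 *
 *	if (!dev_get_phys_port_id(dev, &ppid))
 *		print_hex_dump_bytes("phys_port_id: ", DUMP_PREFIX_NONE,
 *				     ppid.id, ppid.id_len);
 */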
5412
1da177e4
LT
5413/**
5414 * dev_new_index - allocate an ifindex
c4ea43c5 5415 * @net: the applicable net namespace
1da177e4
LT
5416 *
5417 * Returns a suitable unique value for a new device interface
5418 * number. The caller must hold the rtnl semaphore or the
5419 * dev_base_lock to be sure it remains unique.
5420 */
881d966b 5421static int dev_new_index(struct net *net)
1da177e4 5422{
aa79e66e 5423 int ifindex = net->ifindex;
1da177e4
LT
5424 for (;;) {
5425 if (++ifindex <= 0)
5426 ifindex = 1;
881d966b 5427 if (!__dev_get_by_index(net, ifindex))
aa79e66e 5428 return net->ifindex = ifindex;
1da177e4
LT
5429 }
5430}
5431
1da177e4 5432/* Delayed registration/unregisteration */
3b5b34fd 5433static LIST_HEAD(net_todo_list);
50624c93 5434static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
1da177e4 5435
6f05f629 5436static void net_set_todo(struct net_device *dev)
1da177e4 5437{
1da177e4 5438 list_add_tail(&dev->todo_list, &net_todo_list);
50624c93 5439 dev_net(dev)->dev_unreg_count++;
1da177e4
LT
5440}
5441
9b5e383c 5442static void rollback_registered_many(struct list_head *head)
93ee31f1 5443{
e93737b0 5444 struct net_device *dev, *tmp;
5cde2829 5445 LIST_HEAD(close_head);
9b5e383c 5446
93ee31f1
DL
5447 BUG_ON(dev_boot_phase);
5448 ASSERT_RTNL();
5449
e93737b0 5450 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 5451 /* Some devices call us without ever having been registered,
e93737b0
KK
 5452 * as part of initialization unwind. Remove those
 5453 * devices and proceed with the remaining.
9b5e383c
ED
5454 */
5455 if (dev->reg_state == NETREG_UNINITIALIZED) {
7b6cd1ce
JP
5456 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5457 dev->name, dev);
93ee31f1 5458
9b5e383c 5459 WARN_ON(1);
e93737b0
KK
5460 list_del(&dev->unreg_list);
5461 continue;
9b5e383c 5462 }
449f4544 5463 dev->dismantle = true;
9b5e383c 5464 BUG_ON(dev->reg_state != NETREG_REGISTERED);
44345724 5465 }
93ee31f1 5466
44345724 5467 /* If device is running, close it first. */
5cde2829
EB
5468 list_for_each_entry(dev, head, unreg_list)
5469 list_add_tail(&dev->close_list, &close_head);
5470 dev_close_many(&close_head);
93ee31f1 5471
44345724 5472 list_for_each_entry(dev, head, unreg_list) {
9b5e383c
ED
5473 /* And unlink it from device chain. */
5474 unlist_netdevice(dev);
93ee31f1 5475
9b5e383c
ED
5476 dev->reg_state = NETREG_UNREGISTERING;
5477 }
93ee31f1
DL
5478
5479 synchronize_net();
5480
9b5e383c
ED
5481 list_for_each_entry(dev, head, unreg_list) {
5482 /* Shutdown queueing discipline. */
5483 dev_shutdown(dev);
93ee31f1
DL
5484
5485
9b5e383c
ED
5486 /* Notify protocols that we are about to destroy
5487 this device. They should clean up all of their state.
5488 */
5489 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 5490
a2835763
PM
5491 if (!dev->rtnl_link_ops ||
5492 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7f294054 5493 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
a2835763 5494
9b5e383c
ED
5495 /*
5496 * Flush the unicast and multicast chains
5497 */
a748ee24 5498 dev_uc_flush(dev);
22bedad3 5499 dev_mc_flush(dev);
93ee31f1 5500
9b5e383c
ED
5501 if (dev->netdev_ops->ndo_uninit)
5502 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 5503
9ff162a8
JP
5504 /* Notifier chain MUST detach us all upper devices. */
5505 WARN_ON(netdev_has_any_upper_dev(dev));
93ee31f1 5506
9b5e383c
ED
5507 /* Remove entries from kobject tree */
5508 netdev_unregister_kobject(dev);
024e9679
AD
5509#ifdef CONFIG_XPS
5510 /* Remove XPS queueing entries */
5511 netif_reset_xps_queues_gt(dev, 0);
5512#endif
9b5e383c 5513 }
93ee31f1 5514
850a545b 5515 synchronize_net();
395264d5 5516
a5ee1551 5517 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
5518 dev_put(dev);
5519}
5520
5521static void rollback_registered(struct net_device *dev)
5522{
5523 LIST_HEAD(single);
5524
5525 list_add(&dev->unreg_list, &single);
5526 rollback_registered_many(&single);
ceaaec98 5527 list_del(&single);
93ee31f1
DL
5528}
5529
c8f44aff
MM
5530static netdev_features_t netdev_fix_features(struct net_device *dev,
5531 netdev_features_t features)
b63365a2 5532{
57422dc5
MM
5533 /* Fix illegal checksum combinations */
5534 if ((features & NETIF_F_HW_CSUM) &&
5535 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 5536 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
57422dc5
MM
5537 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5538 }
5539
b63365a2 5540 /* TSO requires that SG is present as well. */
ea2d3688 5541 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6f404e44 5542 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
ea2d3688 5543 features &= ~NETIF_F_ALL_TSO;
b63365a2
HX
5544 }
5545
ec5f0615
PS
5546 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5547 !(features & NETIF_F_IP_CSUM)) {
5548 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5549 features &= ~NETIF_F_TSO;
5550 features &= ~NETIF_F_TSO_ECN;
5551 }
5552
5553 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5554 !(features & NETIF_F_IPV6_CSUM)) {
5555 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5556 features &= ~NETIF_F_TSO6;
5557 }
5558
31d8b9e0
BH
5559 /* TSO ECN requires that TSO is present as well. */
5560 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5561 features &= ~NETIF_F_TSO_ECN;
5562
212b573f
MM
5563 /* Software GSO depends on SG. */
5564 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6f404e44 5565 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
212b573f
MM
5566 features &= ~NETIF_F_GSO;
5567 }
5568
acd1130e 5569 /* UFO needs SG and checksumming */
b63365a2 5570 if (features & NETIF_F_UFO) {
79032644
MM
5571 /* maybe split UFO into V4 and V6? */
5572 if (!((features & NETIF_F_GEN_CSUM) ||
5573 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5574 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 5575 netdev_dbg(dev,
acd1130e 5576 "Dropping NETIF_F_UFO since no checksum offload features.\n");
b63365a2
HX
5577 features &= ~NETIF_F_UFO;
5578 }
5579
5580 if (!(features & NETIF_F_SG)) {
6f404e44 5581 netdev_dbg(dev,
acd1130e 5582 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
b63365a2
HX
5583 features &= ~NETIF_F_UFO;
5584 }
5585 }
5586
5587 return features;
5588}
b63365a2 5589
6cb6a27c 5590int __netdev_update_features(struct net_device *dev)
5455c699 5591{
c8f44aff 5592 netdev_features_t features;
5455c699
MM
5593 int err = 0;
5594
87267485
MM
5595 ASSERT_RTNL();
5596
5455c699
MM
5597 features = netdev_get_wanted_features(dev);
5598
5599 if (dev->netdev_ops->ndo_fix_features)
5600 features = dev->netdev_ops->ndo_fix_features(dev, features);
5601
5602 /* driver might be less strict about feature dependencies */
5603 features = netdev_fix_features(dev, features);
5604
5605 if (dev->features == features)
6cb6a27c 5606 return 0;
5455c699 5607
c8f44aff
MM
5608 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5609 &dev->features, &features);
5455c699
MM
5610
5611 if (dev->netdev_ops->ndo_set_features)
5612 err = dev->netdev_ops->ndo_set_features(dev, features);
5613
6cb6a27c 5614 if (unlikely(err < 0)) {
5455c699 5615 netdev_err(dev,
c8f44aff
MM
5616 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5617 err, &features, &dev->features);
6cb6a27c
MM
5618 return -1;
5619 }
5620
5621 if (!err)
5622 dev->features = features;
5623
5624 return 1;
5625}
5626
afe12cc8
MM
5627/**
5628 * netdev_update_features - recalculate device features
5629 * @dev: the device to check
5630 *
5631 * Recalculate dev->features set and send notifications if it
5632 * has changed. Should be called after driver or hardware dependent
5633 * conditions might have changed that influence the features.
5634 */
6cb6a27c
MM
5635void netdev_update_features(struct net_device *dev)
5636{
5637 if (__netdev_update_features(dev))
5638 netdev_features_change(dev);
5455c699
MM
5639}
5640EXPORT_SYMBOL(netdev_update_features);
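/* Illustrative sketch (hypothetical driver, not part of this file):
 * a driver whose offload constraints change at runtime (e.g. the MTU
 * grows beyond what its TSO engine supports) calls this under RTNL so
 * that its ndo_fix_features()/netdev_fix_features() logic is
 * re-evaluated and dev->features is updated:
 *
 *	rtnl_lock();
 *	netdev_update_features(dev);
 *	rtnl_unlock();
 */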
5641
afe12cc8
MM
5642/**
5643 * netdev_change_features - recalculate device features
5644 * @dev: the device to check
5645 *
5646 * Recalculate dev->features set and send notifications even
5647 * if they have not changed. Should be called instead of
5648 * netdev_update_features() if also dev->vlan_features might
5649 * have changed to allow the changes to be propagated to stacked
5650 * VLAN devices.
5651 */
5652void netdev_change_features(struct net_device *dev)
5653{
5654 __netdev_update_features(dev);
5655 netdev_features_change(dev);
5656}
5657EXPORT_SYMBOL(netdev_change_features);
5658
fc4a7489
PM
5659/**
5660 * netif_stacked_transfer_operstate - transfer operstate
5661 * @rootdev: the root or lower level device to transfer state from
5662 * @dev: the device to transfer operstate to
5663 *
5664 * Transfer operational state from root to device. This is normally
5665 * called when a stacking relationship exists between the root
5666 * device and the device (a leaf device).
5667 */
5668void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5669 struct net_device *dev)
5670{
5671 if (rootdev->operstate == IF_OPER_DORMANT)
5672 netif_dormant_on(dev);
5673 else
5674 netif_dormant_off(dev);
5675
5676 if (netif_carrier_ok(rootdev)) {
5677 if (!netif_carrier_ok(dev))
5678 netif_carrier_on(dev);
5679 } else {
5680 if (netif_carrier_ok(dev))
5681 netif_carrier_off(dev);
5682 }
5683}
5684EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5685
bf264145 5686#ifdef CONFIG_RPS
1b4bf461
ED
5687static int netif_alloc_rx_queues(struct net_device *dev)
5688{
1b4bf461 5689 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 5690 struct netdev_rx_queue *rx;
1b4bf461 5691
bd25fa7b 5692 BUG_ON(count < 1);
1b4bf461 5693
bd25fa7b 5694 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
62b5942a 5695 if (!rx)
bd25fa7b 5696 return -ENOMEM;
62b5942a 5697
bd25fa7b
TH
5698 dev->_rx = rx;
5699
bd25fa7b 5700 for (i = 0; i < count; i++)
fe822240 5701 rx[i].dev = dev;
1b4bf461
ED
5702 return 0;
5703}
bf264145 5704#endif
1b4bf461 5705
aa942104
CG
5706static void netdev_init_one_queue(struct net_device *dev,
5707 struct netdev_queue *queue, void *_unused)
5708{
5709 /* Initialize queue lock */
5710 spin_lock_init(&queue->_xmit_lock);
5711 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5712 queue->xmit_lock_owner = -1;
b236da69 5713 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
aa942104 5714 queue->dev = dev;
114cf580
TH
5715#ifdef CONFIG_BQL
5716 dql_init(&queue->dql, HZ);
5717#endif
aa942104
CG
5718}
5719
60877a32
ED
5720static void netif_free_tx_queues(struct net_device *dev)
5721{
5722 if (is_vmalloc_addr(dev->_tx))
5723 vfree(dev->_tx);
5724 else
5725 kfree(dev->_tx);
5726}
5727
e6484930
TH
5728static int netif_alloc_netdev_queues(struct net_device *dev)
5729{
5730 unsigned int count = dev->num_tx_queues;
5731 struct netdev_queue *tx;
60877a32 5732 size_t sz = count * sizeof(*tx);
e6484930 5733
60877a32 5734 BUG_ON(count < 1 || count > 0xffff);
62b5942a 5735
60877a32
ED
5736 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
5737 if (!tx) {
5738 tx = vzalloc(sz);
5739 if (!tx)
5740 return -ENOMEM;
5741 }
e6484930 5742 dev->_tx = tx;
1d24eb48 5743
e6484930
TH
5744 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5745 spin_lock_init(&dev->tx_global_lock);
aa942104
CG
5746
5747 return 0;
e6484930
TH
5748}
5749
1da177e4
LT
5750/**
5751 * register_netdevice - register a network device
5752 * @dev: device to register
5753 *
5754 * Take a completed network device structure and add it to the kernel
5755 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5756 * chain. 0 is returned on success. A negative errno code is returned
5757 * on a failure to set up the device, or if the name is a duplicate.
5758 *
5759 * Callers must hold the rtnl semaphore. You may want
5760 * register_netdev() instead of this.
5761 *
5762 * BUGS:
5763 * The locking appears insufficient to guarantee two parallel registers
5764 * will not get the same name.
5765 */
5766
5767int register_netdevice(struct net_device *dev)
5768{
1da177e4 5769 int ret;
d314774c 5770 struct net *net = dev_net(dev);
1da177e4
LT
5771
5772 BUG_ON(dev_boot_phase);
5773 ASSERT_RTNL();
5774
b17a7c17
SH
5775 might_sleep();
5776
1da177e4
LT
5777 /* When net_device's are persistent, this will be fatal. */
5778 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 5779 BUG_ON(!net);
1da177e4 5780
f1f28aa3 5781 spin_lock_init(&dev->addr_list_lock);
cf508b12 5782 netdev_set_addr_lockdep_class(dev);
1da177e4 5783
1da177e4
LT
5784 dev->iflink = -1;
5785
828de4f6 5786 ret = dev_get_valid_name(net, dev, dev->name);
0696c3a8
PP
5787 if (ret < 0)
5788 goto out;
5789
1da177e4 5790 /* Init, if this function is available */
d314774c
SH
5791 if (dev->netdev_ops->ndo_init) {
5792 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
5793 if (ret) {
5794 if (ret > 0)
5795 ret = -EIO;
90833aa4 5796 goto out;
1da177e4
LT
5797 }
5798 }
4ec93edb 5799
f646968f
PM
5800 if (((dev->hw_features | dev->features) &
5801 NETIF_F_HW_VLAN_CTAG_FILTER) &&
d2ed273d
MM
5802 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5803 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5804 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5805 ret = -EINVAL;
5806 goto err_uninit;
5807 }
5808
9c7dafbf
PE
5809 ret = -EBUSY;
5810 if (!dev->ifindex)
5811 dev->ifindex = dev_new_index(net);
5812 else if (__dev_get_by_index(net, dev->ifindex))
5813 goto err_uninit;
5814
1da177e4
LT
5815 if (dev->iflink == -1)
5816 dev->iflink = dev->ifindex;
5817
5455c699
MM
5818 /* Transfer changeable features to wanted_features and enable
5819 * software offloads (GSO and GRO).
5820 */
5821 dev->hw_features |= NETIF_F_SOFT_FEATURES;
14d1232f
MM
5822 dev->features |= NETIF_F_SOFT_FEATURES;
5823 dev->wanted_features = dev->features & dev->hw_features;
1da177e4 5824
34324dc2
MM
5825 if (!(dev->flags & IFF_LOOPBACK)) {
5826 dev->hw_features |= NETIF_F_NOCACHE_COPY;
c6e1a0d1
TH
5827 }
5828
1180e7d6 5829 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
16c3ea78 5830 */
1180e7d6 5831 dev->vlan_features |= NETIF_F_HIGHDMA;
16c3ea78 5832
ee579677
PS
5833 /* Make NETIF_F_SG inheritable to tunnel devices.
5834 */
5835 dev->hw_enc_features |= NETIF_F_SG;
5836
0d89d203
SH
5837 /* Make NETIF_F_SG inheritable to MPLS.
5838 */
5839 dev->mpls_features |= NETIF_F_SG;
5840
7ffbe3fd
JB
5841 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5842 ret = notifier_to_errno(ret);
5843 if (ret)
5844 goto err_uninit;
5845
8b41d188 5846 ret = netdev_register_kobject(dev);
b17a7c17 5847 if (ret)
7ce1b0ed 5848 goto err_uninit;
b17a7c17
SH
5849 dev->reg_state = NETREG_REGISTERED;
5850
6cb6a27c 5851 __netdev_update_features(dev);
8e9b59b2 5852
1da177e4
LT
5853 /*
5854 * Default initial state at registry is that the
5855 * device is present.
5856 */
5857
5858 set_bit(__LINK_STATE_PRESENT, &dev->state);
5859
8f4cccbb
BH
5860 linkwatch_init_dev(dev);
5861
1da177e4 5862 dev_init_scheduler(dev);
1da177e4 5863 dev_hold(dev);
ce286d32 5864 list_netdevice(dev);
7bf23575 5865 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 5866
948b337e
JP
5867 /* If the device has permanent device address, driver should
5868 * set dev_addr and also addr_assign_type should be set to
5869 * NET_ADDR_PERM (default value).
5870 */
5871 if (dev->addr_assign_type == NET_ADDR_PERM)
5872 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5873
1da177e4 5874 /* Notify protocols, that a new device appeared. */
056925ab 5875 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 5876 ret = notifier_to_errno(ret);
93ee31f1
DL
5877 if (ret) {
5878 rollback_registered(dev);
5879 dev->reg_state = NETREG_UNREGISTERED;
5880 }
d90a909e
EB
5881 /*
5882 * Prevent userspace races by waiting until the network
5883 * device is fully setup before sending notifications.
5884 */
a2835763
PM
5885 if (!dev->rtnl_link_ops ||
5886 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7f294054 5887 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
1da177e4
LT
5888
5889out:
5890 return ret;
7ce1b0ed
HX
5891
5892err_uninit:
d314774c
SH
5893 if (dev->netdev_ops->ndo_uninit)
5894 dev->netdev_ops->ndo_uninit(dev);
7ce1b0ed 5895 goto out;
1da177e4 5896}
d1b19dff 5897EXPORT_SYMBOL(register_netdevice);
1da177e4 5898
937f1ba5
BH
5899/**
5900 * init_dummy_netdev - init a dummy network device for NAPI
5901 * @dev: device to init
5902 *
5903 * This takes a network device structure and initializes the minimum
5904 * number of fields so it can be used to schedule NAPI polls without
5905 * registering a full blown interface. This is to be used by drivers
5906 * that need to tie several hardware interfaces to a single NAPI
5907 * poll scheduler due to HW limitations.
5908 */
5909int init_dummy_netdev(struct net_device *dev)
5910{
5911 /* Clear everything. Note we don't initialize spinlocks
5912 * as they aren't supposed to be taken by any of the
5913 * NAPI code and this dummy netdev is supposed to be
5914 * only ever used for NAPI polls
5915 */
5916 memset(dev, 0, sizeof(struct net_device));
5917
5918 /* make sure we BUG if trying to hit standard
5919 * register/unregister code path
5920 */
5921 dev->reg_state = NETREG_DUMMY;
5922
937f1ba5
BH
5923 /* NAPI wants this */
5924 INIT_LIST_HEAD(&dev->napi_list);
5925
5926 /* a dummy interface is started by default */
5927 set_bit(__LINK_STATE_PRESENT, &dev->state);
5928 set_bit(__LINK_STATE_START, &dev->state);
5929
29b4433d
ED
5930 /* Note: we don't allocate pcpu_refcnt for dummy devices,
5931 * because users of this 'device' don't need to change
5932 * its refcount.
5933 */
5934
937f1ba5
BH
5935 return 0;
5936}
5937EXPORT_SYMBOL_GPL(init_dummy_netdev);
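/* Illustrative sketch (hypothetical driver, not part of this file;
 * my_poll, priv and napi_dev are assumed names): a driver with one
 * NAPI context shared by several hardware ports can hang its
 * napi_struct off a dummy netdev:
 *
 *	static struct net_device napi_dev;
 *
 *	init_dummy_netdev(&napi_dev);
 *	netif_napi_add(&napi_dev, &priv->napi, my_poll, 64);
 *	napi_enable(&priv->napi);
 */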
5938
5939
1da177e4
LT
5940/**
5941 * register_netdev - register a network device
5942 * @dev: device to register
5943 *
5944 * Take a completed network device structure and add it to the kernel
5945 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5946 * chain. 0 is returned on success. A negative errno code is returned
5947 * on a failure to set up the device, or if the name is a duplicate.
5948 *
38b4da38 5949 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
5950 * and expands the device name if you passed a format string to
5951 * alloc_netdev.
5952 */
5953int register_netdev(struct net_device *dev)
5954{
5955 int err;
5956
5957 rtnl_lock();
1da177e4 5958 err = register_netdevice(dev);
1da177e4
LT
5959 rtnl_unlock();
5960 return err;
5961}
5962EXPORT_SYMBOL(register_netdev);
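/* Illustrative sketch (hypothetical driver probe, not part of this
 * file; struct my_priv and my_netdev_ops are assumed names): the
 * usual lifecycle is to allocate, fill in netdev_ops and features,
 * then register:
 *
 *	dev = alloc_etherdev(sizeof(struct my_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */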
5963
29b4433d
ED
5964int netdev_refcnt_read(const struct net_device *dev)
5965{
5966 int i, refcnt = 0;
5967
5968 for_each_possible_cpu(i)
5969 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5970 return refcnt;
5971}
5972EXPORT_SYMBOL(netdev_refcnt_read);
5973
2c53040f 5974/**
1da177e4 5975 * netdev_wait_allrefs - wait until all references are gone.
3de7a37b 5976 * @dev: target net_device
1da177e4
LT
5977 *
5978 * This is called when unregistering network devices.
5979 *
5980 * Any protocol or device that holds a reference should register
5981 * for netdevice notification, and cleanup and put back the
5982 * reference if they receive an UNREGISTER event.
5983 * We can get stuck here if buggy protocols don't correctly
4ec93edb 5984 * call dev_put.
1da177e4
LT
5985 */
5986static void netdev_wait_allrefs(struct net_device *dev)
5987{
5988 unsigned long rebroadcast_time, warning_time;
29b4433d 5989 int refcnt;
1da177e4 5990
e014debe
ED
5991 linkwatch_forget_dev(dev);
5992
1da177e4 5993 rebroadcast_time = warning_time = jiffies;
29b4433d
ED
5994 refcnt = netdev_refcnt_read(dev);
5995
5996 while (refcnt != 0) {
1da177e4 5997 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 5998 rtnl_lock();
1da177e4
LT
5999
6000 /* Rebroadcast unregister notification */
056925ab 6001 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
1da177e4 6002
748e2d93 6003 __rtnl_unlock();
0115e8e3 6004 rcu_barrier();
748e2d93
ED
6005 rtnl_lock();
6006
0115e8e3 6007 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
1da177e4
LT
6008 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6009 &dev->state)) {
6010 /* We must not have linkwatch events
6011 * pending on unregister. If this
6012 * happens, we simply run the queue
6013 * unscheduled, resulting in a noop
6014 * for this device.
6015 */
6016 linkwatch_run_queue();
6017 }
6018
6756ae4b 6019 __rtnl_unlock();
1da177e4
LT
6020
6021 rebroadcast_time = jiffies;
6022 }
6023
6024 msleep(250);
6025
29b4433d
ED
6026 refcnt = netdev_refcnt_read(dev);
6027
1da177e4 6028 if (time_after(jiffies, warning_time + 10 * HZ)) {
7b6cd1ce
JP
6029 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6030 dev->name, refcnt);
1da177e4
LT
6031 warning_time = jiffies;
6032 }
6033 }
6034}
6035
6036/* The sequence is:
6037 *
6038 * rtnl_lock();
6039 * ...
6040 * register_netdevice(x1);
6041 * register_netdevice(x2);
6042 * ...
6043 * unregister_netdevice(y1);
6044 * unregister_netdevice(y2);
6045 * ...
6046 * rtnl_unlock();
6047 * free_netdev(y1);
6048 * free_netdev(y2);
6049 *
58ec3b4d 6050 * We are invoked by rtnl_unlock().
1da177e4 6051 * This allows us to deal with problems:
b17a7c17 6052 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
6053 * without deadlocking with linkwatch via keventd.
6054 * 2) Since we run with the RTNL semaphore not held, we can sleep
6055 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
6056 *
6057 * We must not return until all unregister events added during
6058 * the interval the lock was held have been completed.
1da177e4 6059 */
1da177e4
LT
6060void netdev_run_todo(void)
6061{
626ab0e6 6062 struct list_head list;
1da177e4 6063
1da177e4 6064 /* Snapshot list, allow later requests */
626ab0e6 6065 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
6066
6067 __rtnl_unlock();
626ab0e6 6068
0115e8e3
ED
6069
6070 /* Wait for rcu callbacks to finish before next phase */
850a545b
EB
6071 if (!list_empty(&list))
6072 rcu_barrier();
6073
1da177e4
LT
6074 while (!list_empty(&list)) {
6075 struct net_device *dev
e5e26d75 6076 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
6077 list_del(&dev->todo_list);
6078
748e2d93 6079 rtnl_lock();
0115e8e3 6080 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
748e2d93 6081 __rtnl_unlock();
0115e8e3 6082
b17a7c17 6083 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7b6cd1ce 6084 pr_err("network todo '%s' but state %d\n",
b17a7c17
SH
6085 dev->name, dev->reg_state);
6086 dump_stack();
6087 continue;
6088 }
1da177e4 6089
b17a7c17 6090 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 6091
152102c7 6092 on_each_cpu(flush_backlog, dev, 1);
6e583ce5 6093
b17a7c17 6094 netdev_wait_allrefs(dev);
1da177e4 6095
b17a7c17 6096 /* paranoia */
29b4433d 6097 BUG_ON(netdev_refcnt_read(dev));
33d480ce
ED
6098 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6099 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
547b792c 6100 WARN_ON(dev->dn_ptr);
1da177e4 6101
b17a7c17
SH
6102 if (dev->destructor)
6103 dev->destructor(dev);
9093bbb2 6104
50624c93
EB
6105 /* Report a network device has been unregistered */
6106 rtnl_lock();
6107 dev_net(dev)->dev_unreg_count--;
6108 __rtnl_unlock();
6109 wake_up(&netdev_unregistering_wq);
6110
9093bbb2
SH
6111 /* Free network device */
6112 kobject_put(&dev->dev.kobj);
1da177e4 6113 }
1da177e4
LT
6114}
6115
3cfde79c
BH
6116/* Convert net_device_stats to rtnl_link_stats64. They have the same
6117 * fields in the same order, with only the type differing.
6118 */
77a1abf5
ED
6119void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6120 const struct net_device_stats *netdev_stats)
3cfde79c
BH
6121{
6122#if BITS_PER_LONG == 64
77a1abf5
ED
6123 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6124 memcpy(stats64, netdev_stats, sizeof(*stats64));
3cfde79c
BH
6125#else
6126 size_t i, n = sizeof(*stats64) / sizeof(u64);
6127 const unsigned long *src = (const unsigned long *)netdev_stats;
6128 u64 *dst = (u64 *)stats64;
6129
6130 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6131 sizeof(*stats64) / sizeof(u64));
6132 for (i = 0; i < n; i++)
6133 dst[i] = src[i];
6134#endif
6135}
77a1abf5 6136EXPORT_SYMBOL(netdev_stats_to_stats64);
3cfde79c 6137
eeda3fd6
SH
6138/**
6139 * dev_get_stats - get network device statistics
6140 * @dev: device to get statistics from
28172739 6141 * @storage: place to store stats
eeda3fd6 6142 *
d7753516
BH
6143 * Get network statistics from device. Return @storage.
6144 * The device driver may provide its own method by setting
6145 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6146 * otherwise the internal statistics structure is used.
eeda3fd6 6147 */
d7753516
BH
6148struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6149 struct rtnl_link_stats64 *storage)
7004bf25 6150{
eeda3fd6
SH
6151 const struct net_device_ops *ops = dev->netdev_ops;
6152
28172739
ED
6153 if (ops->ndo_get_stats64) {
6154 memset(storage, 0, sizeof(*storage));
caf586e5
ED
6155 ops->ndo_get_stats64(dev, storage);
6156 } else if (ops->ndo_get_stats) {
3cfde79c 6157 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
caf586e5
ED
6158 } else {
6159 netdev_stats_to_stats64(storage, &dev->stats);
28172739 6160 }
caf586e5 6161 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
28172739 6162 return storage;
c45d286e 6163}
eeda3fd6 6164EXPORT_SYMBOL(dev_get_stats);
c45d286e 6165
24824a09 6166struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
dc2b4847 6167{
24824a09 6168 struct netdev_queue *queue = dev_ingress_queue(dev);
dc2b4847 6169
24824a09
ED
6170#ifdef CONFIG_NET_CLS_ACT
6171 if (queue)
6172 return queue;
6173 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6174 if (!queue)
6175 return NULL;
6176 netdev_init_one_queue(dev, queue, NULL);
24824a09
ED
6177 queue->qdisc = &noop_qdisc;
6178 queue->qdisc_sleeping = &noop_qdisc;
6179 rcu_assign_pointer(dev->ingress_queue, queue);
6180#endif
6181 return queue;
bb949fbd
DM
6182}
6183
2c60db03
ED
6184static const struct ethtool_ops default_ethtool_ops;
6185
d07d7507
SG
6186void netdev_set_default_ethtool_ops(struct net_device *dev,
6187 const struct ethtool_ops *ops)
6188{
6189 if (dev->ethtool_ops == &default_ethtool_ops)
6190 dev->ethtool_ops = ops;
6191}
6192EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6193
74d332c1
ED
6194void netdev_freemem(struct net_device *dev)
6195{
6196 char *addr = (char *)dev - dev->padded;
6197
6198 if (is_vmalloc_addr(addr))
6199 vfree(addr);
6200 else
6201 kfree(addr);
6202}
6203
1da177e4 6204/**
36909ea4 6205 * alloc_netdev_mqs - allocate network device
1da177e4
LT
6206 * @sizeof_priv: size of private data to allocate space for
6207 * @name: device name format string
6208 * @setup: callback to initialize device
36909ea4
TH
6209 * @txqs: the number of TX subqueues to allocate
6210 * @rxqs: the number of RX subqueues to allocate
1da177e4
LT
6211 *
6212 * Allocates a struct net_device with private data area for driver use
f25f4e44 6213 * and performs basic initialization. Also allocates subqueue structs
36909ea4 6214 * for each queue on the device.
1da177e4 6215 */
36909ea4
TH
6216struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6217 void (*setup)(struct net_device *),
6218 unsigned int txqs, unsigned int rxqs)
1da177e4 6219{
1da177e4 6220 struct net_device *dev;
7943986c 6221 size_t alloc_size;
1ce8e7b5 6222 struct net_device *p;
1da177e4 6223
b6fe17d6
SH
6224 BUG_ON(strlen(name) >= sizeof(dev->name));
6225
36909ea4 6226 if (txqs < 1) {
7b6cd1ce 6227 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
55513fb4
TH
6228 return NULL;
6229 }
6230
36909ea4
TH
6231#ifdef CONFIG_RPS
6232 if (rxqs < 1) {
7b6cd1ce 6233 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
36909ea4
TH
6234 return NULL;
6235 }
6236#endif
6237
fd2ea0a7 6238 alloc_size = sizeof(struct net_device);
d1643d24
AD
6239 if (sizeof_priv) {
6240 /* ensure 32-byte alignment of private area */
1ce8e7b5 6241 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
6242 alloc_size += sizeof_priv;
6243 }
6244 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 6245 alloc_size += NETDEV_ALIGN - 1;
1da177e4 6246
74d332c1
ED
6247 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6248 if (!p)
6249 p = vzalloc(alloc_size);
62b5942a 6250 if (!p)
1da177e4 6251 return NULL;
1da177e4 6252
1ce8e7b5 6253 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 6254 dev->padded = (char *)dev - (char *)p;
ab9c73cc 6255
29b4433d
ED
6256 dev->pcpu_refcnt = alloc_percpu(int);
6257 if (!dev->pcpu_refcnt)
74d332c1 6258 goto free_dev;
ab9c73cc 6259
ab9c73cc 6260 if (dev_addr_init(dev))
29b4433d 6261 goto free_pcpu;
ab9c73cc 6262
22bedad3 6263 dev_mc_init(dev);
a748ee24 6264 dev_uc_init(dev);
ccffad25 6265
c346dca1 6266 dev_net_set(dev, &init_net);
1da177e4 6267
8d3bdbd5 6268 dev->gso_max_size = GSO_MAX_SIZE;
30b678d8 6269 dev->gso_max_segs = GSO_MAX_SEGS;
8d3bdbd5 6270
8d3bdbd5
DM
6271 INIT_LIST_HEAD(&dev->napi_list);
6272 INIT_LIST_HEAD(&dev->unreg_list);
5cde2829 6273 INIT_LIST_HEAD(&dev->close_list);
8d3bdbd5 6274 INIT_LIST_HEAD(&dev->link_watch_list);
2f268f12
VF
6275 INIT_LIST_HEAD(&dev->adj_list.upper);
6276 INIT_LIST_HEAD(&dev->adj_list.lower);
6277 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6278 INIT_LIST_HEAD(&dev->all_adj_list.lower);
8d3bdbd5
DM
6279 dev->priv_flags = IFF_XMIT_DST_RELEASE;
6280 setup(dev);
6281
36909ea4
TH
6282 dev->num_tx_queues = txqs;
6283 dev->real_num_tx_queues = txqs;
ed9af2e8 6284 if (netif_alloc_netdev_queues(dev))
8d3bdbd5 6285 goto free_all;
e8a0464c 6286
df334545 6287#ifdef CONFIG_RPS
36909ea4
TH
6288 dev->num_rx_queues = rxqs;
6289 dev->real_num_rx_queues = rxqs;
fe822240 6290 if (netif_alloc_rx_queues(dev))
8d3bdbd5 6291 goto free_all;
df334545 6292#endif
0a9627f2 6293
1da177e4 6294 strcpy(dev->name, name);
cbda10fa 6295 dev->group = INIT_NETDEV_GROUP;
2c60db03
ED
6296 if (!dev->ethtool_ops)
6297 dev->ethtool_ops = &default_ethtool_ops;
1da177e4 6298 return dev;
ab9c73cc 6299
8d3bdbd5
DM
6300free_all:
6301 free_netdev(dev);
6302 return NULL;
6303
29b4433d
ED
6304free_pcpu:
6305 free_percpu(dev->pcpu_refcnt);
60877a32 6306 netif_free_tx_queues(dev);
fe822240
TH
6307#ifdef CONFIG_RPS
6308 kfree(dev->_rx);
6309#endif
6310
74d332c1
ED
6311free_dev:
6312 netdev_freemem(dev);
ab9c73cc 6313 return NULL;
1da177e4 6314}
36909ea4 6315EXPORT_SYMBOL(alloc_netdev_mqs);
1da177e4
LT
6316
6317/**
6318 * free_netdev - free network device
6319 * @dev: device
6320 *
4ec93edb
YH
6321 * This function does the last stage of destroying an allocated device
6322 * interface. The reference to the device object is released.
1da177e4
LT
6323 * If this is the last reference then it will be freed.
6324 */
6325void free_netdev(struct net_device *dev)
6326{
d565b0a1
HX
6327 struct napi_struct *p, *n;
6328
f3005d7f
DL
6329 release_net(dev_net(dev));
6330
60877a32 6331 netif_free_tx_queues(dev);
fe822240
TH
6332#ifdef CONFIG_RPS
6333 kfree(dev->_rx);
6334#endif
e8a0464c 6335
33d480ce 6336 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
24824a09 6337
f001fde5
JP
6338 /* Flush device addresses */
6339 dev_addr_flush(dev);
6340
d565b0a1
HX
6341 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6342 netif_napi_del(p);
6343
29b4433d
ED
6344 free_percpu(dev->pcpu_refcnt);
6345 dev->pcpu_refcnt = NULL;
6346
3041a069 6347 /* Compatibility with error handling in drivers */
1da177e4 6348 if (dev->reg_state == NETREG_UNINITIALIZED) {
74d332c1 6349 netdev_freemem(dev);
1da177e4
LT
6350 return;
6351 }
6352
6353 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6354 dev->reg_state = NETREG_RELEASED;
6355
43cb76d9
GKH
6356 /* will free via device release */
6357 put_device(&dev->dev);
1da177e4 6358}
d1b19dff 6359EXPORT_SYMBOL(free_netdev);
4ec93edb 6360
f0db275a
SH
6361/**
6362 * synchronize_net - Synchronize with packet receive processing
6363 *
6364 * Wait for packets currently being received to be done.
6365 * Does not block later packets from starting.
6366 */
4ec93edb 6367void synchronize_net(void)
1da177e4
LT
6368{
6369 might_sleep();
be3fc413
ED
6370 if (rtnl_is_locked())
6371 synchronize_rcu_expedited();
6372 else
6373 synchronize_rcu();
1da177e4 6374}
d1b19dff 6375EXPORT_SYMBOL(synchronize_net);
1da177e4
LT
6376
6377/**
44a0873d 6378 * unregister_netdevice_queue - remove device from the kernel
1da177e4 6379 * @dev: device
44a0873d 6380 * @head: list
6ebfbc06 6381 *
1da177e4 6382 * This function shuts down a device interface and removes it
d59b54b1 6383 * from the kernel tables.
44a0873d 6384 * If head not NULL, device is queued to be unregistered later.
1da177e4
LT
6385 *
6386 * Callers must hold the rtnl semaphore. You may want
6387 * unregister_netdev() instead of this.
6388 */
6389
44a0873d 6390void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 6391{
a6620712
HX
6392 ASSERT_RTNL();
6393
44a0873d 6394 if (head) {
9fdce099 6395 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
6396 } else {
6397 rollback_registered(dev);
6398 /* Finish processing unregister after unlock */
6399 net_set_todo(dev);
6400 }
1da177e4 6401}
44a0873d 6402EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 6403
9b5e383c
ED
6404/**
6405 * unregister_netdevice_many - unregister many devices
6406 * @head: list of devices
9b5e383c
ED
6407 */
6408void unregister_netdevice_many(struct list_head *head)
6409{
6410 struct net_device *dev;
6411
6412 if (!list_empty(head)) {
6413 rollback_registered_many(head);
6414 list_for_each_entry(dev, head, unreg_list)
6415 net_set_todo(dev);
6416 }
6417}
63c8099d 6418EXPORT_SYMBOL(unregister_netdevice_many);
9b5e383c 6419
1da177e4
LT
6420/**
6421 * unregister_netdev - remove device from the kernel
6422 * @dev: device
6423 *
6424 * This function shuts down a device interface and removes it
d59b54b1 6425 * from the kernel tables.
1da177e4
LT
6426 *
6427 * This is just a wrapper for unregister_netdevice that takes
6428 * the rtnl semaphore. In general you want to use this and not
6429 * unregister_netdevice.
6430 */
6431void unregister_netdev(struct net_device *dev)
6432{
6433 rtnl_lock();
6434 unregister_netdevice(dev);
6435 rtnl_unlock();
6436}
1da177e4
LT
6437EXPORT_SYMBOL(unregister_netdev);
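/* Illustrative sketch (hypothetical driver remove path, not part of
 * this file): unregister_netdev() takes and drops RTNL itself; the
 * final free is a separate step once the todo list has run:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */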
6438
ce286d32
EB
6439/**
6440 * dev_change_net_namespace - move device to a different network namespace
6441 * @dev: device
6442 * @net: network namespace
6443 * @pat: If not NULL name pattern to try if the current device name
6444 * is already taken in the destination network namespace.
6445 *
6446 * This function shuts down a device interface and moves it
6447 * to a new network namespace. On success 0 is returned, on
6448 * a failure a negative errno code is returned.
6449 *
6450 * Callers must hold the rtnl semaphore.
6451 */
6452
6453int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6454{
ce286d32
EB
6455 int err;
6456
6457 ASSERT_RTNL();
6458
6459 /* Don't allow namespace local devices to be moved. */
6460 err = -EINVAL;
6461 if (dev->features & NETIF_F_NETNS_LOCAL)
6462 goto out;
6463
6464 /* Ensure the device has been registered */
ce286d32
EB
6465 if (dev->reg_state != NETREG_REGISTERED)
6466 goto out;
6467
6468 /* Get out if there is nothing to do */
6469 err = 0;
878628fb 6470 if (net_eq(dev_net(dev), net))
ce286d32
EB
6471 goto out;
6472
6473 /* Pick the destination device name, and ensure
6474 * we can use it in the destination network namespace.
6475 */
6476 err = -EEXIST;
d9031024 6477 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
6478 /* We get here if we can't use the current device name */
6479 if (!pat)
6480 goto out;
828de4f6 6481 if (dev_get_valid_name(net, dev, pat) < 0)
ce286d32
EB
6482 goto out;
6483 }
6484
6485 /*
6486 * And now a mini version of register_netdevice unregister_netdevice.
6487 */
6488
6489 /* If device is running close it first. */
9b772652 6490 dev_close(dev);
ce286d32
EB
6491
6492 /* And unlink it from device chain */
6493 err = -ENODEV;
6494 unlist_netdevice(dev);
6495
6496 synchronize_net();
6497
6498 /* Shutdown queueing discipline. */
6499 dev_shutdown(dev);
6500
6501 /* Notify protocols that we are about to destroy
6502 this device. They should clean up all of their state.
3b27e105
DL
6503
6504 Note that dev->reg_state stays at NETREG_REGISTERED.
6505 This is wanted because this way 8021q and macvlan know
6506 the device is just moving and can keep their slaves up.
ce286d32
EB
6507 */
6508 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6549dd43
G
6509 rcu_barrier();
6510 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7f294054 6511 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
ce286d32
EB
6512
6513 /*
6514 * Flush the unicast and multicast chains
6515 */
a748ee24 6516 dev_uc_flush(dev);
22bedad3 6517 dev_mc_flush(dev);
ce286d32 6518
4e66ae2e
SH
6519 /* Send a netdev-removed uevent to the old namespace */
6520 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6521
ce286d32 6522 /* Actually switch the network namespace */
c346dca1 6523 dev_net_set(dev, net);
ce286d32 6524
ce286d32
EB
6525 /* If there is an ifindex conflict assign a new one */
6526 if (__dev_get_by_index(net, dev->ifindex)) {
6527 int iflink = (dev->iflink == dev->ifindex);
6528 dev->ifindex = dev_new_index(net);
6529 if (iflink)
6530 dev->iflink = dev->ifindex;
6531 }
6532
4e66ae2e
SH
6533 /* Send a netdev-add uevent to the new namespace */
6534 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6535
8b41d188 6536 /* Fixup kobjects */
a1b3f594 6537 err = device_rename(&dev->dev, dev->name);
8b41d188 6538 WARN_ON(err);
ce286d32
EB
6539
6540 /* Add the device back in the hashes */
6541 list_netdevice(dev);
6542
6543 /* Notify protocols, that a new device appeared. */
6544 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6545
d90a909e
EB
6546 /*
6547 * Prevent userspace races by waiting until the network
6548 * device is fully setup before sending notifications.
6549 */
7f294054 6550 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
d90a909e 6551
ce286d32
EB
6552 synchronize_net();
6553 err = 0;
6554out:
6555 return err;
6556}
463d0183 6557EXPORT_SYMBOL_GPL(dev_change_net_namespace);
ce286d32 6558
1da177e4
LT
6559static int dev_cpu_callback(struct notifier_block *nfb,
6560 unsigned long action,
6561 void *ocpu)
6562{
6563 struct sk_buff **list_skb;
1da177e4
LT
6564 struct sk_buff *skb;
6565 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6566 struct softnet_data *sd, *oldsd;
6567
8bb78442 6568 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1da177e4
LT
6569 return NOTIFY_OK;
6570
6571 local_irq_disable();
6572 cpu = smp_processor_id();
6573 sd = &per_cpu(softnet_data, cpu);
6574 oldsd = &per_cpu(softnet_data, oldcpu);
6575
6576 /* Find end of our completion_queue. */
6577 list_skb = &sd->completion_queue;
6578 while (*list_skb)
6579 list_skb = &(*list_skb)->next;
6580 /* Append completion queue from offline CPU. */
6581 *list_skb = oldsd->completion_queue;
6582 oldsd->completion_queue = NULL;
6583
1da177e4 6584 /* Append output queue from offline CPU. */
a9cbd588
CG
6585 if (oldsd->output_queue) {
6586 *sd->output_queue_tailp = oldsd->output_queue;
6587 sd->output_queue_tailp = oldsd->output_queue_tailp;
6588 oldsd->output_queue = NULL;
6589 oldsd->output_queue_tailp = &oldsd->output_queue;
6590 }
264524d5
HC
6591 /* Append NAPI poll list from offline CPU. */
6592 if (!list_empty(&oldsd->poll_list)) {
6593 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6594 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6595 }
1da177e4
LT
6596
6597 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6598 local_irq_enable();
6599
6600 /* Process offline CPU's input_pkt_queue */
76cc8b13 6601 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
1da177e4 6602 netif_rx(skb);
76cc8b13 6603 input_queue_head_incr(oldsd);
fec5e652 6604 }
76cc8b13 6605 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6e7676c1 6606 netif_rx(skb);
76cc8b13
TH
6607 input_queue_head_incr(oldsd);
6608 }
1da177e4
LT
6609
6610 return NOTIFY_OK;
6611}
1da177e4
LT
6612
6613
7f353bf2 6614/**
b63365a2
HX
6615 * netdev_increment_features - increment feature set by one
6616 * @all: current feature set
6617 * @one: new feature set
6618 * @mask: mask feature set
7f353bf2
HX
6619 *
6620 * Computes a new feature set after adding a device with feature set
b63365a2
HX
6621 * @one to the master device with current feature set @all. Will not
6622 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 6623 */
c8f44aff
MM
6624netdev_features_t netdev_increment_features(netdev_features_t all,
6625 netdev_features_t one, netdev_features_t mask)
b63365a2 6626{
1742f183
MM
6627 if (mask & NETIF_F_GEN_CSUM)
6628 mask |= NETIF_F_ALL_CSUM;
6629 mask |= NETIF_F_VLAN_CHALLENGED;
7f353bf2 6630
1742f183
MM
6631 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6632 all &= one | ~NETIF_F_ALL_FOR_ALL;
c6e1a0d1 6633
1742f183
MM
6634 /* If one device supports hw checksumming, set for all. */
6635 if (all & NETIF_F_GEN_CSUM)
6636 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
7f353bf2
HX
6637
6638 return all;
6639}
b63365a2 6640EXPORT_SYMBOL(netdev_increment_features);
7f353bf2 6641
430f03cd 6642static struct hlist_head * __net_init netdev_create_hash(void)
30d97d35
PE
6643{
6644 int i;
6645 struct hlist_head *hash;
6646
6647 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6648 if (hash != NULL)
6649 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6650 INIT_HLIST_HEAD(&hash[i]);
6651
6652 return hash;
6653}
6654
881d966b 6655/* Initialize per network namespace state */
4665079c 6656static int __net_init netdev_init(struct net *net)
881d966b 6657{
734b6541
RM
6658 if (net != &init_net)
6659 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 6660
30d97d35
PE
6661 net->dev_name_head = netdev_create_hash();
6662 if (net->dev_name_head == NULL)
6663 goto err_name;
881d966b 6664
30d97d35
PE
6665 net->dev_index_head = netdev_create_hash();
6666 if (net->dev_index_head == NULL)
6667 goto err_idx;
881d966b
EB
6668
6669 return 0;
30d97d35
PE
6670
6671err_idx:
6672 kfree(net->dev_name_head);
6673err_name:
6674 return -ENOMEM;
881d966b
EB
6675}
6676
f0db275a
SH
6677/**
6678 * netdev_drivername - network driver for the device
6679 * @dev: network device
f0db275a
SH
6680 *
6681 * Determine network driver for device.
6682 */
3019de12 6683const char *netdev_drivername(const struct net_device *dev)
6579e57b 6684{
cf04a4c7
SH
6685 const struct device_driver *driver;
6686 const struct device *parent;
3019de12 6687 const char *empty = "";
6579e57b
AV
6688
6689 parent = dev->dev.parent;
6579e57b 6690 if (!parent)
3019de12 6691 return empty;
6579e57b
AV
6692
6693 driver = parent->driver;
6694 if (driver && driver->name)
3019de12
DM
6695 return driver->name;
6696 return empty;
6579e57b
AV
6697}
6698
b004ff49 6699static int __netdev_printk(const char *level, const struct net_device *dev,
256df2f3
JP
6700 struct va_format *vaf)
6701{
6702 int r;
6703
b004ff49 6704 if (dev && dev->dev.parent) {
666f355f
JP
6705 r = dev_printk_emit(level[1] - '0',
6706 dev->dev.parent,
6707 "%s %s %s: %pV",
6708 dev_driver_string(dev->dev.parent),
6709 dev_name(dev->dev.parent),
6710 netdev_name(dev), vaf);
b004ff49 6711 } else if (dev) {
256df2f3 6712 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
b004ff49 6713 } else {
256df2f3 6714 r = printk("%s(NULL net_device): %pV", level, vaf);
b004ff49 6715 }
256df2f3
JP
6716
6717 return r;
6718}
6719
6720int netdev_printk(const char *level, const struct net_device *dev,
6721 const char *format, ...)
6722{
6723 struct va_format vaf;
6724 va_list args;
6725 int r;
6726
6727 va_start(args, format);
6728
6729 vaf.fmt = format;
6730 vaf.va = &args;
6731
6732 r = __netdev_printk(level, dev, &vaf);
b004ff49 6733
256df2f3
JP
6734 va_end(args);
6735
6736 return r;
6737}
6738EXPORT_SYMBOL(netdev_printk);
6739
6740#define define_netdev_printk_level(func, level) \
6741int func(const struct net_device *dev, const char *fmt, ...) \
6742{ \
6743 int r; \
6744 struct va_format vaf; \
6745 va_list args; \
6746 \
6747 va_start(args, fmt); \
6748 \
6749 vaf.fmt = fmt; \
6750 vaf.va = &args; \
6751 \
6752 r = __netdev_printk(level, dev, &vaf); \
b004ff49 6753 \
256df2f3
JP
6754 va_end(args); \
6755 \
6756 return r; \
6757} \
6758EXPORT_SYMBOL(func);
6759
6760define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6761define_netdev_printk_level(netdev_alert, KERN_ALERT);
6762define_netdev_printk_level(netdev_crit, KERN_CRIT);
6763define_netdev_printk_level(netdev_err, KERN_ERR);
6764define_netdev_printk_level(netdev_warn, KERN_WARNING);
6765define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6766define_netdev_printk_level(netdev_info, KERN_INFO);
6767
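/* Illustrative sketch (hypothetical driver, not part of this file;
 * speed is an assumed variable): these wrappers prefix messages with
 * the driver and device names, so drivers can log without building
 * their own prefixes:
 *
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 *	netdev_err(dev, "DMA mapping failed\n");
 */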
4665079c 6768static void __net_exit netdev_exit(struct net *net)
881d966b
EB
6769{
6770 kfree(net->dev_name_head);
6771 kfree(net->dev_index_head);
6772}
6773
022cbae6 6774static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
6775 .init = netdev_init,
6776 .exit = netdev_exit,
6777};
6778
4665079c 6779static void __net_exit default_device_exit(struct net *net)
ce286d32 6780{
e008b5fc 6781 struct net_device *dev, *aux;
ce286d32 6782 /*
e008b5fc 6783 * Push all migratable network devices back to the
ce286d32
EB
6784 * initial network namespace
6785 */
6786 rtnl_lock();
e008b5fc 6787 for_each_netdev_safe(net, dev, aux) {
ce286d32 6788 int err;
aca51397 6789 char fb_name[IFNAMSIZ];
ce286d32
EB
6790
6791 /* Ignore unmoveable devices (i.e. loopback) */
6792 if (dev->features & NETIF_F_NETNS_LOCAL)
6793 continue;
6794
e008b5fc
EB
6795 /* Leave virtual devices for the generic cleanup */
6796 if (dev->rtnl_link_ops)
6797 continue;
d0c082ce 6798
25985edc 6799 /* Push remaining network devices to init_net */
aca51397
PE
6800 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6801 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 6802 if (err) {
7b6cd1ce
JP
6803 pr_emerg("%s: failed to move %s to init_net: %d\n",
6804 __func__, dev->name, err);
aca51397 6805 BUG();
ce286d32
EB
6806 }
6807 }
6808 rtnl_unlock();
6809}
6810
50624c93
EB
6811static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
6812{
6813 /* Return with the rtnl_lock held when there are no network
6814 * devices unregistering in any network namespace in net_list.
6815 */
6816 struct net *net;
6817 bool unregistering;
6818 DEFINE_WAIT(wait);
6819
6820 for (;;) {
6821 prepare_to_wait(&netdev_unregistering_wq, &wait,
6822 TASK_UNINTERRUPTIBLE);
6823 unregistering = false;
6824 rtnl_lock();
6825 list_for_each_entry(net, net_list, exit_list) {
6826 if (net->dev_unreg_count > 0) {
6827 unregistering = true;
6828 break;
6829 }
6830 }
6831 if (!unregistering)
6832 break;
6833 __rtnl_unlock();
6834 schedule();
6835 }
6836 finish_wait(&netdev_unregistering_wq, &wait);
6837}
6838
04dc7f6b
EB
6839static void __net_exit default_device_exit_batch(struct list_head *net_list)
6840{
6841 /* At exit all network devices must be removed from a network
b595076a 6842 * namespace. Do this in the reverse order of registration.
04dc7f6b
EB
6843 * Do this across as many network namespaces as possible to
6844 * improve batching efficiency.
6845 */
6846 struct net_device *dev;
6847 struct net *net;
6848 LIST_HEAD(dev_kill_list);
6849
50624c93
EB
6850 /* To prevent network device cleanup code from dereferencing
6851 * loopback devices or network devices that have been freed
6852 * wait here for all pending unregistrations to complete,
6853 * before unregistering the loopback device and allowing the
6854 * network namespace to be freed.
6855 *
6856 * The netdev todo list containing all network devices
6857 * unregistrations that happen in default_device_exit_batch
6858 * will run in the rtnl_unlock() at the end of
6859 * default_device_exit_batch.
6860 */
6861 rtnl_lock_unregistering(net_list);
04dc7f6b
EB
6862 list_for_each_entry(net, net_list, exit_list) {
6863 for_each_netdev_reverse(net, dev) {
6864 if (dev->rtnl_link_ops)
6865 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6866 else
6867 unregister_netdevice_queue(dev, &dev_kill_list);
6868 }
6869 }
6870 unregister_netdevice_many(&dev_kill_list);
ceaaec98 6871 list_del(&dev_kill_list);
04dc7f6b
EB
6872 rtnl_unlock();
6873}
6874
022cbae6 6875static struct pernet_operations __net_initdata default_device_ops = {
ce286d32 6876 .exit = default_device_exit,
04dc7f6b 6877 .exit_batch = default_device_exit_batch,
ce286d32
EB
6878};
6879
1da177e4
LT
6880/*
6881 * Initialize the DEV module. At boot time this walks the device list and
6882 * unhooks any devices that fail to initialise (normally hardware not
6883 * present) and leaves us with a valid list of present and active devices.
6884 *
6885 */
6886
6887/*
6888 * This is called single threaded during boot, so no need
6889 * to take the rtnl semaphore.
6890 */
6891static int __init net_dev_init(void)
6892{
6893 int i, rc = -ENOMEM;
6894
6895 BUG_ON(!dev_boot_phase);
6896
1da177e4
LT
6897 if (dev_proc_init())
6898 goto out;
6899
8b41d188 6900 if (netdev_kobject_init())
1da177e4
LT
6901 goto out;
6902
6903 INIT_LIST_HEAD(&ptype_all);
82d8a867 6904 for (i = 0; i < PTYPE_HASH_SIZE; i++)
1da177e4
LT
6905 INIT_LIST_HEAD(&ptype_base[i]);
6906
62532da9
VY
6907 INIT_LIST_HEAD(&offload_base);
6908
881d966b
EB
6909 if (register_pernet_subsys(&netdev_net_ops))
6910 goto out;
1da177e4
LT
6911
6912 /*
6913 * Initialise the packet receive queues.
6914 */
6915
6f912042 6916 for_each_possible_cpu(i) {
e36fa2f7 6917 struct softnet_data *sd = &per_cpu(softnet_data, i);
1da177e4 6918
dee42870 6919 memset(sd, 0, sizeof(*sd));
e36fa2f7 6920 skb_queue_head_init(&sd->input_pkt_queue);
6e7676c1 6921 skb_queue_head_init(&sd->process_queue);
e36fa2f7
ED
6922 sd->completion_queue = NULL;
6923 INIT_LIST_HEAD(&sd->poll_list);
a9cbd588
CG
6924 sd->output_queue = NULL;
6925 sd->output_queue_tailp = &sd->output_queue;
df334545 6926#ifdef CONFIG_RPS
e36fa2f7
ED
6927 sd->csd.func = rps_trigger_softirq;
6928 sd->csd.info = sd;
6929 sd->csd.flags = 0;
6930 sd->cpu = i;
1e94d72f 6931#endif
0a9627f2 6932
e36fa2f7
ED
6933 sd->backlog.poll = process_backlog;
6934 sd->backlog.weight = weight_p;
6935 sd->backlog.gro_list = NULL;
6936 sd->backlog.gro_count = 0;
99bbc707
WB
6937
6938#ifdef CONFIG_NET_FLOW_LIMIT
6939 sd->flow_limit = NULL;
6940#endif
1da177e4
LT
6941 }
6942
1da177e4
LT
6943 dev_boot_phase = 0;
6944
505d4f73
EB
6945 /* The loopback device is special: if any other network device
6946 * is present in a network namespace, the loopback device must
6947 * be present. Since we now dynamically allocate and free the
6948 * loopback device, ensure this invariant is maintained by
6949 * keeping the loopback device as the first device on the
6950 * list of network devices. This ensures the loopback device
6951 * is the first device that appears and the last network device
6952 * that disappears.
6953 */
6954 if (register_pernet_device(&loopback_net_ops))
6955 goto out;
6956
6957 if (register_pernet_device(&default_device_ops))
6958 goto out;
6959
962cf36c
CM
6960 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6961 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4
LT
6962
6963 hotcpu_notifier(dev_cpu_callback, 0);
6964 dst_init();
1da177e4
LT
6965 rc = 0;
6966out:
6967 return rc;
6968}
6969
6970subsys_initcall(net_dev_init);