2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
51 * Rudi Cilibrasi : Pass the right thing to
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <linux/bitops.h>
78 #include <linux/capability.h>
79 #include <linux/cpu.h>
80 #include <linux/types.h>
81 #include <linux/kernel.h>
82 #include <linux/hash.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/proc_fs.h>
101 #include <linux/seq_file.h>
102 #include <linux/stat.h>
103 #include <linux/if_bridge.h>
104 #include <linux/if_macvlan.h>
106 #include <net/pkt_sched.h>
107 #include <net/checksum.h>
108 #include <net/xfrm.h>
109 #include <linux/highmem.h>
110 #include <linux/init.h>
111 #include <linux/kmod.h>
112 #include <linux/module.h>
113 #include <linux/netpoll.h>
114 #include <linux/rcupdate.h>
115 #include <linux/delay.h>
116 #include <net/wext.h>
117 #include <net/iw_handler.h>
118 #include <asm/current.h>
119 #include <linux/audit.h>
120 #include <linux/dmaengine.h>
121 #include <linux/err.h>
122 #include <linux/ctype.h>
123 #include <linux/if_arp.h>
124 #include <linux/if_vlan.h>
125 #include <linux/ip.h>
127 #include <linux/ipv6.h>
128 #include <linux/in.h>
129 #include <linux/jhash.h>
130 #include <linux/random.h>
131 #include <trace/events/napi.h>
132 #include <linux/pci.h>
134 #include "net-sysfs.h"
136 /* Instead of increasing this, you should create a hash table. */
137 #define MAX_GRO_SKBS 8
139 /* This should be increased if a protocol with a bigger head is added. */
140 #define GRO_MAX_HEAD (MAX_HEADER + 128)
143 * The list of packet types we will receive (as opposed to discard)
144 * and the routines to invoke.
146 * Why 16. Because with 16 the only overlap we get on a hash of the
147 * low nibble of the protocol value is RARP/SNAP/X.25.
149 * NOTE: That is no longer true with the addition of VLAN tags. Not
150 * sure which should go first, but I bet it won't make much
151 * difference if we are running VLANs. The good news is that
152 * this protocol won't be in the list unless compiled in, so
153 * the average user (w/out VLANs) will not be adversely affected.
170 #define PTYPE_HASH_SIZE (16)
171 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
173 static DEFINE_SPINLOCK(ptype_lock
);
174 static struct list_head ptype_base
[PTYPE_HASH_SIZE
] __read_mostly
;
175 static struct list_head ptype_all __read_mostly
; /* Taps */
178 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
181 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
183 * Writers must hold the rtnl semaphore while they loop through the
184 * dev_base_head list, and hold dev_base_lock for writing when they do the
185 * actual updates. This allows pure readers to access the list even
186 * while a writer is preparing to update it.
188 * To put it another way, dev_base_lock is held for writing only to
189 * protect against pure readers; the rtnl semaphore provides the
190 * protection against other writers.
192 * See, for example usages, register_netdevice() and
193 * unregister_netdevice(), which must be called with the rtnl
196 DEFINE_RWLOCK(dev_base_lock
);
197 EXPORT_SYMBOL(dev_base_lock
);
199 static inline struct hlist_head
*dev_name_hash(struct net
*net
, const char *name
)
201 unsigned hash
= full_name_hash(name
, strnlen(name
, IFNAMSIZ
));
202 return &net
->dev_name_head
[hash_32(hash
, NETDEV_HASHBITS
)];
205 static inline struct hlist_head
*dev_index_hash(struct net
*net
, int ifindex
)
207 return &net
->dev_index_head
[ifindex
& (NETDEV_HASHENTRIES
- 1)];
210 static inline void rps_lock(struct softnet_data
*queue
)
213 spin_lock(&queue
->input_pkt_queue
.lock
);
217 static inline void rps_unlock(struct softnet_data
*queue
)
220 spin_unlock(&queue
->input_pkt_queue
.lock
);
224 /* Device list insertion */
225 static int list_netdevice(struct net_device
*dev
)
227 struct net
*net
= dev_net(dev
);
231 write_lock_bh(&dev_base_lock
);
232 list_add_tail_rcu(&dev
->dev_list
, &net
->dev_base_head
);
233 hlist_add_head_rcu(&dev
->name_hlist
, dev_name_hash(net
, dev
->name
));
234 hlist_add_head_rcu(&dev
->index_hlist
,
235 dev_index_hash(net
, dev
->ifindex
));
236 write_unlock_bh(&dev_base_lock
);
240 /* Device list removal
241 * caller must respect a RCU grace period before freeing/reusing dev
243 static void unlist_netdevice(struct net_device
*dev
)
247 /* Unlink dev from the device chain */
248 write_lock_bh(&dev_base_lock
);
249 list_del_rcu(&dev
->dev_list
);
250 hlist_del_rcu(&dev
->name_hlist
);
251 hlist_del_rcu(&dev
->index_hlist
);
252 write_unlock_bh(&dev_base_lock
);
259 static RAW_NOTIFIER_HEAD(netdev_chain
);
262 * Device drivers call our routines to queue packets here. We empty the
263 * queue in the local softnet handler.
266 DEFINE_PER_CPU(struct softnet_data
, softnet_data
);
267 EXPORT_PER_CPU_SYMBOL(softnet_data
);
269 #ifdef CONFIG_LOCKDEP
271 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
272 * according to dev->type
274 static const unsigned short netdev_lock_type
[] =
275 {ARPHRD_NETROM
, ARPHRD_ETHER
, ARPHRD_EETHER
, ARPHRD_AX25
,
276 ARPHRD_PRONET
, ARPHRD_CHAOS
, ARPHRD_IEEE802
, ARPHRD_ARCNET
,
277 ARPHRD_APPLETLK
, ARPHRD_DLCI
, ARPHRD_ATM
, ARPHRD_METRICOM
,
278 ARPHRD_IEEE1394
, ARPHRD_EUI64
, ARPHRD_INFINIBAND
, ARPHRD_SLIP
,
279 ARPHRD_CSLIP
, ARPHRD_SLIP6
, ARPHRD_CSLIP6
, ARPHRD_RSRVD
,
280 ARPHRD_ADAPT
, ARPHRD_ROSE
, ARPHRD_X25
, ARPHRD_HWX25
,
281 ARPHRD_PPP
, ARPHRD_CISCO
, ARPHRD_LAPB
, ARPHRD_DDCMP
,
282 ARPHRD_RAWHDLC
, ARPHRD_TUNNEL
, ARPHRD_TUNNEL6
, ARPHRD_FRAD
,
283 ARPHRD_SKIP
, ARPHRD_LOOPBACK
, ARPHRD_LOCALTLK
, ARPHRD_FDDI
,
284 ARPHRD_BIF
, ARPHRD_SIT
, ARPHRD_IPDDP
, ARPHRD_IPGRE
,
285 ARPHRD_PIMREG
, ARPHRD_HIPPI
, ARPHRD_ASH
, ARPHRD_ECONET
,
286 ARPHRD_IRDA
, ARPHRD_FCPP
, ARPHRD_FCAL
, ARPHRD_FCPL
,
287 ARPHRD_FCFABRIC
, ARPHRD_IEEE802_TR
, ARPHRD_IEEE80211
,
288 ARPHRD_IEEE80211_PRISM
, ARPHRD_IEEE80211_RADIOTAP
, ARPHRD_PHONET
,
289 ARPHRD_PHONET_PIPE
, ARPHRD_IEEE802154
,
290 ARPHRD_VOID
, ARPHRD_NONE
};
292 static const char *const netdev_lock_name
[] =
293 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
294 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
295 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
296 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
297 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
298 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
299 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
300 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
301 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
302 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
303 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
304 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
305 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
306 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
307 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
308 "_xmit_VOID", "_xmit_NONE"};
310 static struct lock_class_key netdev_xmit_lock_key
[ARRAY_SIZE(netdev_lock_type
)];
311 static struct lock_class_key netdev_addr_lock_key
[ARRAY_SIZE(netdev_lock_type
)];
313 static inline unsigned short netdev_lock_pos(unsigned short dev_type
)
317 for (i
= 0; i
< ARRAY_SIZE(netdev_lock_type
); i
++)
318 if (netdev_lock_type
[i
] == dev_type
)
320 /* the last key is used by default */
321 return ARRAY_SIZE(netdev_lock_type
) - 1;
324 static inline void netdev_set_xmit_lockdep_class(spinlock_t
*lock
,
325 unsigned short dev_type
)
329 i
= netdev_lock_pos(dev_type
);
330 lockdep_set_class_and_name(lock
, &netdev_xmit_lock_key
[i
],
331 netdev_lock_name
[i
]);
334 static inline void netdev_set_addr_lockdep_class(struct net_device
*dev
)
338 i
= netdev_lock_pos(dev
->type
);
339 lockdep_set_class_and_name(&dev
->addr_list_lock
,
340 &netdev_addr_lock_key
[i
],
341 netdev_lock_name
[i
]);
344 static inline void netdev_set_xmit_lockdep_class(spinlock_t
*lock
,
345 unsigned short dev_type
)
348 static inline void netdev_set_addr_lockdep_class(struct net_device
*dev
)
353 /*******************************************************************************
355 Protocol management and registration routines
357 *******************************************************************************/
360 * Add a protocol ID to the list. Now that the input handler is
361 * smarter we can dispense with all the messy stuff that used to be
364 * BEWARE!!! Protocol handlers, mangling input packets,
365 * MUST BE last in hash buckets and checking protocol handlers
366 * MUST start from promiscuous ptype_all chain in net_bh.
367 * It is true now, do not change it.
368 * Explanation follows: if protocol handler, mangling packet, will
369 * be the first on list, it is not able to sense, that packet
370 * is cloned and should be copied-on-write, so that it will
371 * change it and subsequent readers will get broken packet.
376 * dev_add_pack - add packet handler
377 * @pt: packet type declaration
379 * Add a protocol handler to the networking stack. The passed &packet_type
380 * is linked into kernel lists and may not be freed until it has been
381 * removed from the kernel lists.
383 * This call does not sleep therefore it can not
384 * guarantee all CPU's that are in middle of receiving packets
385 * will see the new packet type (until the next received packet).
388 void dev_add_pack(struct packet_type
*pt
)
392 spin_lock_bh(&ptype_lock
);
393 if (pt
->type
== htons(ETH_P_ALL
))
394 list_add_rcu(&pt
->list
, &ptype_all
);
396 hash
= ntohs(pt
->type
) & PTYPE_HASH_MASK
;
397 list_add_rcu(&pt
->list
, &ptype_base
[hash
]);
399 spin_unlock_bh(&ptype_lock
);
401 EXPORT_SYMBOL(dev_add_pack
);
404 * __dev_remove_pack - remove packet handler
405 * @pt: packet type declaration
407 * Remove a protocol handler that was previously added to the kernel
408 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
409 * from the kernel lists and can be freed or reused once this function
412 * The packet type might still be in use by receivers
413 * and must not be freed until after all the CPU's have gone
414 * through a quiescent state.
416 void __dev_remove_pack(struct packet_type
*pt
)
418 struct list_head
*head
;
419 struct packet_type
*pt1
;
421 spin_lock_bh(&ptype_lock
);
423 if (pt
->type
== htons(ETH_P_ALL
))
426 head
= &ptype_base
[ntohs(pt
->type
) & PTYPE_HASH_MASK
];
428 list_for_each_entry(pt1
, head
, list
) {
430 list_del_rcu(&pt
->list
);
435 printk(KERN_WARNING
"dev_remove_pack: %p not found.\n", pt
);
437 spin_unlock_bh(&ptype_lock
);
439 EXPORT_SYMBOL(__dev_remove_pack
);
442 * dev_remove_pack - remove packet handler
443 * @pt: packet type declaration
445 * Remove a protocol handler that was previously added to the kernel
446 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
447 * from the kernel lists and can be freed or reused once this function
450 * This call sleeps to guarantee that no CPU is looking at the packet
453 void dev_remove_pack(struct packet_type
*pt
)
455 __dev_remove_pack(pt
);
459 EXPORT_SYMBOL(dev_remove_pack
);
461 /******************************************************************************
463 Device Boot-time Settings Routines
465 *******************************************************************************/
467 /* Boot time configuration table */
468 static struct netdev_boot_setup dev_boot_setup
[NETDEV_BOOT_SETUP_MAX
];
471 * netdev_boot_setup_add - add new setup entry
472 * @name: name of the device
473 * @map: configured settings for the device
475 * Adds new setup entry to the dev_boot_setup list. The function
476 * returns 0 on error and 1 on success. This is a generic routine to
479 static int netdev_boot_setup_add(char *name
, struct ifmap
*map
)
481 struct netdev_boot_setup
*s
;
485 for (i
= 0; i
< NETDEV_BOOT_SETUP_MAX
; i
++) {
486 if (s
[i
].name
[0] == '\0' || s
[i
].name
[0] == ' ') {
487 memset(s
[i
].name
, 0, sizeof(s
[i
].name
));
488 strlcpy(s
[i
].name
, name
, IFNAMSIZ
);
489 memcpy(&s
[i
].map
, map
, sizeof(s
[i
].map
));
494 return i
>= NETDEV_BOOT_SETUP_MAX
? 0 : 1;
498 * netdev_boot_setup_check - check boot time settings
499 * @dev: the netdevice
501 * Check boot time settings for the device.
502 * The found settings are set for the device to be used
503 * later in the device probing.
504 * Returns 0 if no settings found, 1 if they are.
506 int netdev_boot_setup_check(struct net_device
*dev
)
508 struct netdev_boot_setup
*s
= dev_boot_setup
;
511 for (i
= 0; i
< NETDEV_BOOT_SETUP_MAX
; i
++) {
512 if (s
[i
].name
[0] != '\0' && s
[i
].name
[0] != ' ' &&
513 !strcmp(dev
->name
, s
[i
].name
)) {
514 dev
->irq
= s
[i
].map
.irq
;
515 dev
->base_addr
= s
[i
].map
.base_addr
;
516 dev
->mem_start
= s
[i
].map
.mem_start
;
517 dev
->mem_end
= s
[i
].map
.mem_end
;
523 EXPORT_SYMBOL(netdev_boot_setup_check
);
527 * netdev_boot_base - get address from boot time settings
528 * @prefix: prefix for network device
529 * @unit: id for network device
531 * Check boot time settings for the base address of device.
532 * The found settings are set for the device to be used
533 * later in the device probing.
534 * Returns 0 if no settings found.
536 unsigned long netdev_boot_base(const char *prefix
, int unit
)
538 const struct netdev_boot_setup
*s
= dev_boot_setup
;
542 sprintf(name
, "%s%d", prefix
, unit
);
545 * If device already registered then return base of 1
546 * to indicate not to probe for this interface
548 if (__dev_get_by_name(&init_net
, name
))
551 for (i
= 0; i
< NETDEV_BOOT_SETUP_MAX
; i
++)
552 if (!strcmp(name
, s
[i
].name
))
553 return s
[i
].map
.base_addr
;
558 * Saves at boot time configured settings for any netdevice.
560 int __init
netdev_boot_setup(char *str
)
565 str
= get_options(str
, ARRAY_SIZE(ints
), ints
);
570 memset(&map
, 0, sizeof(map
));
574 map
.base_addr
= ints
[2];
576 map
.mem_start
= ints
[3];
578 map
.mem_end
= ints
[4];
580 /* Add new entry to the list */
581 return netdev_boot_setup_add(str
, &map
);
584 __setup("netdev=", netdev_boot_setup
);
586 /*******************************************************************************
588 Device Interface Subroutines
590 *******************************************************************************/
593 * __dev_get_by_name - find a device by its name
594 * @net: the applicable net namespace
595 * @name: name to find
597 * Find an interface by name. Must be called under RTNL semaphore
598 * or @dev_base_lock. If the name is found a pointer to the device
599 * is returned. If the name is not found then %NULL is returned. The
600 * reference counters are not incremented so the caller must be
601 * careful with locks.
604 struct net_device
*__dev_get_by_name(struct net
*net
, const char *name
)
606 struct hlist_node
*p
;
607 struct net_device
*dev
;
608 struct hlist_head
*head
= dev_name_hash(net
, name
);
610 hlist_for_each_entry(dev
, p
, head
, name_hlist
)
611 if (!strncmp(dev
->name
, name
, IFNAMSIZ
))
616 EXPORT_SYMBOL(__dev_get_by_name
);
619 * dev_get_by_name_rcu - find a device by its name
620 * @net: the applicable net namespace
621 * @name: name to find
623 * Find an interface by name.
624 * If the name is found a pointer to the device is returned.
625 * If the name is not found then %NULL is returned.
626 * The reference counters are not incremented so the caller must be
627 * careful with locks. The caller must hold RCU lock.
630 struct net_device
*dev_get_by_name_rcu(struct net
*net
, const char *name
)
632 struct hlist_node
*p
;
633 struct net_device
*dev
;
634 struct hlist_head
*head
= dev_name_hash(net
, name
);
636 hlist_for_each_entry_rcu(dev
, p
, head
, name_hlist
)
637 if (!strncmp(dev
->name
, name
, IFNAMSIZ
))
642 EXPORT_SYMBOL(dev_get_by_name_rcu
);
645 * dev_get_by_name - find a device by its name
646 * @net: the applicable net namespace
647 * @name: name to find
649 * Find an interface by name. This can be called from any
650 * context and does its own locking. The returned handle has
651 * the usage count incremented and the caller must use dev_put() to
652 * release it when it is no longer needed. %NULL is returned if no
653 * matching device is found.
656 struct net_device
*dev_get_by_name(struct net
*net
, const char *name
)
658 struct net_device
*dev
;
661 dev
= dev_get_by_name_rcu(net
, name
);
667 EXPORT_SYMBOL(dev_get_by_name
);
670 * __dev_get_by_index - find a device by its ifindex
671 * @net: the applicable net namespace
672 * @ifindex: index of device
674 * Search for an interface by index. Returns %NULL if the device
675 * is not found or a pointer to the device. The device has not
676 * had its reference counter increased so the caller must be careful
677 * about locking. The caller must hold either the RTNL semaphore
681 struct net_device
*__dev_get_by_index(struct net
*net
, int ifindex
)
683 struct hlist_node
*p
;
684 struct net_device
*dev
;
685 struct hlist_head
*head
= dev_index_hash(net
, ifindex
);
687 hlist_for_each_entry(dev
, p
, head
, index_hlist
)
688 if (dev
->ifindex
== ifindex
)
693 EXPORT_SYMBOL(__dev_get_by_index
);
696 * dev_get_by_index_rcu - find a device by its ifindex
697 * @net: the applicable net namespace
698 * @ifindex: index of device
700 * Search for an interface by index. Returns %NULL if the device
701 * is not found or a pointer to the device. The device has not
702 * had its reference counter increased so the caller must be careful
703 * about locking. The caller must hold RCU lock.
706 struct net_device
*dev_get_by_index_rcu(struct net
*net
, int ifindex
)
708 struct hlist_node
*p
;
709 struct net_device
*dev
;
710 struct hlist_head
*head
= dev_index_hash(net
, ifindex
);
712 hlist_for_each_entry_rcu(dev
, p
, head
, index_hlist
)
713 if (dev
->ifindex
== ifindex
)
718 EXPORT_SYMBOL(dev_get_by_index_rcu
);
722 * dev_get_by_index - find a device by its ifindex
723 * @net: the applicable net namespace
724 * @ifindex: index of device
726 * Search for an interface by index. Returns NULL if the device
727 * is not found or a pointer to the device. The device returned has
728 * had a reference added and the pointer is safe until the user calls
729 * dev_put to indicate they have finished with it.
732 struct net_device
*dev_get_by_index(struct net
*net
, int ifindex
)
734 struct net_device
*dev
;
737 dev
= dev_get_by_index_rcu(net
, ifindex
);
743 EXPORT_SYMBOL(dev_get_by_index
);
746 * dev_getbyhwaddr - find a device by its hardware address
747 * @net: the applicable net namespace
748 * @type: media type of device
749 * @ha: hardware address
751 * Search for an interface by MAC address. Returns NULL if the device
752 * is not found or a pointer to the device. The caller must hold the
753 * rtnl semaphore. The returned device has not had its ref count increased
754 * and the caller must therefore be careful about locking
757 * If the API was consistent this would be __dev_get_by_hwaddr
760 struct net_device
*dev_getbyhwaddr(struct net
*net
, unsigned short type
, char *ha
)
762 struct net_device
*dev
;
766 for_each_netdev(net
, dev
)
767 if (dev
->type
== type
&&
768 !memcmp(dev
->dev_addr
, ha
, dev
->addr_len
))
773 EXPORT_SYMBOL(dev_getbyhwaddr
);
775 struct net_device
*__dev_getfirstbyhwtype(struct net
*net
, unsigned short type
)
777 struct net_device
*dev
;
780 for_each_netdev(net
, dev
)
781 if (dev
->type
== type
)
786 EXPORT_SYMBOL(__dev_getfirstbyhwtype
);
788 struct net_device
*dev_getfirstbyhwtype(struct net
*net
, unsigned short type
)
790 struct net_device
*dev
, *ret
= NULL
;
793 for_each_netdev_rcu(net
, dev
)
794 if (dev
->type
== type
) {
802 EXPORT_SYMBOL(dev_getfirstbyhwtype
);
805 * dev_get_by_flags - find any device with given flags
806 * @net: the applicable net namespace
807 * @if_flags: IFF_* values
808 * @mask: bitmask of bits in if_flags to check
810 * Search for any interface with the given flags. Returns NULL if a device
811 * is not found or a pointer to the device. The device returned has
812 * had a reference added and the pointer is safe until the user calls
813 * dev_put to indicate they have finished with it.
816 struct net_device
*dev_get_by_flags(struct net
*net
, unsigned short if_flags
,
819 struct net_device
*dev
, *ret
;
823 for_each_netdev_rcu(net
, dev
) {
824 if (((dev
->flags
^ if_flags
) & mask
) == 0) {
833 EXPORT_SYMBOL(dev_get_by_flags
);
836 * dev_valid_name - check if name is okay for network device
839 * Network device names need to be valid file names to
840 * to allow sysfs to work. We also disallow any kind of
843 int dev_valid_name(const char *name
)
847 if (strlen(name
) >= IFNAMSIZ
)
849 if (!strcmp(name
, ".") || !strcmp(name
, ".."))
853 if (*name
== '/' || isspace(*name
))
859 EXPORT_SYMBOL(dev_valid_name
);
862 * __dev_alloc_name - allocate a name for a device
863 * @net: network namespace to allocate the device name in
864 * @name: name format string
865 * @buf: scratch buffer and result name string
867 * Passed a format string - eg "lt%d" it will try and find a suitable
868 * id. It scans list of devices to build up a free map, then chooses
869 * the first empty slot. The caller must hold the dev_base or rtnl lock
870 * while allocating the name and adding the device in order to avoid
872 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
873 * Returns the number of the unit assigned or a negative errno code.
876 static int __dev_alloc_name(struct net
*net
, const char *name
, char *buf
)
880 const int max_netdevices
= 8*PAGE_SIZE
;
881 unsigned long *inuse
;
882 struct net_device
*d
;
884 p
= strnchr(name
, IFNAMSIZ
-1, '%');
887 * Verify the string as this thing may have come from
888 * the user. There must be either one "%d" and no other "%"
891 if (p
[1] != 'd' || strchr(p
+ 2, '%'))
894 /* Use one page as a bit array of possible slots */
895 inuse
= (unsigned long *) get_zeroed_page(GFP_ATOMIC
);
899 for_each_netdev(net
, d
) {
900 if (!sscanf(d
->name
, name
, &i
))
902 if (i
< 0 || i
>= max_netdevices
)
905 /* avoid cases where sscanf is not exact inverse of printf */
906 snprintf(buf
, IFNAMSIZ
, name
, i
);
907 if (!strncmp(buf
, d
->name
, IFNAMSIZ
))
911 i
= find_first_zero_bit(inuse
, max_netdevices
);
912 free_page((unsigned long) inuse
);
916 snprintf(buf
, IFNAMSIZ
, name
, i
);
917 if (!__dev_get_by_name(net
, buf
))
920 /* It is possible to run out of possible slots
921 * when the name is long and there isn't enough space left
922 * for the digits, or if all bits are used.
928 * dev_alloc_name - allocate a name for a device
930 * @name: name format string
932 * Passed a format string - eg "lt%d" it will try and find a suitable
933 * id. It scans list of devices to build up a free map, then chooses
934 * the first empty slot. The caller must hold the dev_base or rtnl lock
935 * while allocating the name and adding the device in order to avoid
937 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
938 * Returns the number of the unit assigned or a negative errno code.
941 int dev_alloc_name(struct net_device
*dev
, const char *name
)
947 BUG_ON(!dev_net(dev
));
949 ret
= __dev_alloc_name(net
, name
, buf
);
951 strlcpy(dev
->name
, buf
, IFNAMSIZ
);
954 EXPORT_SYMBOL(dev_alloc_name
);
956 static int dev_get_valid_name(struct net
*net
, const char *name
, char *buf
,
959 if (!dev_valid_name(name
))
962 if (fmt
&& strchr(name
, '%'))
963 return __dev_alloc_name(net
, name
, buf
);
964 else if (__dev_get_by_name(net
, name
))
966 else if (buf
!= name
)
967 strlcpy(buf
, name
, IFNAMSIZ
);
973 * dev_change_name - change name of a device
975 * @newname: name (or format string) must be at least IFNAMSIZ
977 * Change name of a device, can pass format strings "eth%d".
980 int dev_change_name(struct net_device
*dev
, const char *newname
)
982 char oldname
[IFNAMSIZ
];
988 BUG_ON(!dev_net(dev
));
991 if (dev
->flags
& IFF_UP
)
994 if (strncmp(newname
, dev
->name
, IFNAMSIZ
) == 0)
997 memcpy(oldname
, dev
->name
, IFNAMSIZ
);
999 err
= dev_get_valid_name(net
, newname
, dev
->name
, 1);
1004 /* For now only devices in the initial network namespace
1007 if (net_eq(net
, &init_net
)) {
1008 ret
= device_rename(&dev
->dev
, dev
->name
);
1010 memcpy(dev
->name
, oldname
, IFNAMSIZ
);
1015 write_lock_bh(&dev_base_lock
);
1016 hlist_del(&dev
->name_hlist
);
1017 write_unlock_bh(&dev_base_lock
);
1021 write_lock_bh(&dev_base_lock
);
1022 hlist_add_head_rcu(&dev
->name_hlist
, dev_name_hash(net
, dev
->name
));
1023 write_unlock_bh(&dev_base_lock
);
1025 ret
= call_netdevice_notifiers(NETDEV_CHANGENAME
, dev
);
1026 ret
= notifier_to_errno(ret
);
1029 /* err >= 0 after dev_alloc_name() or stores the first errno */
1032 memcpy(dev
->name
, oldname
, IFNAMSIZ
);
1036 "%s: name change rollback failed: %d.\n",
1045 * dev_set_alias - change ifalias of a device
1047 * @alias: name up to IFALIASZ
1048 * @len: limit of bytes to copy from info
1050 * Set ifalias for a device,
1052 int dev_set_alias(struct net_device
*dev
, const char *alias
, size_t len
)
1056 if (len
>= IFALIASZ
)
1061 kfree(dev
->ifalias
);
1062 dev
->ifalias
= NULL
;
1067 dev
->ifalias
= krealloc(dev
->ifalias
, len
+ 1, GFP_KERNEL
);
1071 strlcpy(dev
->ifalias
, alias
, len
+1);
1077 * netdev_features_change - device changes features
1078 * @dev: device to cause notification
1080 * Called to indicate a device has changed features.
1082 void netdev_features_change(struct net_device
*dev
)
1084 call_netdevice_notifiers(NETDEV_FEAT_CHANGE
, dev
);
1086 EXPORT_SYMBOL(netdev_features_change
);
1089 * netdev_state_change - device changes state
1090 * @dev: device to cause notification
1092 * Called to indicate a device has changed state. This function calls
1093 * the notifier chains for netdev_chain and sends a NEWLINK message
1094 * to the routing socket.
1096 void netdev_state_change(struct net_device
*dev
)
1098 if (dev
->flags
& IFF_UP
) {
1099 call_netdevice_notifiers(NETDEV_CHANGE
, dev
);
1100 rtmsg_ifinfo(RTM_NEWLINK
, dev
, 0);
1103 EXPORT_SYMBOL(netdev_state_change
);
1105 int netdev_bonding_change(struct net_device
*dev
, unsigned long event
)
1107 return call_netdevice_notifiers(event
, dev
);
1109 EXPORT_SYMBOL(netdev_bonding_change
);
1112 * dev_load - load a network module
1113 * @net: the applicable net namespace
1114 * @name: name of interface
1116 * If a network interface is not present and the process has suitable
1117 * privileges this function loads the module. If module loading is not
1118 * available in this kernel then it becomes a nop.
1121 void dev_load(struct net
*net
, const char *name
)
1123 struct net_device
*dev
;
1126 dev
= dev_get_by_name_rcu(net
, name
);
1129 if (!dev
&& capable(CAP_NET_ADMIN
))
1130 request_module("%s", name
);
1132 EXPORT_SYMBOL(dev_load
);
1134 static int __dev_open(struct net_device
*dev
)
1136 const struct net_device_ops
*ops
= dev
->netdev_ops
;
1142 * Is it even present?
1144 if (!netif_device_present(dev
))
1147 ret
= call_netdevice_notifiers(NETDEV_PRE_UP
, dev
);
1148 ret
= notifier_to_errno(ret
);
1153 * Call device private open method
1155 set_bit(__LINK_STATE_START
, &dev
->state
);
1157 if (ops
->ndo_validate_addr
)
1158 ret
= ops
->ndo_validate_addr(dev
);
1160 if (!ret
&& ops
->ndo_open
)
1161 ret
= ops
->ndo_open(dev
);
1164 * If it went open OK then:
1168 clear_bit(__LINK_STATE_START
, &dev
->state
);
1173 dev
->flags
|= IFF_UP
;
1178 net_dmaengine_get();
1181 * Initialize multicasting status
1183 dev_set_rx_mode(dev
);
1186 * Wakeup transmit queue engine
1195 * dev_open - prepare an interface for use.
1196 * @dev: device to open
1198 * Takes a device from down to up state. The device's private open
1199 * function is invoked and then the multicast lists are loaded. Finally
1200 * the device is moved into the up state and a %NETDEV_UP message is
1201 * sent to the netdev notifier chain.
1203 * Calling this function on an active interface is a nop. On a failure
1204 * a negative errno code is returned.
1206 int dev_open(struct net_device
*dev
)
1213 if (dev
->flags
& IFF_UP
)
1219 ret
= __dev_open(dev
);
1224 * ... and announce new interface.
1226 rtmsg_ifinfo(RTM_NEWLINK
, dev
, IFF_UP
|IFF_RUNNING
);
1227 call_netdevice_notifiers(NETDEV_UP
, dev
);
1231 EXPORT_SYMBOL(dev_open
);
1233 static int __dev_close(struct net_device
*dev
)
1235 const struct net_device_ops
*ops
= dev
->netdev_ops
;
1241 * Tell people we are going down, so that they can
1242 * prepare to death, when device is still operating.
1244 call_netdevice_notifiers(NETDEV_GOING_DOWN
, dev
);
1246 clear_bit(__LINK_STATE_START
, &dev
->state
);
1248 /* Synchronize to scheduled poll. We cannot touch poll list,
1249 * it can be even on different cpu. So just clear netif_running().
1251 * dev->stop() will invoke napi_disable() on all of it's
1252 * napi_struct instances on this device.
1254 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1256 dev_deactivate(dev
);
1259 * Call the device specific close. This cannot fail.
1260 * Only if device is UP
1262 * We allow it to be called even after a DETACH hot-plug
1269 * Device is now down.
1272 dev
->flags
&= ~IFF_UP
;
1277 net_dmaengine_put();
1283 * dev_close - shutdown an interface.
1284 * @dev: device to shutdown
1286 * This function moves an active device into down state. A
1287 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1288 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1291 int dev_close(struct net_device
*dev
)
1293 if (!(dev
->flags
& IFF_UP
))
1299 * Tell people we are down
1301 rtmsg_ifinfo(RTM_NEWLINK
, dev
, IFF_UP
|IFF_RUNNING
);
1302 call_netdevice_notifiers(NETDEV_DOWN
, dev
);
1306 EXPORT_SYMBOL(dev_close
);
1310 * dev_disable_lro - disable Large Receive Offload on a device
1313 * Disable Large Receive Offload (LRO) on a net device. Must be
1314 * called under RTNL. This is needed if received packets may be
1315 * forwarded to another interface.
1317 void dev_disable_lro(struct net_device
*dev
)
1319 if (dev
->ethtool_ops
&& dev
->ethtool_ops
->get_flags
&&
1320 dev
->ethtool_ops
->set_flags
) {
1321 u32 flags
= dev
->ethtool_ops
->get_flags(dev
);
1322 if (flags
& ETH_FLAG_LRO
) {
1323 flags
&= ~ETH_FLAG_LRO
;
1324 dev
->ethtool_ops
->set_flags(dev
, flags
);
1327 WARN_ON(dev
->features
& NETIF_F_LRO
);
1329 EXPORT_SYMBOL(dev_disable_lro
);
1332 static int dev_boot_phase
= 1;
1335 * Device change register/unregister. These are not inline or static
1336 * as we export them to the world.
1340 * register_netdevice_notifier - register a network notifier block
1343 * Register a notifier to be called when network device events occur.
1344 * The notifier passed is linked into the kernel structures and must
1345 * not be reused until it has been unregistered. A negative errno code
1346 * is returned on a failure.
1348 * When registered all registration and up events are replayed
1349 * to the new notifier to allow device to have a race free
1350 * view of the network device list.
1353 int register_netdevice_notifier(struct notifier_block
*nb
)
1355 struct net_device
*dev
;
1356 struct net_device
*last
;
1361 err
= raw_notifier_chain_register(&netdev_chain
, nb
);
1367 for_each_netdev(net
, dev
) {
1368 err
= nb
->notifier_call(nb
, NETDEV_REGISTER
, dev
);
1369 err
= notifier_to_errno(err
);
1373 if (!(dev
->flags
& IFF_UP
))
1376 nb
->notifier_call(nb
, NETDEV_UP
, dev
);
1387 for_each_netdev(net
, dev
) {
1391 if (dev
->flags
& IFF_UP
) {
1392 nb
->notifier_call(nb
, NETDEV_GOING_DOWN
, dev
);
1393 nb
->notifier_call(nb
, NETDEV_DOWN
, dev
);
1395 nb
->notifier_call(nb
, NETDEV_UNREGISTER
, dev
);
1396 nb
->notifier_call(nb
, NETDEV_UNREGISTER_BATCH
, dev
);
1400 raw_notifier_chain_unregister(&netdev_chain
, nb
);
1403 EXPORT_SYMBOL(register_netdevice_notifier
);
1406 * unregister_netdevice_notifier - unregister a network notifier block
1409 * Unregister a notifier previously registered by
1410 * register_netdevice_notifier(). The notifier is unlinked into the
1411 * kernel structures and may then be reused. A negative errno code
1412 * is returned on a failure.
1415 int unregister_netdevice_notifier(struct notifier_block
*nb
)
1420 err
= raw_notifier_chain_unregister(&netdev_chain
, nb
);
1424 EXPORT_SYMBOL(unregister_netdevice_notifier
);
1427 * call_netdevice_notifiers - call all network notifier blocks
1428 * @val: value passed unmodified to notifier function
1429 * @dev: net_device pointer passed unmodified to notifier function
1431 * Call all network notifier blocks. Parameters and return value
1432 * are as for raw_notifier_call_chain().
1435 int call_netdevice_notifiers(unsigned long val
, struct net_device
*dev
)
1437 return raw_notifier_call_chain(&netdev_chain
, val
, dev
);
1440 /* When > 0 there are consumers of rx skb time stamps */
1441 static atomic_t netstamp_needed
= ATOMIC_INIT(0);
1443 void net_enable_timestamp(void)
1445 atomic_inc(&netstamp_needed
);
1447 EXPORT_SYMBOL(net_enable_timestamp
);
1449 void net_disable_timestamp(void)
1451 atomic_dec(&netstamp_needed
);
1453 EXPORT_SYMBOL(net_disable_timestamp
);
1455 static inline void net_timestamp(struct sk_buff
*skb
)
1457 if (atomic_read(&netstamp_needed
))
1458 __net_timestamp(skb
);
1460 skb
->tstamp
.tv64
= 0;
1464 * dev_forward_skb - loopback an skb to another netif
1466 * @dev: destination network device
1467 * @skb: buffer to forward
1470 * NET_RX_SUCCESS (no congestion)
1471 * NET_RX_DROP (packet was dropped)
1473 * dev_forward_skb can be used for injecting an skb from the
1474 * start_xmit function of one device into the receive queue
1475 * of another device.
1477 * The receiving device may be in another namespace, so
1478 * we have to clear all information in the skb that could
1479 * impact namespace isolation.
1481 int dev_forward_skb(struct net_device
*dev
, struct sk_buff
*skb
)
1485 if (!(dev
->flags
& IFF_UP
))
1488 if (skb
->len
> (dev
->mtu
+ dev
->hard_header_len
))
1491 skb_set_dev(skb
, dev
);
1492 skb
->tstamp
.tv64
= 0;
1493 skb
->pkt_type
= PACKET_HOST
;
1494 skb
->protocol
= eth_type_trans(skb
, dev
);
1495 return netif_rx(skb
);
1497 EXPORT_SYMBOL_GPL(dev_forward_skb
);
1500 * Support routine. Sends outgoing frames to any network
1501 * taps currently in use.
1504 static void dev_queue_xmit_nit(struct sk_buff
*skb
, struct net_device
*dev
)
1506 struct packet_type
*ptype
;
1508 #ifdef CONFIG_NET_CLS_ACT
1509 if (!(skb
->tstamp
.tv64
&& (G_TC_FROM(skb
->tc_verd
) & AT_INGRESS
)))
1516 list_for_each_entry_rcu(ptype
, &ptype_all
, list
) {
1517 /* Never send packets back to the socket
1518 * they originated from - MvS (miquels@drinkel.ow.org)
1520 if ((ptype
->dev
== dev
|| !ptype
->dev
) &&
1521 (ptype
->af_packet_priv
== NULL
||
1522 (struct sock
*)ptype
->af_packet_priv
!= skb
->sk
)) {
1523 struct sk_buff
*skb2
= skb_clone(skb
, GFP_ATOMIC
);
1527 /* skb->nh should be correctly
1528 set by sender, so that the second statement is
1529 just protection against buggy protocols.
1531 skb_reset_mac_header(skb2
);
1533 if (skb_network_header(skb2
) < skb2
->data
||
1534 skb2
->network_header
> skb2
->tail
) {
1535 if (net_ratelimit())
1536 printk(KERN_CRIT
"protocol %04x is "
1538 skb2
->protocol
, dev
->name
);
1539 skb_reset_network_header(skb2
);
1542 skb2
->transport_header
= skb2
->network_header
;
1543 skb2
->pkt_type
= PACKET_OUTGOING
;
1544 ptype
->func(skb2
, skb
->dev
, ptype
, skb
->dev
);
1551 static inline void __netif_reschedule(struct Qdisc
*q
)
1553 struct softnet_data
*sd
;
1554 unsigned long flags
;
1556 local_irq_save(flags
);
1557 sd
= &__get_cpu_var(softnet_data
);
1558 q
->next_sched
= sd
->output_queue
;
1559 sd
->output_queue
= q
;
1560 raise_softirq_irqoff(NET_TX_SOFTIRQ
);
1561 local_irq_restore(flags
);
1564 void __netif_schedule(struct Qdisc
*q
)
1566 if (!test_and_set_bit(__QDISC_STATE_SCHED
, &q
->state
))
1567 __netif_reschedule(q
);
1569 EXPORT_SYMBOL(__netif_schedule
);
1571 void dev_kfree_skb_irq(struct sk_buff
*skb
)
1573 if (atomic_dec_and_test(&skb
->users
)) {
1574 struct softnet_data
*sd
;
1575 unsigned long flags
;
1577 local_irq_save(flags
);
1578 sd
= &__get_cpu_var(softnet_data
);
1579 skb
->next
= sd
->completion_queue
;
1580 sd
->completion_queue
= skb
;
1581 raise_softirq_irqoff(NET_TX_SOFTIRQ
);
1582 local_irq_restore(flags
);
1585 EXPORT_SYMBOL(dev_kfree_skb_irq
);
1587 void dev_kfree_skb_any(struct sk_buff
*skb
)
1589 if (in_irq() || irqs_disabled())
1590 dev_kfree_skb_irq(skb
);
1594 EXPORT_SYMBOL(dev_kfree_skb_any
);
1598 * netif_device_detach - mark device as removed
1599 * @dev: network device
1601 * Mark device as removed from system and therefore no longer available.
1603 void netif_device_detach(struct net_device
*dev
)
1605 if (test_and_clear_bit(__LINK_STATE_PRESENT
, &dev
->state
) &&
1606 netif_running(dev
)) {
1607 netif_tx_stop_all_queues(dev
);
1610 EXPORT_SYMBOL(netif_device_detach
);
1613 * netif_device_attach - mark device as attached
1614 * @dev: network device
1616 * Mark device as attached from system and restart if needed.
1618 void netif_device_attach(struct net_device
*dev
)
1620 if (!test_and_set_bit(__LINK_STATE_PRESENT
, &dev
->state
) &&
1621 netif_running(dev
)) {
1622 netif_tx_wake_all_queues(dev
);
1623 __netdev_watchdog_up(dev
);
1626 EXPORT_SYMBOL(netif_device_attach
);
1628 static bool can_checksum_protocol(unsigned long features
, __be16 protocol
)
1630 return ((features
& NETIF_F_GEN_CSUM
) ||
1631 ((features
& NETIF_F_IP_CSUM
) &&
1632 protocol
== htons(ETH_P_IP
)) ||
1633 ((features
& NETIF_F_IPV6_CSUM
) &&
1634 protocol
== htons(ETH_P_IPV6
)) ||
1635 ((features
& NETIF_F_FCOE_CRC
) &&
1636 protocol
== htons(ETH_P_FCOE
)));
1639 static bool dev_can_checksum(struct net_device
*dev
, struct sk_buff
*skb
)
1641 if (can_checksum_protocol(dev
->features
, skb
->protocol
))
1644 if (skb
->protocol
== htons(ETH_P_8021Q
)) {
1645 struct vlan_ethhdr
*veh
= (struct vlan_ethhdr
*)skb
->data
;
1646 if (can_checksum_protocol(dev
->features
& dev
->vlan_features
,
1647 veh
->h_vlan_encapsulated_proto
))
1655 * skb_dev_set -- assign a new device to a buffer
1656 * @skb: buffer for the new device
1657 * @dev: network device
1659 * If an skb is owned by a device already, we have to reset
1660 * all data private to the namespace a device belongs to
1661 * before assigning it a new device.
1663 #ifdef CONFIG_NET_NS
1664 void skb_set_dev(struct sk_buff
*skb
, struct net_device
*dev
)
1667 if (skb
->dev
&& !net_eq(dev_net(skb
->dev
), dev_net(dev
))) {
1670 skb_init_secmark(skb
);
1674 skb
->ipvs_property
= 0;
1675 #ifdef CONFIG_NET_SCHED
1681 EXPORT_SYMBOL(skb_set_dev
);
1682 #endif /* CONFIG_NET_NS */
1685 * Invalidate hardware checksum when packet is to be mangled, and
1686 * complete checksum manually on outgoing path.
1688 int skb_checksum_help(struct sk_buff
*skb
)
1691 int ret
= 0, offset
;
1693 if (skb
->ip_summed
== CHECKSUM_COMPLETE
)
1694 goto out_set_summed
;
1696 if (unlikely(skb_shinfo(skb
)->gso_size
)) {
1697 /* Let GSO fix up the checksum. */
1698 goto out_set_summed
;
1701 offset
= skb
->csum_start
- skb_headroom(skb
);
1702 BUG_ON(offset
>= skb_headlen(skb
));
1703 csum
= skb_checksum(skb
, offset
, skb
->len
- offset
, 0);
1705 offset
+= skb
->csum_offset
;
1706 BUG_ON(offset
+ sizeof(__sum16
) > skb_headlen(skb
));
1708 if (skb_cloned(skb
) &&
1709 !skb_clone_writable(skb
, offset
+ sizeof(__sum16
))) {
1710 ret
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
1715 *(__sum16
*)(skb
->data
+ offset
) = csum_fold(csum
);
1717 skb
->ip_summed
= CHECKSUM_NONE
;
1721 EXPORT_SYMBOL(skb_checksum_help
);
1724 * skb_gso_segment - Perform segmentation on skb.
1725 * @skb: buffer to segment
1726 * @features: features for the output path (see dev->features)
1728 * This function segments the given skb and returns a list of segments.
1730 * It may return NULL if the skb requires no segmentation. This is
1731 * only possible when GSO is used for verifying header integrity.
1733 struct sk_buff
*skb_gso_segment(struct sk_buff
*skb
, int features
)
1735 struct sk_buff
*segs
= ERR_PTR(-EPROTONOSUPPORT
);
1736 struct packet_type
*ptype
;
1737 __be16 type
= skb
->protocol
;
1740 skb_reset_mac_header(skb
);
1741 skb
->mac_len
= skb
->network_header
- skb
->mac_header
;
1742 __skb_pull(skb
, skb
->mac_len
);
1744 if (unlikely(skb
->ip_summed
!= CHECKSUM_PARTIAL
)) {
1745 struct net_device
*dev
= skb
->dev
;
1746 struct ethtool_drvinfo info
= {};
1748 if (dev
&& dev
->ethtool_ops
&& dev
->ethtool_ops
->get_drvinfo
)
1749 dev
->ethtool_ops
->get_drvinfo(dev
, &info
);
1751 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1753 info
.driver
, dev
? dev
->features
: 0L,
1754 skb
->sk
? skb
->sk
->sk_route_caps
: 0L,
1755 skb
->len
, skb
->data_len
, skb
->ip_summed
);
1757 if (skb_header_cloned(skb
) &&
1758 (err
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)))
1759 return ERR_PTR(err
);
1763 list_for_each_entry_rcu(ptype
,
1764 &ptype_base
[ntohs(type
) & PTYPE_HASH_MASK
], list
) {
1765 if (ptype
->type
== type
&& !ptype
->dev
&& ptype
->gso_segment
) {
1766 if (unlikely(skb
->ip_summed
!= CHECKSUM_PARTIAL
)) {
1767 err
= ptype
->gso_send_check(skb
);
1768 segs
= ERR_PTR(err
);
1769 if (err
|| skb_gso_ok(skb
, features
))
1771 __skb_push(skb
, (skb
->data
-
1772 skb_network_header(skb
)));
1774 segs
= ptype
->gso_segment(skb
, features
);
1780 __skb_push(skb
, skb
->data
- skb_mac_header(skb
));
1784 EXPORT_SYMBOL(skb_gso_segment
);
1786 /* Take action when hardware reception checksum errors are detected. */
1788 void netdev_rx_csum_fault(struct net_device
*dev
)
1790 if (net_ratelimit()) {
1791 printk(KERN_ERR
"%s: hw csum failure.\n",
1792 dev
? dev
->name
: "<unknown>");
1796 EXPORT_SYMBOL(netdev_rx_csum_fault
);
1799 /* Actually, we should eliminate this check as soon as we know, that:
1800 * 1. IOMMU is present and allows to map all the memory.
1801 * 2. No high memory really exists on this machine.
1804 static int illegal_highdma(struct net_device
*dev
, struct sk_buff
*skb
)
1806 #ifdef CONFIG_HIGHMEM
1808 if (!(dev
->features
& NETIF_F_HIGHDMA
)) {
1809 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++)
1810 if (PageHighMem(skb_shinfo(skb
)->frags
[i
].page
))
1814 if (PCI_DMA_BUS_IS_PHYS
) {
1815 struct device
*pdev
= dev
->dev
.parent
;
1819 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
1820 dma_addr_t addr
= page_to_phys(skb_shinfo(skb
)->frags
[i
].page
);
1821 if (!pdev
->dma_mask
|| addr
+ PAGE_SIZE
- 1 > *pdev
->dma_mask
)
1830 void (*destructor
)(struct sk_buff
*skb
);
1833 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1835 static void dev_gso_skb_destructor(struct sk_buff
*skb
)
1837 struct dev_gso_cb
*cb
;
1840 struct sk_buff
*nskb
= skb
->next
;
1842 skb
->next
= nskb
->next
;
1845 } while (skb
->next
);
1847 cb
= DEV_GSO_CB(skb
);
1849 cb
->destructor(skb
);
1853 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1854 * @skb: buffer to segment
1856 * This function segments the given skb and stores the list of segments
1859 static int dev_gso_segment(struct sk_buff
*skb
)
1861 struct net_device
*dev
= skb
->dev
;
1862 struct sk_buff
*segs
;
1863 int features
= dev
->features
& ~(illegal_highdma(dev
, skb
) ?
1866 segs
= skb_gso_segment(skb
, features
);
1868 /* Verifying header integrity only. */
1873 return PTR_ERR(segs
);
1876 DEV_GSO_CB(skb
)->destructor
= skb
->destructor
;
1877 skb
->destructor
= dev_gso_skb_destructor
;
1882 int dev_hard_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
,
1883 struct netdev_queue
*txq
)
1885 const struct net_device_ops
*ops
= dev
->netdev_ops
;
1886 int rc
= NETDEV_TX_OK
;
1888 if (likely(!skb
->next
)) {
1889 if (!list_empty(&ptype_all
))
1890 dev_queue_xmit_nit(skb
, dev
);
1892 if (netif_needs_gso(dev
, skb
)) {
1893 if (unlikely(dev_gso_segment(skb
)))
1900 * If device doesnt need skb->dst, release it right now while
1901 * its hot in this cpu cache
1903 if (dev
->priv_flags
& IFF_XMIT_DST_RELEASE
)
1906 rc
= ops
->ndo_start_xmit(skb
, dev
);
1907 if (rc
== NETDEV_TX_OK
)
1908 txq_trans_update(txq
);
1910 * TODO: if skb_orphan() was called by
1911 * dev->hard_start_xmit() (for example, the unmodified
1912 * igb driver does that; bnx2 doesn't), then
1913 * skb_tx_software_timestamp() will be unable to send
1914 * back the time stamp.
1916 * How can this be prevented? Always create another
1917 * reference to the socket before calling
1918 * dev->hard_start_xmit()? Prevent that skb_orphan()
1919 * does anything in dev->hard_start_xmit() by clearing
1920 * the skb destructor before the call and restoring it
1921 * afterwards, then doing the skb_orphan() ourselves?
1928 struct sk_buff
*nskb
= skb
->next
;
1930 skb
->next
= nskb
->next
;
1934 * If device doesnt need nskb->dst, release it right now while
1935 * its hot in this cpu cache
1937 if (dev
->priv_flags
& IFF_XMIT_DST_RELEASE
)
1940 rc
= ops
->ndo_start_xmit(nskb
, dev
);
1941 if (unlikely(rc
!= NETDEV_TX_OK
)) {
1942 if (rc
& ~NETDEV_TX_MASK
)
1943 goto out_kfree_gso_skb
;
1944 nskb
->next
= skb
->next
;
1948 txq_trans_update(txq
);
1949 if (unlikely(netif_tx_queue_stopped(txq
) && skb
->next
))
1950 return NETDEV_TX_BUSY
;
1951 } while (skb
->next
);
1954 if (likely(skb
->next
== NULL
))
1955 skb
->destructor
= DEV_GSO_CB(skb
)->destructor
;
1961 static u32 hashrnd __read_mostly
;
1963 u16
skb_tx_hash(const struct net_device
*dev
, const struct sk_buff
*skb
)
1967 if (skb_rx_queue_recorded(skb
)) {
1968 hash
= skb_get_rx_queue(skb
);
1969 while (unlikely(hash
>= dev
->real_num_tx_queues
))
1970 hash
-= dev
->real_num_tx_queues
;
1974 if (skb
->sk
&& skb
->sk
->sk_hash
)
1975 hash
= skb
->sk
->sk_hash
;
1977 hash
= skb
->protocol
;
1979 hash
= jhash_1word(hash
, hashrnd
);
1981 return (u16
) (((u64
) hash
* dev
->real_num_tx_queues
) >> 32);
1983 EXPORT_SYMBOL(skb_tx_hash
);
1985 static inline u16
dev_cap_txqueue(struct net_device
*dev
, u16 queue_index
)
1987 if (unlikely(queue_index
>= dev
->real_num_tx_queues
)) {
1988 if (net_ratelimit()) {
1989 netdev_warn(dev
, "selects TX queue %d, but "
1990 "real number of TX queues is %d\n",
1991 queue_index
, dev
->real_num_tx_queues
);
1998 static struct netdev_queue
*dev_pick_tx(struct net_device
*dev
,
1999 struct sk_buff
*skb
)
2002 struct sock
*sk
= skb
->sk
;
2004 if (sk_tx_queue_recorded(sk
)) {
2005 queue_index
= sk_tx_queue_get(sk
);
2007 const struct net_device_ops
*ops
= dev
->netdev_ops
;
2009 if (ops
->ndo_select_queue
) {
2010 queue_index
= ops
->ndo_select_queue(dev
, skb
);
2011 queue_index
= dev_cap_txqueue(dev
, queue_index
);
2014 if (dev
->real_num_tx_queues
> 1)
2015 queue_index
= skb_tx_hash(dev
, skb
);
2017 if (sk
&& sk
->sk_dst_cache
)
2018 sk_tx_queue_set(sk
, queue_index
);
2022 skb_set_queue_mapping(skb
, queue_index
);
2023 return netdev_get_tx_queue(dev
, queue_index
);
2026 static inline int __dev_xmit_skb(struct sk_buff
*skb
, struct Qdisc
*q
,
2027 struct net_device
*dev
,
2028 struct netdev_queue
*txq
)
2030 spinlock_t
*root_lock
= qdisc_lock(q
);
2033 spin_lock(root_lock
);
2034 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED
, &q
->state
))) {
2037 } else if ((q
->flags
& TCQ_F_CAN_BYPASS
) && !qdisc_qlen(q
) &&
2038 !test_and_set_bit(__QDISC_STATE_RUNNING
, &q
->state
)) {
2040 * This is a work-conserving queue; there are no old skbs
2041 * waiting to be sent out; and the qdisc is not running -
2042 * xmit the skb directly.
2044 __qdisc_update_bstats(q
, skb
->len
);
2045 if (sch_direct_xmit(skb
, q
, dev
, txq
, root_lock
))
2048 clear_bit(__QDISC_STATE_RUNNING
, &q
->state
);
2050 rc
= NET_XMIT_SUCCESS
;
2052 rc
= qdisc_enqueue_root(skb
, q
);
2055 spin_unlock(root_lock
);
2061 * Returns true if either:
2062 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2063 * 2. skb is fragmented and the device does not support SG, or if
2064 * at least one of fragments is in highmem and device does not
2065 * support DMA from it.
2067 static inline int skb_needs_linearize(struct sk_buff
*skb
,
2068 struct net_device
*dev
)
2070 return (skb_has_frags(skb
) && !(dev
->features
& NETIF_F_FRAGLIST
)) ||
2071 (skb_shinfo(skb
)->nr_frags
&& (!(dev
->features
& NETIF_F_SG
) ||
2072 illegal_highdma(dev
, skb
)));
2076 * dev_queue_xmit - transmit a buffer
2077 * @skb: buffer to transmit
2079 * Queue a buffer for transmission to a network device. The caller must
2080 * have set the device and priority and built the buffer before calling
2081 * this function. The function can be called from an interrupt.
2083 * A negative errno code is returned on a failure. A success does not
2084 * guarantee the frame will be transmitted as it may be dropped due
2085 * to congestion or traffic shaping.
2087 * -----------------------------------------------------------------------------------
2088 * I notice this method can also return errors from the queue disciplines,
2089 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2092 * Regardless of the return value, the skb is consumed, so it is currently
2093 * difficult to retry a send to this method. (You can bump the ref count
2094 * before sending to hold a reference for retry if you are careful.)
2096 * When calling this method, interrupts MUST be enabled. This is because
2097 * the BH enable code must have IRQs enabled so that it will not deadlock.
2100 int dev_queue_xmit(struct sk_buff
*skb
)
2102 struct net_device
*dev
= skb
->dev
;
2103 struct netdev_queue
*txq
;
2107 /* GSO will handle the following emulations directly. */
2108 if (netif_needs_gso(dev
, skb
))
2111 /* Convert a paged skb to linear, if required */
2112 if (skb_needs_linearize(skb
, dev
) && __skb_linearize(skb
))
2115 /* If packet is not checksummed and device does not support
2116 * checksumming for this protocol, complete checksumming here.
2118 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
2119 skb_set_transport_header(skb
, skb
->csum_start
-
2121 if (!dev_can_checksum(dev
, skb
) && skb_checksum_help(skb
))
2126 /* Disable soft irqs for various locks below. Also
2127 * stops preemption for RCU.
2131 txq
= dev_pick_tx(dev
, skb
);
2132 q
= rcu_dereference_bh(txq
->qdisc
);
2134 #ifdef CONFIG_NET_CLS_ACT
2135 skb
->tc_verd
= SET_TC_AT(skb
->tc_verd
, AT_EGRESS
);
2138 rc
= __dev_xmit_skb(skb
, q
, dev
, txq
);
2142 /* The device has no queue. Common case for software devices:
2143 loopback, all the sorts of tunnels...
2145 Really, it is unlikely that netif_tx_lock protection is necessary
2146 here. (f.e. loopback and IP tunnels are clean ignoring statistics
2148 However, it is possible, that they rely on protection
2151 Check this and shot the lock. It is not prone from deadlocks.
2152 Either shot noqueue qdisc, it is even simpler 8)
2154 if (dev
->flags
& IFF_UP
) {
2155 int cpu
= smp_processor_id(); /* ok because BHs are off */
2157 if (txq
->xmit_lock_owner
!= cpu
) {
2159 HARD_TX_LOCK(dev
, txq
, cpu
);
2161 if (!netif_tx_queue_stopped(txq
)) {
2162 rc
= dev_hard_start_xmit(skb
, dev
, txq
);
2163 if (dev_xmit_complete(rc
)) {
2164 HARD_TX_UNLOCK(dev
, txq
);
2168 HARD_TX_UNLOCK(dev
, txq
);
2169 if (net_ratelimit())
2170 printk(KERN_CRIT
"Virtual device %s asks to "
2171 "queue packet!\n", dev
->name
);
2173 /* Recursion is detected! It is possible,
2175 if (net_ratelimit())
2176 printk(KERN_CRIT
"Dead loop on virtual device "
2177 "%s, fix it urgently!\n", dev
->name
);
2182 rcu_read_unlock_bh();
2188 rcu_read_unlock_bh();
2191 EXPORT_SYMBOL(dev_queue_xmit
);
2194 /*=======================================================================
2196 =======================================================================*/
2198 int netdev_max_backlog __read_mostly
= 1000;
2199 int netdev_budget __read_mostly
= 300;
2200 int weight_p __read_mostly
= 64; /* old backlog weight */
2202 DEFINE_PER_CPU(struct netif_rx_stats
, netdev_rx_stat
) = { 0, };
2206 * get_rps_cpu is called from netif_receive_skb and returns the target
2207 * CPU from the RPS map of the receiving queue for a given skb.
2209 static int get_rps_cpu(struct net_device
*dev
, struct sk_buff
*skb
)
2211 struct ipv6hdr
*ip6
;
2213 struct netdev_rx_queue
*rxqueue
;
2214 struct rps_map
*map
;
2217 u32 addr1
, addr2
, ports
, ihl
;
2221 if (skb_rx_queue_recorded(skb
)) {
2222 u16 index
= skb_get_rx_queue(skb
);
2223 if (unlikely(index
>= dev
->num_rx_queues
)) {
2224 if (net_ratelimit()) {
2225 netdev_warn(dev
, "received packet on queue "
2226 "%u, but number of RX queues is %u\n",
2227 index
, dev
->num_rx_queues
);
2231 rxqueue
= dev
->_rx
+ index
;
2235 if (!rxqueue
->rps_map
)
2239 goto got_hash
; /* Skip hash computation on packet header */
2241 switch (skb
->protocol
) {
2242 case __constant_htons(ETH_P_IP
):
2243 if (!pskb_may_pull(skb
, sizeof(*ip
)))
2246 ip
= (struct iphdr
*) skb
->data
;
2247 ip_proto
= ip
->protocol
;
2252 case __constant_htons(ETH_P_IPV6
):
2253 if (!pskb_may_pull(skb
, sizeof(*ip6
)))
2256 ip6
= (struct ipv6hdr
*) skb
->data
;
2257 ip_proto
= ip6
->nexthdr
;
2258 addr1
= ip6
->saddr
.s6_addr32
[3];
2259 addr2
= ip6
->daddr
.s6_addr32
[3];
2273 case IPPROTO_UDPLITE
:
2274 if (pskb_may_pull(skb
, (ihl
* 4) + 4))
2275 ports
= *((u32
*) (skb
->data
+ (ihl
* 4)));
2282 skb
->rxhash
= jhash_3words(addr1
, addr2
, ports
, hashrnd
);
2287 map
= rcu_dereference(rxqueue
->rps_map
);
2289 u16 tcpu
= map
->cpus
[((u64
) skb
->rxhash
* map
->len
) >> 32];
2291 if (cpu_online(tcpu
)) {
2303 * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled
2304 * to be sent to kick remote softirq processing. There are two masks since
2305 * the sending of IPIs must be done with interrupts enabled. The select field
2306 * indicates the current mask that enqueue_backlog uses to schedule IPIs.
2307 * select is flipped before net_rps_action is called while still under lock,
2308 * net_rps_action then uses the non-selected mask to send the IPIs and clears
2309 * it without conflicting with enqueue_backlog operation.
2311 struct rps_remote_softirq_cpus
{
2315 static DEFINE_PER_CPU(struct rps_remote_softirq_cpus
, rps_remote_softirq_cpus
);
2317 /* Called from hardirq (IPI) context */
2318 static void trigger_softirq(void *data
)
2320 struct softnet_data
*queue
= data
;
2321 __napi_schedule(&queue
->backlog
);
2322 __get_cpu_var(netdev_rx_stat
).received_rps
++;
2324 #endif /* CONFIG_SMP */
2327 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2328 * queue (may be a remote CPU queue).
2330 static int enqueue_to_backlog(struct sk_buff
*skb
, int cpu
)
2332 struct softnet_data
*queue
;
2333 unsigned long flags
;
2335 queue
= &per_cpu(softnet_data
, cpu
);
2337 local_irq_save(flags
);
2338 __get_cpu_var(netdev_rx_stat
).total
++;
2341 if (queue
->input_pkt_queue
.qlen
<= netdev_max_backlog
) {
2342 if (queue
->input_pkt_queue
.qlen
) {
2344 __skb_queue_tail(&queue
->input_pkt_queue
, skb
);
2346 local_irq_restore(flags
);
2347 return NET_RX_SUCCESS
;
2350 /* Schedule NAPI for backlog device */
2351 if (napi_schedule_prep(&queue
->backlog
)) {
2353 if (cpu
!= smp_processor_id()) {
2354 struct rps_remote_softirq_cpus
*rcpus
=
2355 &__get_cpu_var(rps_remote_softirq_cpus
);
2357 cpu_set(cpu
, rcpus
->mask
[rcpus
->select
]);
2358 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
2360 __napi_schedule(&queue
->backlog
);
2362 __napi_schedule(&queue
->backlog
);
2370 __get_cpu_var(netdev_rx_stat
).dropped
++;
2371 local_irq_restore(flags
);
2378 * netif_rx - post buffer to the network code
2379 * @skb: buffer to post
2381 * This function receives a packet from a device driver and queues it for
2382 * the upper (protocol) levels to process. It always succeeds. The buffer
2383 * may be dropped during processing for congestion control or by the
2387 * NET_RX_SUCCESS (no congestion)
2388 * NET_RX_DROP (packet was dropped)
2392 int netif_rx(struct sk_buff
*skb
)
2396 /* if netpoll wants it, pretend we never saw it */
2397 if (netpoll_rx(skb
))
2400 if (!skb
->tstamp
.tv64
)
2404 cpu
= get_rps_cpu(skb
->dev
, skb
);
2406 cpu
= smp_processor_id();
2408 cpu
= smp_processor_id();
2411 return enqueue_to_backlog(skb
, cpu
);
2413 EXPORT_SYMBOL(netif_rx
);
2415 int netif_rx_ni(struct sk_buff
*skb
)
2420 err
= netif_rx(skb
);
2421 if (local_softirq_pending())
2427 EXPORT_SYMBOL(netif_rx_ni
);
2429 static void net_tx_action(struct softirq_action
*h
)
2431 struct softnet_data
*sd
= &__get_cpu_var(softnet_data
);
2433 if (sd
->completion_queue
) {
2434 struct sk_buff
*clist
;
2436 local_irq_disable();
2437 clist
= sd
->completion_queue
;
2438 sd
->completion_queue
= NULL
;
2442 struct sk_buff
*skb
= clist
;
2443 clist
= clist
->next
;
2445 WARN_ON(atomic_read(&skb
->users
));
2450 if (sd
->output_queue
) {
2453 local_irq_disable();
2454 head
= sd
->output_queue
;
2455 sd
->output_queue
= NULL
;
2459 struct Qdisc
*q
= head
;
2460 spinlock_t
*root_lock
;
2462 head
= head
->next_sched
;
2464 root_lock
= qdisc_lock(q
);
2465 if (spin_trylock(root_lock
)) {
2466 smp_mb__before_clear_bit();
2467 clear_bit(__QDISC_STATE_SCHED
,
2470 spin_unlock(root_lock
);
2472 if (!test_bit(__QDISC_STATE_DEACTIVATED
,
2474 __netif_reschedule(q
);
2476 smp_mb__before_clear_bit();
2477 clear_bit(__QDISC_STATE_SCHED
,
2485 static inline int deliver_skb(struct sk_buff
*skb
,
2486 struct packet_type
*pt_prev
,
2487 struct net_device
*orig_dev
)
2489 atomic_inc(&skb
->users
);
2490 return pt_prev
->func(skb
, skb
->dev
, pt_prev
, orig_dev
);
2493 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
2495 #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2496 /* This hook is defined here for ATM LANE */
2497 int (*br_fdb_test_addr_hook
)(struct net_device
*dev
,
2498 unsigned char *addr
) __read_mostly
;
2499 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook
);
2503 * If bridge module is loaded call bridging hook.
2504 * returns NULL if packet was consumed.
2506 struct sk_buff
*(*br_handle_frame_hook
)(struct net_bridge_port
*p
,
2507 struct sk_buff
*skb
) __read_mostly
;
2508 EXPORT_SYMBOL_GPL(br_handle_frame_hook
);
2510 static inline struct sk_buff
*handle_bridge(struct sk_buff
*skb
,
2511 struct packet_type
**pt_prev
, int *ret
,
2512 struct net_device
*orig_dev
)
2514 struct net_bridge_port
*port
;
2516 if (skb
->pkt_type
== PACKET_LOOPBACK
||
2517 (port
= rcu_dereference(skb
->dev
->br_port
)) == NULL
)
2521 *ret
= deliver_skb(skb
, *pt_prev
, orig_dev
);
2525 return br_handle_frame_hook(port
, skb
);
2528 #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
2531 #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2532 struct sk_buff
*(*macvlan_handle_frame_hook
)(struct sk_buff
*skb
) __read_mostly
;
2533 EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook
);
2535 static inline struct sk_buff
*handle_macvlan(struct sk_buff
*skb
,
2536 struct packet_type
**pt_prev
,
2538 struct net_device
*orig_dev
)
2540 if (skb
->dev
->macvlan_port
== NULL
)
2544 *ret
= deliver_skb(skb
, *pt_prev
, orig_dev
);
2547 return macvlan_handle_frame_hook(skb
);
2550 #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2553 #ifdef CONFIG_NET_CLS_ACT
2554 /* TODO: Maybe we should just force sch_ingress to be compiled in
2555 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
2556 * a compare and 2 stores extra right now if we dont have it on
2557 * but have CONFIG_NET_CLS_ACT
2558 * NOTE: This doesnt stop any functionality; if you dont have
2559 * the ingress scheduler, you just cant add policies on ingress.
2562 static int ing_filter(struct sk_buff
*skb
)
2564 struct net_device
*dev
= skb
->dev
;
2565 u32 ttl
= G_TC_RTTL(skb
->tc_verd
);
2566 struct netdev_queue
*rxq
;
2567 int result
= TC_ACT_OK
;
2570 if (MAX_RED_LOOP
< ttl
++) {
2572 "Redir loop detected Dropping packet (%d->%d)\n",
2573 skb
->skb_iif
, dev
->ifindex
);
2577 skb
->tc_verd
= SET_TC_RTTL(skb
->tc_verd
, ttl
);
2578 skb
->tc_verd
= SET_TC_AT(skb
->tc_verd
, AT_INGRESS
);
2580 rxq
= &dev
->rx_queue
;
2583 if (q
!= &noop_qdisc
) {
2584 spin_lock(qdisc_lock(q
));
2585 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED
, &q
->state
)))
2586 result
= qdisc_enqueue_root(skb
, q
);
2587 spin_unlock(qdisc_lock(q
));
2593 static inline struct sk_buff
*handle_ing(struct sk_buff
*skb
,
2594 struct packet_type
**pt_prev
,
2595 int *ret
, struct net_device
*orig_dev
)
2597 if (skb
->dev
->rx_queue
.qdisc
== &noop_qdisc
)
2601 *ret
= deliver_skb(skb
, *pt_prev
, orig_dev
);
2604 /* Huh? Why does turning on AF_PACKET affect this? */
2605 skb
->tc_verd
= SET_TC_OK2MUNGE(skb
->tc_verd
);
2608 switch (ing_filter(skb
)) {
2622 * netif_nit_deliver - deliver received packets to network taps
2625 * This function is used to deliver incoming packets to network
2626 * taps. It should be used when the normal netif_receive_skb path
2627 * is bypassed, for example because of VLAN acceleration.
2629 void netif_nit_deliver(struct sk_buff
*skb
)
2631 struct packet_type
*ptype
;
2633 if (list_empty(&ptype_all
))
2636 skb_reset_network_header(skb
);
2637 skb_reset_transport_header(skb
);
2638 skb
->mac_len
= skb
->network_header
- skb
->mac_header
;
2641 list_for_each_entry_rcu(ptype
, &ptype_all
, list
) {
2642 if (!ptype
->dev
|| ptype
->dev
== skb
->dev
)
2643 deliver_skb(skb
, ptype
, skb
->dev
);
2648 static int __netif_receive_skb(struct sk_buff
*skb
)
2650 struct packet_type
*ptype
, *pt_prev
;
2651 struct net_device
*orig_dev
;
2652 struct net_device
*master
;
2653 struct net_device
*null_or_orig
;
2654 struct net_device
*null_or_bond
;
2655 int ret
= NET_RX_DROP
;
2658 if (!skb
->tstamp
.tv64
)
2661 if (vlan_tx_tag_present(skb
) && vlan_hwaccel_do_receive(skb
))
2662 return NET_RX_SUCCESS
;
2664 /* if we've gotten here through NAPI, check netpoll */
2665 if (netpoll_receive_skb(skb
))
2669 skb
->skb_iif
= skb
->dev
->ifindex
;
2671 null_or_orig
= NULL
;
2672 orig_dev
= skb
->dev
;
2673 master
= ACCESS_ONCE(orig_dev
->master
);
2675 if (skb_bond_should_drop(skb
, master
))
2676 null_or_orig
= orig_dev
; /* deliver only exact match */
2681 __get_cpu_var(netdev_rx_stat
).total
++;
2683 skb_reset_network_header(skb
);
2684 skb_reset_transport_header(skb
);
2685 skb
->mac_len
= skb
->network_header
- skb
->mac_header
;
2691 #ifdef CONFIG_NET_CLS_ACT
2692 if (skb
->tc_verd
& TC_NCLS
) {
2693 skb
->tc_verd
= CLR_TC_NCLS(skb
->tc_verd
);
2698 list_for_each_entry_rcu(ptype
, &ptype_all
, list
) {
2699 if (ptype
->dev
== null_or_orig
|| ptype
->dev
== skb
->dev
||
2700 ptype
->dev
== orig_dev
) {
2702 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
2707 #ifdef CONFIG_NET_CLS_ACT
2708 skb
= handle_ing(skb
, &pt_prev
, &ret
, orig_dev
);
2714 skb
= handle_bridge(skb
, &pt_prev
, &ret
, orig_dev
);
2717 skb
= handle_macvlan(skb
, &pt_prev
, &ret
, orig_dev
);
2722 * Make sure frames received on VLAN interfaces stacked on
2723 * bonding interfaces still make their way to any base bonding
2724 * device that may have registered for a specific ptype. The
2725 * handler may have to adjust skb->dev and orig_dev.
2727 null_or_bond
= NULL
;
2728 if ((skb
->dev
->priv_flags
& IFF_802_1Q_VLAN
) &&
2729 (vlan_dev_real_dev(skb
->dev
)->priv_flags
& IFF_BONDING
)) {
2730 null_or_bond
= vlan_dev_real_dev(skb
->dev
);
2733 type
= skb
->protocol
;
2734 list_for_each_entry_rcu(ptype
,
2735 &ptype_base
[ntohs(type
) & PTYPE_HASH_MASK
], list
) {
2736 if (ptype
->type
== type
&& (ptype
->dev
== null_or_orig
||
2737 ptype
->dev
== skb
->dev
|| ptype
->dev
== orig_dev
||
2738 ptype
->dev
== null_or_bond
)) {
2740 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
2746 ret
= pt_prev
->func(skb
, skb
->dev
, pt_prev
, orig_dev
);
2749 /* Jamal, now you will not able to escape explaining
2750 * me how you were going to use this. :-)
2761 * netif_receive_skb - process receive buffer from network
2762 * @skb: buffer to process
2764 * netif_receive_skb() is the main receive data processing function.
2765 * It always succeeds. The buffer may be dropped during processing
2766 * for congestion control or by the protocol layers.
2768 * This function may only be called from softirq context and interrupts
2769 * should be enabled.
2771 * Return values (usually ignored):
2772 * NET_RX_SUCCESS: no congestion
2773 * NET_RX_DROP: packet was dropped
2775 int netif_receive_skb(struct sk_buff
*skb
)
2780 cpu
= get_rps_cpu(skb
->dev
, skb
);
2783 return __netif_receive_skb(skb
);
2785 return enqueue_to_backlog(skb
, cpu
);
2787 return __netif_receive_skb(skb
);
2790 EXPORT_SYMBOL(netif_receive_skb
);
2792 /* Network device is going away, flush any packets still pending */
2793 static void flush_backlog(void *arg
)
2795 struct net_device
*dev
= arg
;
2796 struct softnet_data
*queue
= &__get_cpu_var(softnet_data
);
2797 struct sk_buff
*skb
, *tmp
;
2800 skb_queue_walk_safe(&queue
->input_pkt_queue
, skb
, tmp
)
2801 if (skb
->dev
== dev
) {
2802 __skb_unlink(skb
, &queue
->input_pkt_queue
);
2808 static int napi_gro_complete(struct sk_buff
*skb
)
2810 struct packet_type
*ptype
;
2811 __be16 type
= skb
->protocol
;
2812 struct list_head
*head
= &ptype_base
[ntohs(type
) & PTYPE_HASH_MASK
];
2815 if (NAPI_GRO_CB(skb
)->count
== 1) {
2816 skb_shinfo(skb
)->gso_size
= 0;
2821 list_for_each_entry_rcu(ptype
, head
, list
) {
2822 if (ptype
->type
!= type
|| ptype
->dev
|| !ptype
->gro_complete
)
2825 err
= ptype
->gro_complete(skb
);
2831 WARN_ON(&ptype
->list
== head
);
2833 return NET_RX_SUCCESS
;
2837 return netif_receive_skb(skb
);
2840 static void napi_gro_flush(struct napi_struct
*napi
)
2842 struct sk_buff
*skb
, *next
;
2844 for (skb
= napi
->gro_list
; skb
; skb
= next
) {
2847 napi_gro_complete(skb
);
2850 napi
->gro_count
= 0;
2851 napi
->gro_list
= NULL
;
2854 enum gro_result
dev_gro_receive(struct napi_struct
*napi
, struct sk_buff
*skb
)
2856 struct sk_buff
**pp
= NULL
;
2857 struct packet_type
*ptype
;
2858 __be16 type
= skb
->protocol
;
2859 struct list_head
*head
= &ptype_base
[ntohs(type
) & PTYPE_HASH_MASK
];
2862 enum gro_result ret
;
2864 if (!(skb
->dev
->features
& NETIF_F_GRO
))
2867 if (skb_is_gso(skb
) || skb_has_frags(skb
))
2871 list_for_each_entry_rcu(ptype
, head
, list
) {
2872 if (ptype
->type
!= type
|| ptype
->dev
|| !ptype
->gro_receive
)
2875 skb_set_network_header(skb
, skb_gro_offset(skb
));
2876 mac_len
= skb
->network_header
- skb
->mac_header
;
2877 skb
->mac_len
= mac_len
;
2878 NAPI_GRO_CB(skb
)->same_flow
= 0;
2879 NAPI_GRO_CB(skb
)->flush
= 0;
2880 NAPI_GRO_CB(skb
)->free
= 0;
2882 pp
= ptype
->gro_receive(&napi
->gro_list
, skb
);
2887 if (&ptype
->list
== head
)
2890 same_flow
= NAPI_GRO_CB(skb
)->same_flow
;
2891 ret
= NAPI_GRO_CB(skb
)->free
? GRO_MERGED_FREE
: GRO_MERGED
;
2894 struct sk_buff
*nskb
= *pp
;
2898 napi_gro_complete(nskb
);
2905 if (NAPI_GRO_CB(skb
)->flush
|| napi
->gro_count
>= MAX_GRO_SKBS
)
2909 NAPI_GRO_CB(skb
)->count
= 1;
2910 skb_shinfo(skb
)->gso_size
= skb_gro_len(skb
);
2911 skb
->next
= napi
->gro_list
;
2912 napi
->gro_list
= skb
;
2916 if (skb_headlen(skb
) < skb_gro_offset(skb
)) {
2917 int grow
= skb_gro_offset(skb
) - skb_headlen(skb
);
2919 BUG_ON(skb
->end
- skb
->tail
< grow
);
2921 memcpy(skb_tail_pointer(skb
), NAPI_GRO_CB(skb
)->frag0
, grow
);
2924 skb
->data_len
-= grow
;
2926 skb_shinfo(skb
)->frags
[0].page_offset
+= grow
;
2927 skb_shinfo(skb
)->frags
[0].size
-= grow
;
2929 if (unlikely(!skb_shinfo(skb
)->frags
[0].size
)) {
2930 put_page(skb_shinfo(skb
)->frags
[0].page
);
2931 memmove(skb_shinfo(skb
)->frags
,
2932 skb_shinfo(skb
)->frags
+ 1,
2933 --skb_shinfo(skb
)->nr_frags
);
2944 EXPORT_SYMBOL(dev_gro_receive
);
2947 __napi_gro_receive(struct napi_struct
*napi
, struct sk_buff
*skb
)
2951 if (netpoll_rx_on(skb
))
2954 for (p
= napi
->gro_list
; p
; p
= p
->next
) {
2955 NAPI_GRO_CB(p
)->same_flow
=
2956 (p
->dev
== skb
->dev
) &&
2957 !compare_ether_header(skb_mac_header(p
),
2958 skb_gro_mac_header(skb
));
2959 NAPI_GRO_CB(p
)->flush
= 0;
2962 return dev_gro_receive(napi
, skb
);
2965 gro_result_t
napi_skb_finish(gro_result_t ret
, struct sk_buff
*skb
)
2969 if (netif_receive_skb(skb
))
2974 case GRO_MERGED_FREE
:
2985 EXPORT_SYMBOL(napi_skb_finish
);
2987 void skb_gro_reset_offset(struct sk_buff
*skb
)
2989 NAPI_GRO_CB(skb
)->data_offset
= 0;
2990 NAPI_GRO_CB(skb
)->frag0
= NULL
;
2991 NAPI_GRO_CB(skb
)->frag0_len
= 0;
2993 if (skb
->mac_header
== skb
->tail
&&
2994 !PageHighMem(skb_shinfo(skb
)->frags
[0].page
)) {
2995 NAPI_GRO_CB(skb
)->frag0
=
2996 page_address(skb_shinfo(skb
)->frags
[0].page
) +
2997 skb_shinfo(skb
)->frags
[0].page_offset
;
2998 NAPI_GRO_CB(skb
)->frag0_len
= skb_shinfo(skb
)->frags
[0].size
;
3001 EXPORT_SYMBOL(skb_gro_reset_offset
);
3003 gro_result_t
napi_gro_receive(struct napi_struct
*napi
, struct sk_buff
*skb
)
3005 skb_gro_reset_offset(skb
);
3007 return napi_skb_finish(__napi_gro_receive(napi
, skb
), skb
);
3009 EXPORT_SYMBOL(napi_gro_receive
);
3011 void napi_reuse_skb(struct napi_struct
*napi
, struct sk_buff
*skb
)
3013 __skb_pull(skb
, skb_headlen(skb
));
3014 skb_reserve(skb
, NET_IP_ALIGN
- skb_headroom(skb
));
3018 EXPORT_SYMBOL(napi_reuse_skb
);
3020 struct sk_buff
*napi_get_frags(struct napi_struct
*napi
)
3022 struct sk_buff
*skb
= napi
->skb
;
3025 skb
= netdev_alloc_skb_ip_align(napi
->dev
, GRO_MAX_HEAD
);
3031 EXPORT_SYMBOL(napi_get_frags
);
3033 gro_result_t
napi_frags_finish(struct napi_struct
*napi
, struct sk_buff
*skb
,
3039 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
3041 if (ret
== GRO_HELD
)
3042 skb_gro_pull(skb
, -ETH_HLEN
);
3043 else if (netif_receive_skb(skb
))
3048 case GRO_MERGED_FREE
:
3049 napi_reuse_skb(napi
, skb
);
3058 EXPORT_SYMBOL(napi_frags_finish
);
3060 struct sk_buff
*napi_frags_skb(struct napi_struct
*napi
)
3062 struct sk_buff
*skb
= napi
->skb
;
3069 skb_reset_mac_header(skb
);
3070 skb_gro_reset_offset(skb
);
3072 off
= skb_gro_offset(skb
);
3073 hlen
= off
+ sizeof(*eth
);
3074 eth
= skb_gro_header_fast(skb
, off
);
3075 if (skb_gro_header_hard(skb
, hlen
)) {
3076 eth
= skb_gro_header_slow(skb
, hlen
, off
);
3077 if (unlikely(!eth
)) {
3078 napi_reuse_skb(napi
, skb
);
3084 skb_gro_pull(skb
, sizeof(*eth
));
3087 * This works because the only protocols we care about don't require
3088 * special handling. We'll fix it up properly at the end.
3090 skb
->protocol
= eth
->h_proto
;
3095 EXPORT_SYMBOL(napi_frags_skb
);
3097 gro_result_t
napi_gro_frags(struct napi_struct
*napi
)
3099 struct sk_buff
*skb
= napi_frags_skb(napi
);
3104 return napi_frags_finish(napi
, skb
, __napi_gro_receive(napi
, skb
));
3106 EXPORT_SYMBOL(napi_gro_frags
);
3108 static int process_backlog(struct napi_struct
*napi
, int quota
)
3111 struct softnet_data
*queue
= &__get_cpu_var(softnet_data
);
3112 unsigned long start_time
= jiffies
;
3114 napi
->weight
= weight_p
;
3116 struct sk_buff
*skb
;
3118 local_irq_disable();
3120 skb
= __skb_dequeue(&queue
->input_pkt_queue
);
3122 __napi_complete(napi
);
3123 spin_unlock_irq(&queue
->input_pkt_queue
.lock
);
3129 __netif_receive_skb(skb
);
3130 } while (++work
< quota
&& jiffies
== start_time
);
3136 * __napi_schedule - schedule for receive
3137 * @n: entry to schedule
3139 * The entry's receive function will be scheduled to run
3141 void __napi_schedule(struct napi_struct
*n
)
3143 unsigned long flags
;
3145 local_irq_save(flags
);
3146 list_add_tail(&n
->poll_list
, &__get_cpu_var(softnet_data
).poll_list
);
3147 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
3148 local_irq_restore(flags
);
3150 EXPORT_SYMBOL(__napi_schedule
);
3152 void __napi_complete(struct napi_struct
*n
)
3154 BUG_ON(!test_bit(NAPI_STATE_SCHED
, &n
->state
));
3155 BUG_ON(n
->gro_list
);
3157 list_del(&n
->poll_list
);
3158 smp_mb__before_clear_bit();
3159 clear_bit(NAPI_STATE_SCHED
, &n
->state
);
3161 EXPORT_SYMBOL(__napi_complete
);
3163 void napi_complete(struct napi_struct
*n
)
3165 unsigned long flags
;
3168 * don't let napi dequeue from the cpu poll list
3169 * just in case its running on a different cpu
3171 if (unlikely(test_bit(NAPI_STATE_NPSVC
, &n
->state
)))
3175 local_irq_save(flags
);
3177 local_irq_restore(flags
);
3179 EXPORT_SYMBOL(napi_complete
);
3181 void netif_napi_add(struct net_device
*dev
, struct napi_struct
*napi
,
3182 int (*poll
)(struct napi_struct
*, int), int weight
)
3184 INIT_LIST_HEAD(&napi
->poll_list
);
3185 napi
->gro_count
= 0;
3186 napi
->gro_list
= NULL
;
3189 napi
->weight
= weight
;
3190 list_add(&napi
->dev_list
, &dev
->napi_list
);
3192 #ifdef CONFIG_NETPOLL
3193 spin_lock_init(&napi
->poll_lock
);
3194 napi
->poll_owner
= -1;
3196 set_bit(NAPI_STATE_SCHED
, &napi
->state
);
3198 EXPORT_SYMBOL(netif_napi_add
);
3200 void netif_napi_del(struct napi_struct
*napi
)
3202 struct sk_buff
*skb
, *next
;
3204 list_del_init(&napi
->dev_list
);
3205 napi_free_frags(napi
);
3207 for (skb
= napi
->gro_list
; skb
; skb
= next
) {
3213 napi
->gro_list
= NULL
;
3214 napi
->gro_count
= 0;
3216 EXPORT_SYMBOL(netif_napi_del
);
3220 * net_rps_action sends any pending IPI's for rps. This is only called from
3221 * softirq and interrupts must be enabled.
3223 static void net_rps_action(cpumask_t
*mask
)
3227 /* Send pending IPI's to kick RPS processing on remote cpus. */
3228 for_each_cpu_mask_nr(cpu
, *mask
) {
3229 struct softnet_data
*queue
= &per_cpu(softnet_data
, cpu
);
3230 if (cpu_online(cpu
))
3231 __smp_call_function_single(cpu
, &queue
->csd
, 0);
3237 static void net_rx_action(struct softirq_action
*h
)
3239 struct list_head
*list
= &__get_cpu_var(softnet_data
).poll_list
;
3240 unsigned long time_limit
= jiffies
+ 2;
3241 int budget
= netdev_budget
;
3245 struct rps_remote_softirq_cpus
*rcpus
;
3248 local_irq_disable();
3250 while (!list_empty(list
)) {
3251 struct napi_struct
*n
;
3254 /* If softirq window is exhuasted then punt.
3255 * Allow this to run for 2 jiffies since which will allow
3256 * an average latency of 1.5/HZ.
3258 if (unlikely(budget
<= 0 || time_after(jiffies
, time_limit
)))
3263 /* Even though interrupts have been re-enabled, this
3264 * access is safe because interrupts can only add new
3265 * entries to the tail of this list, and only ->poll()
3266 * calls can remove this head entry from the list.
3268 n
= list_first_entry(list
, struct napi_struct
, poll_list
);
3270 have
= netpoll_poll_lock(n
);
3274 /* This NAPI_STATE_SCHED test is for avoiding a race
3275 * with netpoll's poll_napi(). Only the entity which
3276 * obtains the lock and sees NAPI_STATE_SCHED set will
3277 * actually make the ->poll() call. Therefore we avoid
3278 * accidently calling ->poll() when NAPI is not scheduled.
3281 if (test_bit(NAPI_STATE_SCHED
, &n
->state
)) {
3282 work
= n
->poll(n
, weight
);
3286 WARN_ON_ONCE(work
> weight
);
3290 local_irq_disable();
3292 /* Drivers must not modify the NAPI state if they
3293 * consume the entire weight. In such cases this code
3294 * still "owns" the NAPI instance and therefore can
3295 * move the instance around on the list at-will.
3297 if (unlikely(work
== weight
)) {
3298 if (unlikely(napi_disable_pending(n
))) {
3301 local_irq_disable();
3303 list_move_tail(&n
->poll_list
, list
);
3306 netpoll_poll_unlock(have
);
3310 rcpus
= &__get_cpu_var(rps_remote_softirq_cpus
);
3311 select
= rcpus
->select
;
3316 net_rps_action(&rcpus
->mask
[select
]);
3321 #ifdef CONFIG_NET_DMA
3323 * There may not be any more sk_buffs coming right now, so push
3324 * any pending DMA copies to hardware
3326 dma_issue_pending_all();
3332 __get_cpu_var(netdev_rx_stat
).time_squeeze
++;
3333 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
3337 static gifconf_func_t
*gifconf_list
[NPROTO
];
3340 * register_gifconf - register a SIOCGIF handler
3341 * @family: Address family
3342 * @gifconf: Function handler
3344 * Register protocol dependent address dumping routines. The handler
3345 * that is passed must not be freed or reused until it has been replaced
3346 * by another handler.
3348 int register_gifconf(unsigned int family
, gifconf_func_t
*gifconf
)
3350 if (family
>= NPROTO
)
3352 gifconf_list
[family
] = gifconf
;
3355 EXPORT_SYMBOL(register_gifconf
);
3359 * Map an interface index to its name (SIOCGIFNAME)
3363 * We need this ioctl for efficient implementation of the
3364 * if_indextoname() function required by the IPv6 API. Without
3365 * it, we would have to search all the interfaces to find a
3369 static int dev_ifname(struct net
*net
, struct ifreq __user
*arg
)
3371 struct net_device
*dev
;
3375 * Fetch the caller's info block.
3378 if (copy_from_user(&ifr
, arg
, sizeof(struct ifreq
)))
3382 dev
= dev_get_by_index_rcu(net
, ifr
.ifr_ifindex
);
3388 strcpy(ifr
.ifr_name
, dev
->name
);
3391 if (copy_to_user(arg
, &ifr
, sizeof(struct ifreq
)))
3397 * Perform a SIOCGIFCONF call. This structure will change
3398 * size eventually, and there is nothing I can do about it.
3399 * Thus we will need a 'compatibility mode'.
3402 static int dev_ifconf(struct net
*net
, char __user
*arg
)
3405 struct net_device
*dev
;
3412 * Fetch the caller's info block.
3415 if (copy_from_user(&ifc
, arg
, sizeof(struct ifconf
)))
3422 * Loop over the interfaces, and write an info block for each.
3426 for_each_netdev(net
, dev
) {
3427 for (i
= 0; i
< NPROTO
; i
++) {
3428 if (gifconf_list
[i
]) {
3431 done
= gifconf_list
[i
](dev
, NULL
, 0);
3433 done
= gifconf_list
[i
](dev
, pos
+ total
,
3443 * All done. Write the updated control block back to the caller.
3445 ifc
.ifc_len
= total
;
3448 * Both BSD and Solaris return 0 here, so we do too.
3450 return copy_to_user(arg
, &ifc
, sizeof(struct ifconf
)) ? -EFAULT
: 0;
3453 #ifdef CONFIG_PROC_FS
3455 * This is invoked by the /proc filesystem handler to display a device
3458 void *dev_seq_start(struct seq_file
*seq
, loff_t
*pos
)
3461 struct net
*net
= seq_file_net(seq
);
3463 struct net_device
*dev
;
3467 return SEQ_START_TOKEN
;
3470 for_each_netdev_rcu(net
, dev
)
3477 void *dev_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
3479 struct net_device
*dev
= (v
== SEQ_START_TOKEN
) ?
3480 first_net_device(seq_file_net(seq
)) :
3481 next_net_device((struct net_device
*)v
);
3484 return rcu_dereference(dev
);
3487 void dev_seq_stop(struct seq_file
*seq
, void *v
)
3493 static void dev_seq_printf_stats(struct seq_file
*seq
, struct net_device
*dev
)
3495 const struct net_device_stats
*stats
= dev_get_stats(dev
);
3497 seq_printf(seq
, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3498 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3499 dev
->name
, stats
->rx_bytes
, stats
->rx_packets
,
3501 stats
->rx_dropped
+ stats
->rx_missed_errors
,
3502 stats
->rx_fifo_errors
,
3503 stats
->rx_length_errors
+ stats
->rx_over_errors
+
3504 stats
->rx_crc_errors
+ stats
->rx_frame_errors
,
3505 stats
->rx_compressed
, stats
->multicast
,
3506 stats
->tx_bytes
, stats
->tx_packets
,
3507 stats
->tx_errors
, stats
->tx_dropped
,
3508 stats
->tx_fifo_errors
, stats
->collisions
,
3509 stats
->tx_carrier_errors
+
3510 stats
->tx_aborted_errors
+
3511 stats
->tx_window_errors
+
3512 stats
->tx_heartbeat_errors
,
3513 stats
->tx_compressed
);
3517 * Called from the PROCfs module. This now uses the new arbitrary sized
3518 * /proc/net interface to create /proc/net/dev
3520 static int dev_seq_show(struct seq_file
*seq
, void *v
)
3522 if (v
== SEQ_START_TOKEN
)
3523 seq_puts(seq
, "Inter-| Receive "
3525 " face |bytes packets errs drop fifo frame "
3526 "compressed multicast|bytes packets errs "
3527 "drop fifo colls carrier compressed\n");
3529 dev_seq_printf_stats(seq
, v
);
3533 static struct netif_rx_stats
*softnet_get_online(loff_t
*pos
)
3535 struct netif_rx_stats
*rc
= NULL
;
3537 while (*pos
< nr_cpu_ids
)
3538 if (cpu_online(*pos
)) {
3539 rc
= &per_cpu(netdev_rx_stat
, *pos
);
3546 static void *softnet_seq_start(struct seq_file
*seq
, loff_t
*pos
)
3548 return softnet_get_online(pos
);
3551 static void *softnet_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
3554 return softnet_get_online(pos
);
3557 static void softnet_seq_stop(struct seq_file
*seq
, void *v
)
3561 static int softnet_seq_show(struct seq_file
*seq
, void *v
)
3563 struct netif_rx_stats
*s
= v
;
3565 seq_printf(seq
, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3566 s
->total
, s
->dropped
, s
->time_squeeze
, 0,
3567 0, 0, 0, 0, /* was fastroute */
3568 s
->cpu_collision
, s
->received_rps
);
3572 static const struct seq_operations dev_seq_ops
= {
3573 .start
= dev_seq_start
,
3574 .next
= dev_seq_next
,
3575 .stop
= dev_seq_stop
,
3576 .show
= dev_seq_show
,
3579 static int dev_seq_open(struct inode
*inode
, struct file
*file
)
3581 return seq_open_net(inode
, file
, &dev_seq_ops
,
3582 sizeof(struct seq_net_private
));
3585 static const struct file_operations dev_seq_fops
= {
3586 .owner
= THIS_MODULE
,
3587 .open
= dev_seq_open
,
3589 .llseek
= seq_lseek
,
3590 .release
= seq_release_net
,
3593 static const struct seq_operations softnet_seq_ops
= {
3594 .start
= softnet_seq_start
,
3595 .next
= softnet_seq_next
,
3596 .stop
= softnet_seq_stop
,
3597 .show
= softnet_seq_show
,
3600 static int softnet_seq_open(struct inode
*inode
, struct file
*file
)
3602 return seq_open(file
, &softnet_seq_ops
);
3605 static const struct file_operations softnet_seq_fops
= {
3606 .owner
= THIS_MODULE
,
3607 .open
= softnet_seq_open
,
3609 .llseek
= seq_lseek
,
3610 .release
= seq_release
,
3613 static void *ptype_get_idx(loff_t pos
)
3615 struct packet_type
*pt
= NULL
;
3619 list_for_each_entry_rcu(pt
, &ptype_all
, list
) {
3625 for (t
= 0; t
< PTYPE_HASH_SIZE
; t
++) {
3626 list_for_each_entry_rcu(pt
, &ptype_base
[t
], list
) {
3635 static void *ptype_seq_start(struct seq_file
*seq
, loff_t
*pos
)
3639 return *pos
? ptype_get_idx(*pos
- 1) : SEQ_START_TOKEN
;
3642 static void *ptype_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
3644 struct packet_type
*pt
;
3645 struct list_head
*nxt
;
3649 if (v
== SEQ_START_TOKEN
)
3650 return ptype_get_idx(0);
3653 nxt
= pt
->list
.next
;
3654 if (pt
->type
== htons(ETH_P_ALL
)) {
3655 if (nxt
!= &ptype_all
)
3658 nxt
= ptype_base
[0].next
;
3660 hash
= ntohs(pt
->type
) & PTYPE_HASH_MASK
;
3662 while (nxt
== &ptype_base
[hash
]) {
3663 if (++hash
>= PTYPE_HASH_SIZE
)
3665 nxt
= ptype_base
[hash
].next
;
3668 return list_entry(nxt
, struct packet_type
, list
);
3671 static void ptype_seq_stop(struct seq_file
*seq
, void *v
)
3677 static int ptype_seq_show(struct seq_file
*seq
, void *v
)
3679 struct packet_type
*pt
= v
;
3681 if (v
== SEQ_START_TOKEN
)
3682 seq_puts(seq
, "Type Device Function\n");
3683 else if (pt
->dev
== NULL
|| dev_net(pt
->dev
) == seq_file_net(seq
)) {
3684 if (pt
->type
== htons(ETH_P_ALL
))
3685 seq_puts(seq
, "ALL ");
3687 seq_printf(seq
, "%04x", ntohs(pt
->type
));
3689 seq_printf(seq
, " %-8s %pF\n",
3690 pt
->dev
? pt
->dev
->name
: "", pt
->func
);
3696 static const struct seq_operations ptype_seq_ops
= {
3697 .start
= ptype_seq_start
,
3698 .next
= ptype_seq_next
,
3699 .stop
= ptype_seq_stop
,
3700 .show
= ptype_seq_show
,
3703 static int ptype_seq_open(struct inode
*inode
, struct file
*file
)
3705 return seq_open_net(inode
, file
, &ptype_seq_ops
,
3706 sizeof(struct seq_net_private
));
3709 static const struct file_operations ptype_seq_fops
= {
3710 .owner
= THIS_MODULE
,
3711 .open
= ptype_seq_open
,
3713 .llseek
= seq_lseek
,
3714 .release
= seq_release_net
,
3718 static int __net_init
dev_proc_net_init(struct net
*net
)
3722 if (!proc_net_fops_create(net
, "dev", S_IRUGO
, &dev_seq_fops
))
3724 if (!proc_net_fops_create(net
, "softnet_stat", S_IRUGO
, &softnet_seq_fops
))
3726 if (!proc_net_fops_create(net
, "ptype", S_IRUGO
, &ptype_seq_fops
))
3729 if (wext_proc_init(net
))
3735 proc_net_remove(net
, "ptype");
3737 proc_net_remove(net
, "softnet_stat");
3739 proc_net_remove(net
, "dev");
3743 static void __net_exit
dev_proc_net_exit(struct net
*net
)
3745 wext_proc_exit(net
);
3747 proc_net_remove(net
, "ptype");
3748 proc_net_remove(net
, "softnet_stat");
3749 proc_net_remove(net
, "dev");
3752 static struct pernet_operations __net_initdata dev_proc_ops
= {
3753 .init
= dev_proc_net_init
,
3754 .exit
= dev_proc_net_exit
,
3757 static int __init
dev_proc_init(void)
3759 return register_pernet_subsys(&dev_proc_ops
);
3762 #define dev_proc_init() 0
3763 #endif /* CONFIG_PROC_FS */
3767 * netdev_set_master - set up master/slave pair
3768 * @slave: slave device
3769 * @master: new master device
3771 * Changes the master device of the slave. Pass %NULL to break the
3772 * bonding. The caller must hold the RTNL semaphore. On a failure
3773 * a negative errno code is returned. On success the reference counts
3774 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3775 * function returns zero.
3777 int netdev_set_master(struct net_device
*slave
, struct net_device
*master
)
3779 struct net_device
*old
= slave
->master
;
3789 slave
->master
= master
;
3796 slave
->flags
|= IFF_SLAVE
;
3798 slave
->flags
&= ~IFF_SLAVE
;
3800 rtmsg_ifinfo(RTM_NEWLINK
, slave
, IFF_SLAVE
);
3803 EXPORT_SYMBOL(netdev_set_master
);
3805 static void dev_change_rx_flags(struct net_device
*dev
, int flags
)
3807 const struct net_device_ops
*ops
= dev
->netdev_ops
;
3809 if ((dev
->flags
& IFF_UP
) && ops
->ndo_change_rx_flags
)
3810 ops
->ndo_change_rx_flags(dev
, flags
);
3813 static int __dev_set_promiscuity(struct net_device
*dev
, int inc
)
3815 unsigned short old_flags
= dev
->flags
;
3821 dev
->flags
|= IFF_PROMISC
;
3822 dev
->promiscuity
+= inc
;
3823 if (dev
->promiscuity
== 0) {
3826 * If inc causes overflow, untouch promisc and return error.
3829 dev
->flags
&= ~IFF_PROMISC
;
3831 dev
->promiscuity
-= inc
;
3832 printk(KERN_WARNING
"%s: promiscuity touches roof, "
3833 "set promiscuity failed, promiscuity feature "
3834 "of device might be broken.\n", dev
->name
);
3838 if (dev
->flags
!= old_flags
) {
3839 printk(KERN_INFO
"device %s %s promiscuous mode\n",
3840 dev
->name
, (dev
->flags
& IFF_PROMISC
) ? "entered" :
3842 if (audit_enabled
) {
3843 current_uid_gid(&uid
, &gid
);
3844 audit_log(current
->audit_context
, GFP_ATOMIC
,
3845 AUDIT_ANOM_PROMISCUOUS
,
3846 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3847 dev
->name
, (dev
->flags
& IFF_PROMISC
),
3848 (old_flags
& IFF_PROMISC
),
3849 audit_get_loginuid(current
),
3851 audit_get_sessionid(current
));
3854 dev_change_rx_flags(dev
, IFF_PROMISC
);
3860 * dev_set_promiscuity - update promiscuity count on a device
3864 * Add or remove promiscuity from a device. While the count in the device
3865 * remains above zero the interface remains promiscuous. Once it hits zero
3866 * the device reverts back to normal filtering operation. A negative inc
3867 * value is used to drop promiscuity on the device.
3868 * Return 0 if successful or a negative errno code on error.
3870 int dev_set_promiscuity(struct net_device
*dev
, int inc
)
3872 unsigned short old_flags
= dev
->flags
;
3875 err
= __dev_set_promiscuity(dev
, inc
);
3878 if (dev
->flags
!= old_flags
)
3879 dev_set_rx_mode(dev
);
3882 EXPORT_SYMBOL(dev_set_promiscuity
);
3885 * dev_set_allmulti - update allmulti count on a device
3889 * Add or remove reception of all multicast frames to a device. While the
3890 * count in the device remains above zero the interface remains listening
3891 * to all interfaces. Once it hits zero the device reverts back to normal
3892 * filtering operation. A negative @inc value is used to drop the counter
3893 * when releasing a resource needing all multicasts.
3894 * Return 0 if successful or a negative errno code on error.
3897 int dev_set_allmulti(struct net_device
*dev
, int inc
)
3899 unsigned short old_flags
= dev
->flags
;
3903 dev
->flags
|= IFF_ALLMULTI
;
3904 dev
->allmulti
+= inc
;
3905 if (dev
->allmulti
== 0) {
3908 * If inc causes overflow, untouch allmulti and return error.
3911 dev
->flags
&= ~IFF_ALLMULTI
;
3913 dev
->allmulti
-= inc
;
3914 printk(KERN_WARNING
"%s: allmulti touches roof, "
3915 "set allmulti failed, allmulti feature of "
3916 "device might be broken.\n", dev
->name
);
3920 if (dev
->flags
^ old_flags
) {
3921 dev_change_rx_flags(dev
, IFF_ALLMULTI
);
3922 dev_set_rx_mode(dev
);
3926 EXPORT_SYMBOL(dev_set_allmulti
);
3929 * Upload unicast and multicast address lists to device and
3930 * configure RX filtering. When the device doesn't support unicast
3931 * filtering it is put in promiscuous mode while unicast addresses
3934 void __dev_set_rx_mode(struct net_device
*dev
)
3936 const struct net_device_ops
*ops
= dev
->netdev_ops
;
3938 /* dev_open will call this function so the list will stay sane. */
3939 if (!(dev
->flags
&IFF_UP
))
3942 if (!netif_device_present(dev
))
3945 if (ops
->ndo_set_rx_mode
)
3946 ops
->ndo_set_rx_mode(dev
);
3948 /* Unicast addresses changes may only happen under the rtnl,
3949 * therefore calling __dev_set_promiscuity here is safe.
3951 if (!netdev_uc_empty(dev
) && !dev
->uc_promisc
) {
3952 __dev_set_promiscuity(dev
, 1);
3953 dev
->uc_promisc
= 1;
3954 } else if (netdev_uc_empty(dev
) && dev
->uc_promisc
) {
3955 __dev_set_promiscuity(dev
, -1);
3956 dev
->uc_promisc
= 0;
3959 if (ops
->ndo_set_multicast_list
)
3960 ops
->ndo_set_multicast_list(dev
);
3964 void dev_set_rx_mode(struct net_device
*dev
)
3966 netif_addr_lock_bh(dev
);
3967 __dev_set_rx_mode(dev
);
3968 netif_addr_unlock_bh(dev
);
3971 /* multicast addresses handling functions */
3973 int __dev_addr_delete(struct dev_addr_list
**list
, int *count
,
3974 void *addr
, int alen
, int glbl
)
3976 struct dev_addr_list
*da
;
3978 for (; (da
= *list
) != NULL
; list
= &da
->next
) {
3979 if (memcmp(da
->da_addr
, addr
, da
->da_addrlen
) == 0 &&
3980 alen
== da
->da_addrlen
) {
3982 int old_glbl
= da
->da_gusers
;
3999 int __dev_addr_add(struct dev_addr_list
**list
, int *count
,
4000 void *addr
, int alen
, int glbl
)
4002 struct dev_addr_list
*da
;
4004 for (da
= *list
; da
!= NULL
; da
= da
->next
) {
4005 if (memcmp(da
->da_addr
, addr
, da
->da_addrlen
) == 0 &&
4006 da
->da_addrlen
== alen
) {
4008 int old_glbl
= da
->da_gusers
;
4018 da
= kzalloc(sizeof(*da
), GFP_ATOMIC
);
4021 memcpy(da
->da_addr
, addr
, alen
);
4022 da
->da_addrlen
= alen
;
4024 da
->da_gusers
= glbl
? 1 : 0;
4032 int __dev_addr_sync(struct dev_addr_list
**to
, int *to_count
,
4033 struct dev_addr_list
**from
, int *from_count
)
4035 struct dev_addr_list
*da
, *next
;
4039 while (da
!= NULL
) {
4041 if (!da
->da_synced
) {
4042 err
= __dev_addr_add(to
, to_count
,
4043 da
->da_addr
, da
->da_addrlen
, 0);
4048 } else if (da
->da_users
== 1) {
4049 __dev_addr_delete(to
, to_count
,
4050 da
->da_addr
, da
->da_addrlen
, 0);
4051 __dev_addr_delete(from
, from_count
,
4052 da
->da_addr
, da
->da_addrlen
, 0);
4058 EXPORT_SYMBOL_GPL(__dev_addr_sync
);
4060 void __dev_addr_unsync(struct dev_addr_list
**to
, int *to_count
,
4061 struct dev_addr_list
**from
, int *from_count
)
4063 struct dev_addr_list
*da
, *next
;
4066 while (da
!= NULL
) {
4068 if (da
->da_synced
) {
4069 __dev_addr_delete(to
, to_count
,
4070 da
->da_addr
, da
->da_addrlen
, 0);
4072 __dev_addr_delete(from
, from_count
,
4073 da
->da_addr
, da
->da_addrlen
, 0);
4078 EXPORT_SYMBOL_GPL(__dev_addr_unsync
);
4080 static void __dev_addr_discard(struct dev_addr_list
**list
)
4082 struct dev_addr_list
*tmp
;
4084 while (*list
!= NULL
) {
4087 if (tmp
->da_users
> tmp
->da_gusers
)
4088 printk("__dev_addr_discard: address leakage! "
4089 "da_users=%d\n", tmp
->da_users
);
4094 void dev_addr_discard(struct net_device
*dev
)
4096 netif_addr_lock_bh(dev
);
4098 __dev_addr_discard(&dev
->mc_list
);
4099 netdev_mc_count(dev
) = 0;
4101 netif_addr_unlock_bh(dev
);
4103 EXPORT_SYMBOL(dev_addr_discard
);
4106 * dev_get_flags - get flags reported to userspace
4109 * Get the combination of flag bits exported through APIs to userspace.
4111 unsigned dev_get_flags(const struct net_device
*dev
)
4115 flags
= (dev
->flags
& ~(IFF_PROMISC
|
4120 (dev
->gflags
& (IFF_PROMISC
|
4123 if (netif_running(dev
)) {
4124 if (netif_oper_up(dev
))
4125 flags
|= IFF_RUNNING
;
4126 if (netif_carrier_ok(dev
))
4127 flags
|= IFF_LOWER_UP
;
4128 if (netif_dormant(dev
))
4129 flags
|= IFF_DORMANT
;
4134 EXPORT_SYMBOL(dev_get_flags
);
4136 int __dev_change_flags(struct net_device
*dev
, unsigned int flags
)
4138 int old_flags
= dev
->flags
;
4144 * Set the flags on our device.
4147 dev
->flags
= (flags
& (IFF_DEBUG
| IFF_NOTRAILERS
| IFF_NOARP
|
4148 IFF_DYNAMIC
| IFF_MULTICAST
| IFF_PORTSEL
|
4150 (dev
->flags
& (IFF_UP
| IFF_VOLATILE
| IFF_PROMISC
|
4154 * Load in the correct multicast list now the flags have changed.
4157 if ((old_flags
^ flags
) & IFF_MULTICAST
)
4158 dev_change_rx_flags(dev
, IFF_MULTICAST
);
4160 dev_set_rx_mode(dev
);
4163 * Have we downed the interface. We handle IFF_UP ourselves
4164 * according to user attempts to set it, rather than blindly
4169 if ((old_flags
^ flags
) & IFF_UP
) { /* Bit is different ? */
4170 ret
= ((old_flags
& IFF_UP
) ? __dev_close
: __dev_open
)(dev
);
4173 dev_set_rx_mode(dev
);
4176 if ((flags
^ dev
->gflags
) & IFF_PROMISC
) {
4177 int inc
= (flags
& IFF_PROMISC
) ? 1 : -1;
4179 dev
->gflags
^= IFF_PROMISC
;
4180 dev_set_promiscuity(dev
, inc
);
4183 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4184 is important. Some (broken) drivers set IFF_PROMISC, when
4185 IFF_ALLMULTI is requested not asking us and not reporting.
4187 if ((flags
^ dev
->gflags
) & IFF_ALLMULTI
) {
4188 int inc
= (flags
& IFF_ALLMULTI
) ? 1 : -1;
4190 dev
->gflags
^= IFF_ALLMULTI
;
4191 dev_set_allmulti(dev
, inc
);
4197 void __dev_notify_flags(struct net_device
*dev
, unsigned int old_flags
)
4199 unsigned int changes
= dev
->flags
^ old_flags
;
4201 if (changes
& IFF_UP
) {
4202 if (dev
->flags
& IFF_UP
)
4203 call_netdevice_notifiers(NETDEV_UP
, dev
);
4205 call_netdevice_notifiers(NETDEV_DOWN
, dev
);
4208 if (dev
->flags
& IFF_UP
&&
4209 (changes
& ~(IFF_UP
| IFF_PROMISC
| IFF_ALLMULTI
| IFF_VOLATILE
)))
4210 call_netdevice_notifiers(NETDEV_CHANGE
, dev
);
4214 * dev_change_flags - change device settings
4216 * @flags: device state flags
4218 * Change settings on device based state flags. The flags are
4219 * in the userspace exported format.
4221 int dev_change_flags(struct net_device
*dev
, unsigned flags
)
4224 int old_flags
= dev
->flags
;
4226 ret
= __dev_change_flags(dev
, flags
);
4230 changes
= old_flags
^ dev
->flags
;
4232 rtmsg_ifinfo(RTM_NEWLINK
, dev
, changes
);
4234 __dev_notify_flags(dev
, old_flags
);
4237 EXPORT_SYMBOL(dev_change_flags
);
4240 * dev_set_mtu - Change maximum transfer unit
4242 * @new_mtu: new transfer unit
4244 * Change the maximum transfer size of the network device.
4246 int dev_set_mtu(struct net_device
*dev
, int new_mtu
)
4248 const struct net_device_ops
*ops
= dev
->netdev_ops
;
4251 if (new_mtu
== dev
->mtu
)
4254 /* MTU must be positive. */
4258 if (!netif_device_present(dev
))
4262 if (ops
->ndo_change_mtu
)
4263 err
= ops
->ndo_change_mtu(dev
, new_mtu
);
4267 if (!err
&& dev
->flags
& IFF_UP
)
4268 call_netdevice_notifiers(NETDEV_CHANGEMTU
, dev
);
4271 EXPORT_SYMBOL(dev_set_mtu
);
4274 * dev_set_mac_address - Change Media Access Control Address
4278 * Change the hardware (MAC) address of the device
4280 int dev_set_mac_address(struct net_device
*dev
, struct sockaddr
*sa
)
4282 const struct net_device_ops
*ops
= dev
->netdev_ops
;
4285 if (!ops
->ndo_set_mac_address
)
4287 if (sa
->sa_family
!= dev
->type
)
4289 if (!netif_device_present(dev
))
4291 err
= ops
->ndo_set_mac_address(dev
, sa
);
4293 call_netdevice_notifiers(NETDEV_CHANGEADDR
, dev
);
4296 EXPORT_SYMBOL(dev_set_mac_address
);
4299 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4301 static int dev_ifsioc_locked(struct net
*net
, struct ifreq
*ifr
, unsigned int cmd
)
4304 struct net_device
*dev
= dev_get_by_name_rcu(net
, ifr
->ifr_name
);
4310 case SIOCGIFFLAGS
: /* Get interface flags */
4311 ifr
->ifr_flags
= (short) dev_get_flags(dev
);
4314 case SIOCGIFMETRIC
: /* Get the metric on the interface
4315 (currently unused) */
4316 ifr
->ifr_metric
= 0;
4319 case SIOCGIFMTU
: /* Get the MTU of a device */
4320 ifr
->ifr_mtu
= dev
->mtu
;
4325 memset(ifr
->ifr_hwaddr
.sa_data
, 0, sizeof ifr
->ifr_hwaddr
.sa_data
);
4327 memcpy(ifr
->ifr_hwaddr
.sa_data
, dev
->dev_addr
,
4328 min(sizeof ifr
->ifr_hwaddr
.sa_data
, (size_t) dev
->addr_len
));
4329 ifr
->ifr_hwaddr
.sa_family
= dev
->type
;
4337 ifr
->ifr_map
.mem_start
= dev
->mem_start
;
4338 ifr
->ifr_map
.mem_end
= dev
->mem_end
;
4339 ifr
->ifr_map
.base_addr
= dev
->base_addr
;
4340 ifr
->ifr_map
.irq
= dev
->irq
;
4341 ifr
->ifr_map
.dma
= dev
->dma
;
4342 ifr
->ifr_map
.port
= dev
->if_port
;
4346 ifr
->ifr_ifindex
= dev
->ifindex
;
4350 ifr
->ifr_qlen
= dev
->tx_queue_len
;
4354 /* dev_ioctl() should ensure this case
4366 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4368 static int dev_ifsioc(struct net
*net
, struct ifreq
*ifr
, unsigned int cmd
)
4371 struct net_device
*dev
= __dev_get_by_name(net
, ifr
->ifr_name
);
4372 const struct net_device_ops
*ops
;
4377 ops
= dev
->netdev_ops
;
4380 case SIOCSIFFLAGS
: /* Set interface flags */
4381 return dev_change_flags(dev
, ifr
->ifr_flags
);
4383 case SIOCSIFMETRIC
: /* Set the metric on the interface
4384 (currently unused) */
4387 case SIOCSIFMTU
: /* Set the MTU of a device */
4388 return dev_set_mtu(dev
, ifr
->ifr_mtu
);
4391 return dev_set_mac_address(dev
, &ifr
->ifr_hwaddr
);
4393 case SIOCSIFHWBROADCAST
:
4394 if (ifr
->ifr_hwaddr
.sa_family
!= dev
->type
)
4396 memcpy(dev
->broadcast
, ifr
->ifr_hwaddr
.sa_data
,
4397 min(sizeof ifr
->ifr_hwaddr
.sa_data
, (size_t) dev
->addr_len
));
4398 call_netdevice_notifiers(NETDEV_CHANGEADDR
, dev
);
4402 if (ops
->ndo_set_config
) {
4403 if (!netif_device_present(dev
))
4405 return ops
->ndo_set_config(dev
, &ifr
->ifr_map
);
4410 if ((!ops
->ndo_set_multicast_list
&& !ops
->ndo_set_rx_mode
) ||
4411 ifr
->ifr_hwaddr
.sa_family
!= AF_UNSPEC
)
4413 if (!netif_device_present(dev
))
4415 return dev_mc_add(dev
, ifr
->ifr_hwaddr
.sa_data
,
4419 if ((!ops
->ndo_set_multicast_list
&& !ops
->ndo_set_rx_mode
) ||
4420 ifr
->ifr_hwaddr
.sa_family
!= AF_UNSPEC
)
4422 if (!netif_device_present(dev
))
4424 return dev_mc_delete(dev
, ifr
->ifr_hwaddr
.sa_data
,
4428 if (ifr
->ifr_qlen
< 0)
4430 dev
->tx_queue_len
= ifr
->ifr_qlen
;
4434 ifr
->ifr_newname
[IFNAMSIZ
-1] = '\0';
4435 return dev_change_name(dev
, ifr
->ifr_newname
);
4438 * Unknown or private ioctl
4441 if ((cmd
>= SIOCDEVPRIVATE
&&
4442 cmd
<= SIOCDEVPRIVATE
+ 15) ||
4443 cmd
== SIOCBONDENSLAVE
||
4444 cmd
== SIOCBONDRELEASE
||
4445 cmd
== SIOCBONDSETHWADDR
||
4446 cmd
== SIOCBONDSLAVEINFOQUERY
||
4447 cmd
== SIOCBONDINFOQUERY
||
4448 cmd
== SIOCBONDCHANGEACTIVE
||
4449 cmd
== SIOCGMIIPHY
||
4450 cmd
== SIOCGMIIREG
||
4451 cmd
== SIOCSMIIREG
||
4452 cmd
== SIOCBRADDIF
||
4453 cmd
== SIOCBRDELIF
||
4454 cmd
== SIOCSHWTSTAMP
||
4455 cmd
== SIOCWANDEV
) {
4457 if (ops
->ndo_do_ioctl
) {
4458 if (netif_device_present(dev
))
4459 err
= ops
->ndo_do_ioctl(dev
, ifr
, cmd
);
4471 * This function handles all "interface"-type I/O control requests. The actual
4472 * 'doing' part of this is dev_ifsioc above.
4476 * dev_ioctl - network device ioctl
4477 * @net: the applicable net namespace
4478 * @cmd: command to issue
4479 * @arg: pointer to a struct ifreq in user space
4481 * Issue ioctl functions to devices. This is normally called by the
4482 * user space syscall interfaces but can sometimes be useful for
4483 * other purposes. The return value is the return from the syscall if
4484 * positive or a negative errno code on error.
4487 int dev_ioctl(struct net
*net
, unsigned int cmd
, void __user
*arg
)
4493 /* One special case: SIOCGIFCONF takes ifconf argument
4494 and requires shared lock, because it sleeps writing
4498 if (cmd
== SIOCGIFCONF
) {
4500 ret
= dev_ifconf(net
, (char __user
*) arg
);
4504 if (cmd
== SIOCGIFNAME
)
4505 return dev_ifname(net
, (struct ifreq __user
*)arg
);
4507 if (copy_from_user(&ifr
, arg
, sizeof(struct ifreq
)))
4510 ifr
.ifr_name
[IFNAMSIZ
-1] = 0;
4512 colon
= strchr(ifr
.ifr_name
, ':');
4517 * See which interface the caller is talking about.
4522 * These ioctl calls:
4523 * - can be done by all.
4524 * - atomic and do not require locking.
4535 dev_load(net
, ifr
.ifr_name
);
4537 ret
= dev_ifsioc_locked(net
, &ifr
, cmd
);
4542 if (copy_to_user(arg
, &ifr
,
4543 sizeof(struct ifreq
)))
4549 dev_load(net
, ifr
.ifr_name
);
4551 ret
= dev_ethtool(net
, &ifr
);
4556 if (copy_to_user(arg
, &ifr
,
4557 sizeof(struct ifreq
)))
4563 * These ioctl calls:
4564 * - require superuser power.
4565 * - require strict serialization.
4571 if (!capable(CAP_NET_ADMIN
))
4573 dev_load(net
, ifr
.ifr_name
);
4575 ret
= dev_ifsioc(net
, &ifr
, cmd
);
4580 if (copy_to_user(arg
, &ifr
,
4581 sizeof(struct ifreq
)))
4587 * These ioctl calls:
4588 * - require superuser power.
4589 * - require strict serialization.
4590 * - do not return a value
4600 case SIOCSIFHWBROADCAST
:
4603 case SIOCBONDENSLAVE
:
4604 case SIOCBONDRELEASE
:
4605 case SIOCBONDSETHWADDR
:
4606 case SIOCBONDCHANGEACTIVE
:
4610 if (!capable(CAP_NET_ADMIN
))
4613 case SIOCBONDSLAVEINFOQUERY
:
4614 case SIOCBONDINFOQUERY
:
4615 dev_load(net
, ifr
.ifr_name
);
4617 ret
= dev_ifsioc(net
, &ifr
, cmd
);
4622 /* Get the per device memory space. We can add this but
4623 * currently do not support it */
4625 /* Set the per device memory buffer space.
4626 * Not applicable in our case */
4631 * Unknown or private ioctl.
4634 if (cmd
== SIOCWANDEV
||
4635 (cmd
>= SIOCDEVPRIVATE
&&
4636 cmd
<= SIOCDEVPRIVATE
+ 15)) {
4637 dev_load(net
, ifr
.ifr_name
);
4639 ret
= dev_ifsioc(net
, &ifr
, cmd
);
4641 if (!ret
&& copy_to_user(arg
, &ifr
,
4642 sizeof(struct ifreq
)))
4646 /* Take care of Wireless Extensions */
4647 if (cmd
>= SIOCIWFIRST
&& cmd
<= SIOCIWLAST
)
4648 return wext_handle_ioctl(net
, &ifr
, cmd
, arg
);
4655 * dev_new_index - allocate an ifindex
4656 * @net: the applicable net namespace
4658 * Returns a suitable unique value for a new device interface
4659 * number. The caller must hold the rtnl semaphore or the
4660 * dev_base_lock to be sure it remains unique.
4662 static int dev_new_index(struct net
*net
)
4668 if (!__dev_get_by_index(net
, ifindex
))
4673 /* Delayed registration/unregisteration */
4674 static LIST_HEAD(net_todo_list
);
4676 static void net_set_todo(struct net_device
*dev
)
4678 list_add_tail(&dev
->todo_list
, &net_todo_list
);
4681 static void rollback_registered_many(struct list_head
*head
)
4683 struct net_device
*dev
, *tmp
;
4685 BUG_ON(dev_boot_phase
);
4688 list_for_each_entry_safe(dev
, tmp
, head
, unreg_list
) {
4689 /* Some devices call without registering
4690 * for initialization unwind. Remove those
4691 * devices and proceed with the remaining.
4693 if (dev
->reg_state
== NETREG_UNINITIALIZED
) {
4694 pr_debug("unregister_netdevice: device %s/%p never "
4695 "was registered\n", dev
->name
, dev
);
4698 list_del(&dev
->unreg_list
);
4702 BUG_ON(dev
->reg_state
!= NETREG_REGISTERED
);
4704 /* If device is running, close it first. */
4707 /* And unlink it from device chain. */
4708 unlist_netdevice(dev
);
4710 dev
->reg_state
= NETREG_UNREGISTERING
;
4715 list_for_each_entry(dev
, head
, unreg_list
) {
4716 /* Shutdown queueing discipline. */
4720 /* Notify protocols, that we are about to destroy
4721 this device. They should clean all the things.
4723 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
4725 if (!dev
->rtnl_link_ops
||
4726 dev
->rtnl_link_state
== RTNL_LINK_INITIALIZED
)
4727 rtmsg_ifinfo(RTM_DELLINK
, dev
, ~0U);
4730 * Flush the unicast and multicast chains
4733 dev_addr_discard(dev
);
4735 if (dev
->netdev_ops
->ndo_uninit
)
4736 dev
->netdev_ops
->ndo_uninit(dev
);
4738 /* Notifier chain MUST detach us from master device. */
4739 WARN_ON(dev
->master
);
4741 /* Remove entries from kobject tree */
4742 netdev_unregister_kobject(dev
);
4745 /* Process any work delayed until the end of the batch */
4746 dev
= list_first_entry(head
, struct net_device
, unreg_list
);
4747 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH
, dev
);
4751 list_for_each_entry(dev
, head
, unreg_list
)
4755 static void rollback_registered(struct net_device
*dev
)
4759 list_add(&dev
->unreg_list
, &single
);
4760 rollback_registered_many(&single
);
4763 static void __netdev_init_queue_locks_one(struct net_device
*dev
,
4764 struct netdev_queue
*dev_queue
,
4767 spin_lock_init(&dev_queue
->_xmit_lock
);
4768 netdev_set_xmit_lockdep_class(&dev_queue
->_xmit_lock
, dev
->type
);
4769 dev_queue
->xmit_lock_owner
= -1;
4772 static void netdev_init_queue_locks(struct net_device
*dev
)
4774 netdev_for_each_tx_queue(dev
, __netdev_init_queue_locks_one
, NULL
);
4775 __netdev_init_queue_locks_one(dev
, &dev
->rx_queue
, NULL
);
4778 unsigned long netdev_fix_features(unsigned long features
, const char *name
)
4780 /* Fix illegal SG+CSUM combinations. */
4781 if ((features
& NETIF_F_SG
) &&
4782 !(features
& NETIF_F_ALL_CSUM
)) {
4784 printk(KERN_NOTICE
"%s: Dropping NETIF_F_SG since no "
4785 "checksum feature.\n", name
);
4786 features
&= ~NETIF_F_SG
;
4789 /* TSO requires that SG is present as well. */
4790 if ((features
& NETIF_F_TSO
) && !(features
& NETIF_F_SG
)) {
4792 printk(KERN_NOTICE
"%s: Dropping NETIF_F_TSO since no "
4793 "SG feature.\n", name
);
4794 features
&= ~NETIF_F_TSO
;
4797 if (features
& NETIF_F_UFO
) {
4798 if (!(features
& NETIF_F_GEN_CSUM
)) {
4800 printk(KERN_ERR
"%s: Dropping NETIF_F_UFO "
4801 "since no NETIF_F_HW_CSUM feature.\n",
4803 features
&= ~NETIF_F_UFO
;
4806 if (!(features
& NETIF_F_SG
)) {
4808 printk(KERN_ERR
"%s: Dropping NETIF_F_UFO "
4809 "since no NETIF_F_SG feature.\n", name
);
4810 features
&= ~NETIF_F_UFO
;
4816 EXPORT_SYMBOL(netdev_fix_features
);
4819 * netif_stacked_transfer_operstate - transfer operstate
4820 * @rootdev: the root or lower level device to transfer state from
4821 * @dev: the device to transfer operstate to
4823 * Transfer operational state from root to device. This is normally
4824 * called when a stacking relationship exists between the root
4825 * device and the device(a leaf device).
4827 void netif_stacked_transfer_operstate(const struct net_device
*rootdev
,
4828 struct net_device
*dev
)
4830 if (rootdev
->operstate
== IF_OPER_DORMANT
)
4831 netif_dormant_on(dev
);
4833 netif_dormant_off(dev
);
4835 if (netif_carrier_ok(rootdev
)) {
4836 if (!netif_carrier_ok(dev
))
4837 netif_carrier_on(dev
);
4839 if (netif_carrier_ok(dev
))
4840 netif_carrier_off(dev
);
4843 EXPORT_SYMBOL(netif_stacked_transfer_operstate
);
4846 * register_netdevice - register a network device
4847 * @dev: device to register
4849 * Take a completed network device structure and add it to the kernel
4850 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4851 * chain. 0 is returned on success. A negative errno code is returned
4852 * on a failure to set up the device, or if the name is a duplicate.
4854 * Callers must hold the rtnl semaphore. You may want
4855 * register_netdev() instead of this.
4858 * The locking appears insufficient to guarantee two parallel registers
4859 * will not get the same name.
4862 int register_netdevice(struct net_device
*dev
)
4865 struct net
*net
= dev_net(dev
);
4867 BUG_ON(dev_boot_phase
);
4872 /* When net_device's are persistent, this will be fatal. */
4873 BUG_ON(dev
->reg_state
!= NETREG_UNINITIALIZED
);
4876 spin_lock_init(&dev
->addr_list_lock
);
4877 netdev_set_addr_lockdep_class(dev
);
4878 netdev_init_queue_locks(dev
);
4883 if (!dev
->num_rx_queues
) {
4885 * Allocate a single RX queue if driver never called
4889 dev
->_rx
= kzalloc(sizeof(struct netdev_rx_queue
), GFP_KERNEL
);
4895 dev
->_rx
->first
= dev
->_rx
;
4896 atomic_set(&dev
->_rx
->count
, 1);
4897 dev
->num_rx_queues
= 1;
4900 /* Init, if this function is available */
4901 if (dev
->netdev_ops
->ndo_init
) {
4902 ret
= dev
->netdev_ops
->ndo_init(dev
);
4910 ret
= dev_get_valid_name(net
, dev
->name
, dev
->name
, 0);
4914 dev
->ifindex
= dev_new_index(net
);
4915 if (dev
->iflink
== -1)
4916 dev
->iflink
= dev
->ifindex
;
4918 /* Fix illegal checksum combinations */
4919 if ((dev
->features
& NETIF_F_HW_CSUM
) &&
4920 (dev
->features
& (NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))) {
4921 printk(KERN_NOTICE
"%s: mixed HW and IP checksum settings.\n",
4923 dev
->features
&= ~(NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
);
4926 if ((dev
->features
& NETIF_F_NO_CSUM
) &&
4927 (dev
->features
& (NETIF_F_HW_CSUM
|NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))) {
4928 printk(KERN_NOTICE
"%s: mixed no checksumming and other settings.\n",
4930 dev
->features
&= ~(NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
|NETIF_F_HW_CSUM
);
4933 dev
->features
= netdev_fix_features(dev
->features
, dev
->name
);
4935 /* Enable software GSO if SG is supported. */
4936 if (dev
->features
& NETIF_F_SG
)
4937 dev
->features
|= NETIF_F_GSO
;
4939 netdev_initialize_kobject(dev
);
4941 ret
= call_netdevice_notifiers(NETDEV_POST_INIT
, dev
);
4942 ret
= notifier_to_errno(ret
);
4946 ret
= netdev_register_kobject(dev
);
4949 dev
->reg_state
= NETREG_REGISTERED
;
4952 * Default initial state at registry is that the
4953 * device is present.
4956 set_bit(__LINK_STATE_PRESENT
, &dev
->state
);
4958 dev_init_scheduler(dev
);
4960 list_netdevice(dev
);
4962 /* Notify protocols, that a new device appeared. */
4963 ret
= call_netdevice_notifiers(NETDEV_REGISTER
, dev
);
4964 ret
= notifier_to_errno(ret
);
4966 rollback_registered(dev
);
4967 dev
->reg_state
= NETREG_UNREGISTERED
;
4970 * Prevent userspace races by waiting until the network
4971 * device is fully setup before sending notifications.
4973 if (!dev
->rtnl_link_ops
||
4974 dev
->rtnl_link_state
== RTNL_LINK_INITIALIZED
)
4975 rtmsg_ifinfo(RTM_NEWLINK
, dev
, ~0U);
4981 if (dev
->netdev_ops
->ndo_uninit
)
4982 dev
->netdev_ops
->ndo_uninit(dev
);
4985 EXPORT_SYMBOL(register_netdevice
);
4988 * init_dummy_netdev - init a dummy network device for NAPI
4989 * @dev: device to init
4991 * This takes a network device structure and initialize the minimum
4992 * amount of fields so it can be used to schedule NAPI polls without
4993 * registering a full blown interface. This is to be used by drivers
4994 * that need to tie several hardware interfaces to a single NAPI
4995 * poll scheduler due to HW limitations.
4997 int init_dummy_netdev(struct net_device
*dev
)
4999 /* Clear everything. Note we don't initialize spinlocks
5000 * are they aren't supposed to be taken by any of the
5001 * NAPI code and this dummy netdev is supposed to be
5002 * only ever used for NAPI polls
5004 memset(dev
, 0, sizeof(struct net_device
));
5006 /* make sure we BUG if trying to hit standard
5007 * register/unregister code path
5009 dev
->reg_state
= NETREG_DUMMY
;
5011 /* initialize the ref count */
5012 atomic_set(&dev
->refcnt
, 1);
5014 /* NAPI wants this */
5015 INIT_LIST_HEAD(&dev
->napi_list
);
5017 /* a dummy interface is started by default */
5018 set_bit(__LINK_STATE_PRESENT
, &dev
->state
);
5019 set_bit(__LINK_STATE_START
, &dev
->state
);
5023 EXPORT_SYMBOL_GPL(init_dummy_netdev
);
5027 * register_netdev - register a network device
5028 * @dev: device to register
5030 * Take a completed network device structure and add it to the kernel
5031 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5032 * chain. 0 is returned on success. A negative errno code is returned
5033 * on a failure to set up the device, or if the name is a duplicate.
5035 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5036 * and expands the device name if you passed a format string to
5039 int register_netdev(struct net_device
*dev
)
5046 * If the name is a format string the caller wants us to do a
5049 if (strchr(dev
->name
, '%')) {
5050 err
= dev_alloc_name(dev
, dev
->name
);
5055 err
= register_netdevice(dev
);
5060 EXPORT_SYMBOL(register_netdev
);
5063 * netdev_wait_allrefs - wait until all references are gone.
5065 * This is called when unregistering network devices.
5067 * Any protocol or device that holds a reference should register
5068 * for netdevice notification, and cleanup and put back the
5069 * reference if they receive an UNREGISTER event.
5070 * We can get stuck here if buggy protocols don't correctly
5073 static void netdev_wait_allrefs(struct net_device
*dev
)
5075 unsigned long rebroadcast_time
, warning_time
;
5077 linkwatch_forget_dev(dev
);
5079 rebroadcast_time
= warning_time
= jiffies
;
5080 while (atomic_read(&dev
->refcnt
) != 0) {
5081 if (time_after(jiffies
, rebroadcast_time
+ 1 * HZ
)) {
5084 /* Rebroadcast unregister notification */
5085 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
5086 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5087 * should have already handle it the first time */
5089 if (test_bit(__LINK_STATE_LINKWATCH_PENDING
,
5091 /* We must not have linkwatch events
5092 * pending on unregister. If this
5093 * happens, we simply run the queue
5094 * unscheduled, resulting in a noop
5097 linkwatch_run_queue();
5102 rebroadcast_time
= jiffies
;
5107 if (time_after(jiffies
, warning_time
+ 10 * HZ
)) {
5108 printk(KERN_EMERG
"unregister_netdevice: "
5109 "waiting for %s to become free. Usage "
5111 dev
->name
, atomic_read(&dev
->refcnt
));
5112 warning_time
= jiffies
;
5121 * register_netdevice(x1);
5122 * register_netdevice(x2);
5124 * unregister_netdevice(y1);
5125 * unregister_netdevice(y2);
5131 * We are invoked by rtnl_unlock().
5132 * This allows us to deal with problems:
5133 * 1) We can delete sysfs objects which invoke hotplug
5134 * without deadlocking with linkwatch via keventd.
5135 * 2) Since we run with the RTNL semaphore not held, we can sleep
5136 * safely in order to wait for the netdev refcnt to drop to zero.
5138 * We must not return until all unregister events added during
5139 * the interval the lock was held have been completed.
5141 void netdev_run_todo(void)
5143 struct list_head list
;
5145 /* Snapshot list, allow later requests */
5146 list_replace_init(&net_todo_list
, &list
);
5150 while (!list_empty(&list
)) {
5151 struct net_device
*dev
5152 = list_first_entry(&list
, struct net_device
, todo_list
);
5153 list_del(&dev
->todo_list
);
5155 if (unlikely(dev
->reg_state
!= NETREG_UNREGISTERING
)) {
5156 printk(KERN_ERR
"network todo '%s' but state %d\n",
5157 dev
->name
, dev
->reg_state
);
5162 dev
->reg_state
= NETREG_UNREGISTERED
;
5164 on_each_cpu(flush_backlog
, dev
, 1);
5166 netdev_wait_allrefs(dev
);
5169 BUG_ON(atomic_read(&dev
->refcnt
));
5170 WARN_ON(dev
->ip_ptr
);
5171 WARN_ON(dev
->ip6_ptr
);
5172 WARN_ON(dev
->dn_ptr
);
5174 if (dev
->destructor
)
5175 dev
->destructor(dev
);
5177 /* Free network device */
5178 kobject_put(&dev
->dev
.kobj
);
5183 * dev_txq_stats_fold - fold tx_queues stats
5184 * @dev: device to get statistics from
5185 * @stats: struct net_device_stats to hold results
5187 void dev_txq_stats_fold(const struct net_device
*dev
,
5188 struct net_device_stats
*stats
)
5190 unsigned long tx_bytes
= 0, tx_packets
= 0, tx_dropped
= 0;
5192 struct netdev_queue
*txq
;
5194 for (i
= 0; i
< dev
->num_tx_queues
; i
++) {
5195 txq
= netdev_get_tx_queue(dev
, i
);
5196 tx_bytes
+= txq
->tx_bytes
;
5197 tx_packets
+= txq
->tx_packets
;
5198 tx_dropped
+= txq
->tx_dropped
;
5200 if (tx_bytes
|| tx_packets
|| tx_dropped
) {
5201 stats
->tx_bytes
= tx_bytes
;
5202 stats
->tx_packets
= tx_packets
;
5203 stats
->tx_dropped
= tx_dropped
;
5206 EXPORT_SYMBOL(dev_txq_stats_fold
);
5209 * dev_get_stats - get network device statistics
5210 * @dev: device to get statistics from
5212 * Get network statistics from device. The device driver may provide
5213 * its own method by setting dev->netdev_ops->get_stats; otherwise
5214 * the internal statistics structure is used.
5216 const struct net_device_stats
*dev_get_stats(struct net_device
*dev
)
5218 const struct net_device_ops
*ops
= dev
->netdev_ops
;
5220 if (ops
->ndo_get_stats
)
5221 return ops
->ndo_get_stats(dev
);
5223 dev_txq_stats_fold(dev
, &dev
->stats
);
5226 EXPORT_SYMBOL(dev_get_stats
);
5228 static void netdev_init_one_queue(struct net_device
*dev
,
5229 struct netdev_queue
*queue
,
5235 static void netdev_init_queues(struct net_device
*dev
)
5237 netdev_init_one_queue(dev
, &dev
->rx_queue
, NULL
);
5238 netdev_for_each_tx_queue(dev
, netdev_init_one_queue
, NULL
);
5239 spin_lock_init(&dev
->tx_global_lock
);
5243 * alloc_netdev_mq - allocate network device
5244 * @sizeof_priv: size of private data to allocate space for
5245 * @name: device name format string
5246 * @setup: callback to initialize device
5247 * @queue_count: the number of subqueues to allocate
5249 * Allocates a struct net_device with private data area for driver use
5250 * and performs basic initialization. Also allocates subquue structs
5251 * for each queue on the device at the end of the netdevice.
5253 struct net_device
*alloc_netdev_mq(int sizeof_priv
, const char *name
,
5254 void (*setup
)(struct net_device
*), unsigned int queue_count
)
5256 struct netdev_queue
*tx
;
5257 struct net_device
*dev
;
5259 struct net_device
*p
;
5261 struct netdev_rx_queue
*rx
;
5265 BUG_ON(strlen(name
) >= sizeof(dev
->name
));
5267 alloc_size
= sizeof(struct net_device
);
5269 /* ensure 32-byte alignment of private area */
5270 alloc_size
= ALIGN(alloc_size
, NETDEV_ALIGN
);
5271 alloc_size
+= sizeof_priv
;
5273 /* ensure 32-byte alignment of whole construct */
5274 alloc_size
+= NETDEV_ALIGN
- 1;
5276 p
= kzalloc(alloc_size
, GFP_KERNEL
);
5278 printk(KERN_ERR
"alloc_netdev: Unable to allocate device.\n");
5282 tx
= kcalloc(queue_count
, sizeof(struct netdev_queue
), GFP_KERNEL
);
5284 printk(KERN_ERR
"alloc_netdev: Unable to allocate "
5290 rx
= kcalloc(queue_count
, sizeof(struct netdev_rx_queue
), GFP_KERNEL
);
5292 printk(KERN_ERR
"alloc_netdev: Unable to allocate "
5297 atomic_set(&rx
->count
, queue_count
);
5300 * Set a pointer to first element in the array which holds the
5303 for (i
= 0; i
< queue_count
; i
++)
5307 dev
= PTR_ALIGN(p
, NETDEV_ALIGN
);
5308 dev
->padded
= (char *)dev
- (char *)p
;
5310 if (dev_addr_init(dev
))
5315 dev_net_set(dev
, &init_net
);
5318 dev
->num_tx_queues
= queue_count
;
5319 dev
->real_num_tx_queues
= queue_count
;
5323 dev
->num_rx_queues
= queue_count
;
5326 dev
->gso_max_size
= GSO_MAX_SIZE
;
5328 netdev_init_queues(dev
);
5330 INIT_LIST_HEAD(&dev
->ethtool_ntuple_list
.list
);
5331 dev
->ethtool_ntuple_list
.count
= 0;
5332 INIT_LIST_HEAD(&dev
->napi_list
);
5333 INIT_LIST_HEAD(&dev
->unreg_list
);
5334 INIT_LIST_HEAD(&dev
->link_watch_list
);
5335 dev
->priv_flags
= IFF_XMIT_DST_RELEASE
;
5337 strcpy(dev
->name
, name
);
5350 EXPORT_SYMBOL(alloc_netdev_mq
);
5353 * free_netdev - free network device
5356 * This function does the last stage of destroying an allocated device
5357 * interface. The reference to the device object is released.
5358 * If this is the last reference then it will be freed.
5360 void free_netdev(struct net_device
*dev
)
5362 struct napi_struct
*p
, *n
;
5364 release_net(dev_net(dev
));
5368 /* Flush device addresses */
5369 dev_addr_flush(dev
);
5371 /* Clear ethtool n-tuple list */
5372 ethtool_ntuple_flush(dev
);
5374 list_for_each_entry_safe(p
, n
, &dev
->napi_list
, dev_list
)
5377 /* Compatibility with error handling in drivers */
5378 if (dev
->reg_state
== NETREG_UNINITIALIZED
) {
5379 kfree((char *)dev
- dev
->padded
);
5383 BUG_ON(dev
->reg_state
!= NETREG_UNREGISTERED
);
5384 dev
->reg_state
= NETREG_RELEASED
;
5386 /* will free via device release */
5387 put_device(&dev
->dev
);
5389 EXPORT_SYMBOL(free_netdev
);
5392 * synchronize_net - Synchronize with packet receive processing
5394 * Wait for packets currently being received to be done.
5395 * Does not block later packets from starting.
5397 void synchronize_net(void)
5402 EXPORT_SYMBOL(synchronize_net
);
5405 * unregister_netdevice_queue - remove device from the kernel
5409 * This function shuts down a device interface and removes it
5410 * from the kernel tables.
5411 * If head not NULL, device is queued to be unregistered later.
5413 * Callers must hold the rtnl semaphore. You may want
5414 * unregister_netdev() instead of this.
5417 void unregister_netdevice_queue(struct net_device
*dev
, struct list_head
*head
)
5422 list_move_tail(&dev
->unreg_list
, head
);
5424 rollback_registered(dev
);
5425 /* Finish processing unregister after unlock */
5429 EXPORT_SYMBOL(unregister_netdevice_queue
);
5432 * unregister_netdevice_many - unregister many devices
5433 * @head: list of devices
5435 void unregister_netdevice_many(struct list_head
*head
)
5437 struct net_device
*dev
;
5439 if (!list_empty(head
)) {
5440 rollback_registered_many(head
);
5441 list_for_each_entry(dev
, head
, unreg_list
)
5445 EXPORT_SYMBOL(unregister_netdevice_many
);
5448 * unregister_netdev - remove device from the kernel
5451 * This function shuts down a device interface and removes it
5452 * from the kernel tables.
5454 * This is just a wrapper for unregister_netdevice that takes
5455 * the rtnl semaphore. In general you want to use this and not
5456 * unregister_netdevice.
5458 void unregister_netdev(struct net_device
*dev
)
5461 unregister_netdevice(dev
);
5464 EXPORT_SYMBOL(unregister_netdev
);
5467 * dev_change_net_namespace - move device to different nethost namespace
5469 * @net: network namespace
5470 * @pat: If not NULL name pattern to try if the current device name
5471 * is already taken in the destination network namespace.
5473 * This function shuts down a device interface and moves it
5474 * to a new network namespace. On success 0 is returned, on
5475 * a failure a netagive errno code is returned.
5477 * Callers must hold the rtnl semaphore.
5480 int dev_change_net_namespace(struct net_device
*dev
, struct net
*net
, const char *pat
)
5486 /* Don't allow namespace local devices to be moved. */
5488 if (dev
->features
& NETIF_F_NETNS_LOCAL
)
5492 /* Don't allow real devices to be moved when sysfs
5496 if (dev
->dev
.parent
)
5500 /* Ensure the device has been registrered */
5502 if (dev
->reg_state
!= NETREG_REGISTERED
)
5505 /* Get out if there is nothing todo */
5507 if (net_eq(dev_net(dev
), net
))
5510 /* Pick the destination device name, and ensure
5511 * we can use it in the destination network namespace.
5514 if (__dev_get_by_name(net
, dev
->name
)) {
5515 /* We get here if we can't use the current device name */
5518 if (dev_get_valid_name(net
, pat
, dev
->name
, 1))
5523 * And now a mini version of register_netdevice unregister_netdevice.
5526 /* If device is running close it first. */
5529 /* And unlink it from device chain */
5531 unlist_netdevice(dev
);
5535 /* Shutdown queueing discipline. */
5538 /* Notify protocols, that we are about to destroy
5539 this device. They should clean all the things.
5541 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
5542 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH
, dev
);
5545 * Flush the unicast and multicast chains
5548 dev_addr_discard(dev
);
5550 netdev_unregister_kobject(dev
);
5552 /* Actually switch the network namespace */
5553 dev_net_set(dev
, net
);
5555 /* If there is an ifindex conflict assign a new one */
5556 if (__dev_get_by_index(net
, dev
->ifindex
)) {
5557 int iflink
= (dev
->iflink
== dev
->ifindex
);
5558 dev
->ifindex
= dev_new_index(net
);
5560 dev
->iflink
= dev
->ifindex
;
5563 /* Fixup kobjects */
5564 err
= netdev_register_kobject(dev
);
5567 /* Add the device back in the hashes */
5568 list_netdevice(dev
);
5570 /* Notify protocols, that a new device appeared. */
5571 call_netdevice_notifiers(NETDEV_REGISTER
, dev
);
5574 * Prevent userspace races by waiting until the network
5575 * device is fully setup before sending notifications.
5577 rtmsg_ifinfo(RTM_NEWLINK
, dev
, ~0U);
5584 EXPORT_SYMBOL_GPL(dev_change_net_namespace
);
5586 static int dev_cpu_callback(struct notifier_block
*nfb
,
5587 unsigned long action
,
5590 struct sk_buff
**list_skb
;
5591 struct Qdisc
**list_net
;
5592 struct sk_buff
*skb
;
5593 unsigned int cpu
, oldcpu
= (unsigned long)ocpu
;
5594 struct softnet_data
*sd
, *oldsd
;
5596 if (action
!= CPU_DEAD
&& action
!= CPU_DEAD_FROZEN
)
5599 local_irq_disable();
5600 cpu
= smp_processor_id();
5601 sd
= &per_cpu(softnet_data
, cpu
);
5602 oldsd
= &per_cpu(softnet_data
, oldcpu
);
5604 /* Find end of our completion_queue. */
5605 list_skb
= &sd
->completion_queue
;
5607 list_skb
= &(*list_skb
)->next
;
5608 /* Append completion queue from offline CPU. */
5609 *list_skb
= oldsd
->completion_queue
;
5610 oldsd
->completion_queue
= NULL
;
5612 /* Find end of our output_queue. */
5613 list_net
= &sd
->output_queue
;
5615 list_net
= &(*list_net
)->next_sched
;
5616 /* Append output queue from offline CPU. */
5617 *list_net
= oldsd
->output_queue
;
5618 oldsd
->output_queue
= NULL
;
5620 raise_softirq_irqoff(NET_TX_SOFTIRQ
);
5623 /* Process offline CPU's input_pkt_queue */
5624 while ((skb
= __skb_dequeue(&oldsd
->input_pkt_queue
)))
5632 * netdev_increment_features - increment feature set by one
5633 * @all: current feature set
5634 * @one: new feature set
5635 * @mask: mask feature set
5637 * Computes a new feature set after adding a device with feature set
5638 * @one to the master device with current feature set @all. Will not
5639 * enable anything that is off in @mask. Returns the new feature set.
5641 unsigned long netdev_increment_features(unsigned long all
, unsigned long one
,
5644 /* If device needs checksumming, downgrade to it. */
5645 if (all
& NETIF_F_NO_CSUM
&& !(one
& NETIF_F_NO_CSUM
))
5646 all
^= NETIF_F_NO_CSUM
| (one
& NETIF_F_ALL_CSUM
);
5647 else if (mask
& NETIF_F_ALL_CSUM
) {
5648 /* If one device supports v4/v6 checksumming, set for all. */
5649 if (one
& (NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
) &&
5650 !(all
& NETIF_F_GEN_CSUM
)) {
5651 all
&= ~NETIF_F_ALL_CSUM
;
5652 all
|= one
& (NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
);
5655 /* If one device supports hw checksumming, set for all. */
5656 if (one
& NETIF_F_GEN_CSUM
&& !(all
& NETIF_F_GEN_CSUM
)) {
5657 all
&= ~NETIF_F_ALL_CSUM
;
5658 all
|= NETIF_F_HW_CSUM
;
5662 one
|= NETIF_F_ALL_CSUM
;
5664 one
|= all
& NETIF_F_ONE_FOR_ALL
;
5665 all
&= one
| NETIF_F_LLTX
| NETIF_F_GSO
| NETIF_F_UFO
;
5666 all
|= one
& mask
& NETIF_F_ONE_FOR_ALL
;
5670 EXPORT_SYMBOL(netdev_increment_features
);
5672 static struct hlist_head
*netdev_create_hash(void)
5675 struct hlist_head
*hash
;
5677 hash
= kmalloc(sizeof(*hash
) * NETDEV_HASHENTRIES
, GFP_KERNEL
);
5679 for (i
= 0; i
< NETDEV_HASHENTRIES
; i
++)
5680 INIT_HLIST_HEAD(&hash
[i
]);
5685 /* Initialize per network namespace state */
5686 static int __net_init
netdev_init(struct net
*net
)
5688 INIT_LIST_HEAD(&net
->dev_base_head
);
5690 net
->dev_name_head
= netdev_create_hash();
5691 if (net
->dev_name_head
== NULL
)
5694 net
->dev_index_head
= netdev_create_hash();
5695 if (net
->dev_index_head
== NULL
)
5701 kfree(net
->dev_name_head
);
5707 * netdev_drivername - network driver for the device
5708 * @dev: network device
5709 * @buffer: buffer for resulting name
5710 * @len: size of buffer
5712 * Determine network driver for device.
5714 char *netdev_drivername(const struct net_device
*dev
, char *buffer
, int len
)
5716 const struct device_driver
*driver
;
5717 const struct device
*parent
;
5719 if (len
<= 0 || !buffer
)
5723 parent
= dev
->dev
.parent
;
5728 driver
= parent
->driver
;
5729 if (driver
&& driver
->name
)
5730 strlcpy(buffer
, driver
->name
, len
);
5734 static void __net_exit
netdev_exit(struct net
*net
)
5736 kfree(net
->dev_name_head
);
5737 kfree(net
->dev_index_head
);
5740 static struct pernet_operations __net_initdata netdev_net_ops
= {
5741 .init
= netdev_init
,
5742 .exit
= netdev_exit
,
5745 static void __net_exit
default_device_exit(struct net
*net
)
5747 struct net_device
*dev
, *aux
;
5749 * Push all migratable network devices back to the
5750 * initial network namespace
5753 for_each_netdev_safe(net
, dev
, aux
) {
5755 char fb_name
[IFNAMSIZ
];
5757 /* Ignore unmoveable devices (i.e. loopback) */
5758 if (dev
->features
& NETIF_F_NETNS_LOCAL
)
5761 /* Leave virtual devices for the generic cleanup */
5762 if (dev
->rtnl_link_ops
)
5765 /* Push remaing network devices to init_net */
5766 snprintf(fb_name
, IFNAMSIZ
, "dev%d", dev
->ifindex
);
5767 err
= dev_change_net_namespace(dev
, &init_net
, fb_name
);
5769 printk(KERN_EMERG
"%s: failed to move %s to init_net: %d\n",
5770 __func__
, dev
->name
, err
);
5777 static void __net_exit
default_device_exit_batch(struct list_head
*net_list
)
5779 /* At exit all network devices most be removed from a network
5780 * namespace. Do this in the reverse order of registeration.
5781 * Do this across as many network namespaces as possible to
5782 * improve batching efficiency.
5784 struct net_device
*dev
;
5786 LIST_HEAD(dev_kill_list
);
5789 list_for_each_entry(net
, net_list
, exit_list
) {
5790 for_each_netdev_reverse(net
, dev
) {
5791 if (dev
->rtnl_link_ops
)
5792 dev
->rtnl_link_ops
->dellink(dev
, &dev_kill_list
);
5794 unregister_netdevice_queue(dev
, &dev_kill_list
);
5797 unregister_netdevice_many(&dev_kill_list
);
5801 static struct pernet_operations __net_initdata default_device_ops
= {
5802 .exit
= default_device_exit
,
5803 .exit_batch
= default_device_exit_batch
,
5807 * Initialize the DEV module. At boot time this walks the device list and
5808 * unhooks any devices that fail to initialise (normally hardware not
5809 * present) and leaves us with a valid list of present and active devices.
5814 * This is called single threaded during boot, so no need
5815 * to take the rtnl semaphore.
5817 static int __init
net_dev_init(void)
5819 int i
, rc
= -ENOMEM
;
5821 BUG_ON(!dev_boot_phase
);
5823 if (dev_proc_init())
5826 if (netdev_kobject_init())
5829 INIT_LIST_HEAD(&ptype_all
);
5830 for (i
= 0; i
< PTYPE_HASH_SIZE
; i
++)
5831 INIT_LIST_HEAD(&ptype_base
[i
]);
5833 if (register_pernet_subsys(&netdev_net_ops
))
5837 * Initialise the packet receive queues.
5840 for_each_possible_cpu(i
) {
5841 struct softnet_data
*queue
;
5843 queue
= &per_cpu(softnet_data
, i
);
5844 skb_queue_head_init(&queue
->input_pkt_queue
);
5845 queue
->completion_queue
= NULL
;
5846 INIT_LIST_HEAD(&queue
->poll_list
);
5849 queue
->csd
.func
= trigger_softirq
;
5850 queue
->csd
.info
= queue
;
5851 queue
->csd
.flags
= 0;
5854 queue
->backlog
.poll
= process_backlog
;
5855 queue
->backlog
.weight
= weight_p
;
5856 queue
->backlog
.gro_list
= NULL
;
5857 queue
->backlog
.gro_count
= 0;
5862 /* The loopback device is special if any other network devices
5863 * is present in a network namespace the loopback device must
5864 * be present. Since we now dynamically allocate and free the
5865 * loopback device ensure this invariant is maintained by
5866 * keeping the loopback device as the first device on the
5867 * list of network devices. Ensuring the loopback devices
5868 * is the first device that appears and the last network device
5871 if (register_pernet_device(&loopback_net_ops
))
5874 if (register_pernet_device(&default_device_ops
))
5877 open_softirq(NET_TX_SOFTIRQ
, net_tx_action
);
5878 open_softirq(NET_RX_SOFTIRQ
, net_rx_action
);
5880 hotcpu_notifier(dev_cpu_callback
, 0);
5888 subsys_initcall(net_dev_init
);
5890 static int __init
initialize_hashrnd(void)
5892 get_random_bytes(&hashrnd
, sizeof(hashrnd
));
5896 late_initcall_sync(initialize_hashrnd
);