/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[16] __read_mostly;	/* 16 way hashed list */
static struct list_head ptype_all __read_mostly;	/* Taps */

#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client client;
	spinlock_t lock;
	cpumask_t channel_mask;
	struct dma_chan *channels[NR_CPUS];
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
LIST_HEAD(dev_base_head);
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_head);
EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];

static inline struct hlist_head *dev_name_hash(const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
}

static inline struct hlist_head *dev_index_hash(int ifindex)
{
	return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
extern void netdev_unregister_sysfs(struct net_device *);
#else
#define netdev_sysfs_init()		(0)
#define netdev_register_sysfs(dev)	(0)
#define netdev_unregister_sysfs(dev)	do { } while(0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits dev->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & 15;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & 15];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

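/*
 * Example (editorial sketch, not in the original source): registering a
 * packet tap with dev_add_pack() and removing it with dev_remove_pack().
 * The names "example_rcv" and "example_pt" are hypothetical; the receive
 * callback signature matches struct packet_type in this kernel version.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);			// consume and drop our clone
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_pt = {
 *		.type = __constant_htons(ETH_P_ALL),	// tap all protocols
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_pt);	// handler is now visible under RCU
 *	...
 *	dev_remove_pack(&example_pt);	// sleeps until no CPU can see it
 */
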
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strcpy(s[i].name, name);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

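/*
 * Worked example of the parser above (editorial note, not in the original
 * source): get_options() consumes up to four leading integers and leaves
 * the remainder as the device name, so a command line such as
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * stores irq=9, base_addr=0x300, mem_start=0, mem_end=0 under the name
 * "eth0", which netdev_boot_setup_check() later copies into the matching
 * struct net_device during probing.
 */
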
/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

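/*
 * Usage sketch (editorial, not in the original source): the reference
 * taken by dev_get_by_name() must be balanced with dev_put() once the
 * caller is done with the device.
 *
 *	struct net_device *dev = dev_get_by_name("eth0");
 *	if (dev) {
 *		printk(KERN_INFO "ifindex of eth0 is %d\n", dev->ifindex);
 *		dev_put(dev);		// drop the reference we took
 *	}
 */
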
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

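/*
 * Usage sketch (editorial, not in the original source): the caller must
 * already hold the RTNL semaphore, and the result carries no reference,
 * so it is only valid while the lock is held.
 *
 *	char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = dev_getbyhwaddr(ARPHRD_ETHER, mac);
 *	if (dev)
 *		printk(KERN_INFO "%s owns that MAC\n", dev->name);
 *	rtnl_unlock();		// dev must not be used past this point
 */
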
struct net_device *__dev_getfirstbyhwtype(unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	int i = 0;
	char buf[IFNAMSIZ];
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, sizeof(buf), name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, sizeof(buf), name, i);
	if (!__dev_get_by_name(buf)) {
		strlcpy(dev->name, buf, IFNAMSIZ);
		return i;
	}

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

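/*
 * Worked example (editorial, not in the original source): with "eth0" and
 * "eth2" already registered, requesting the format "eth%d" marks bits 0
 * and 2 in the in-use page, so find_first_zero_bit() picks unit 1.
 *
 *	err = dev_alloc_name(dev, "eth%d");	// sets dev->name = "eth1"
 *	if (err < 0)
 *		goto fail;		// -EINVAL, -ENOMEM or -ENFILE
 */
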
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;

	ASSERT_RTNL();

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	device_rename(&dev->dev, dev->name);

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = raw_notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		raw_notifier_call_chain(&netdev_chain,
				NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

/**
 *	dev_load	- load a network module
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

static int default_rebuild_header(struct sk_buff *skb)
{
	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
	       skb->dev ? skb->dev->name : "NULL!!!");
	kfree_skb(skb);
	return 1;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);
	if (dev->open) {
		ret = dev->open(dev);
		if (ret)
			clear_bit(__LINK_STATE_START, &dev->state);
	}

	/*
	 *	If it went open OK then:
	 */

	if (!ret) {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return ret;
}

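/*
 * Usage sketch (editorial, not in the original source): dev_open() and
 * dev_close() expect the RTNL semaphore to be held, since the flag
 * updates and notifier calls above assume a single writer.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);	// no-op if IFF_UP is already set
 *	rtnl_unlock();
 */
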
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);

	dev_deactivate(dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	return 0;
}


/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_netdev(dev) {
		err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
		err = notifier_to_errno(err);
		if (err)
			goto rollback;

		if (!(dev->flags & IFF_UP))
			continue;

		nb->notifier_call(nb, NETDEV_UP, dev);
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_netdev(dev) {
		if (dev == last)
			break;

		if (dev->flags & IFF_UP) {
			nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
			nb->notifier_call(nb, NETDEV_DOWN, dev);
		}
		nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
	}
	goto unlock;
}

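/*
 * Example (editorial sketch, not in the original source): a minimal
 * notifier block; "example_netdev_event" and "example_nb" are
 * hypothetical names. On registration the callback is immediately
 * replayed NETDEV_REGISTER (and NETDEV_UP, where applicable) events for
 * every existing device, as implemented above.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 */
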
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@v:   pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&netdev_chain, val, v);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		unsigned long flags;
		struct softnet_data *sd;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		dev->next_sched = sd->output_queue;
		sd->output_queue = dev;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

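/*
 * Usage sketch (editorial, not in the original source): a driver's
 * power-management hooks typically pair these helpers so the stack stops
 * handing it packets while the hardware is suspended. The suspend/resume
 * function names below are hypothetical.
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *		netif_device_detach(dev);	// queue stopped, dev "absent"
 *		return 0;
 *	}
 *
 *	static int example_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *		netif_device_attach(dev);	// restart queue and watchdog
 *		return 0;
 *	}
 */
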
1da177e4 LT |
1292 | /* |
1293 | * Invalidate hardware checksum when packet is to be mangled, and | |
1294 | * complete checksum manually on outgoing path. | |
1295 | */ | |
84fa7933 | 1296 | int skb_checksum_help(struct sk_buff *skb) |
1da177e4 | 1297 | { |
d3bc23e7 | 1298 | __wsum csum; |
663ead3b | 1299 | int ret = 0, offset; |
1da177e4 | 1300 | |
84fa7933 | 1301 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
a430a43d HX |
1302 | goto out_set_summed; |
1303 | ||
1304 | if (unlikely(skb_shinfo(skb)->gso_size)) { | |
a430a43d HX |
1305 | /* Let GSO fix up the checksum. */ |
1306 | goto out_set_summed; | |
1da177e4 LT |
1307 | } |
1308 | ||
1309 | if (skb_cloned(skb)) { | |
1310 | ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | |
1311 | if (ret) | |
1312 | goto out; | |
1313 | } | |
1314 | ||
663ead3b | 1315 | offset = skb->csum_start - skb_headroom(skb); |
09a62660 | 1316 | BUG_ON(offset > (int)skb->len); |
1da177e4 LT |
1317 | csum = skb_checksum(skb, offset, skb->len-offset, 0); |
1318 | ||
663ead3b | 1319 | offset = skb_headlen(skb) - offset; |
09a62660 | 1320 | BUG_ON(offset <= 0); |
ff1dcadb | 1321 | BUG_ON(skb->csum_offset + 2 > offset); |
1da177e4 | 1322 | |
663ead3b HX |
1323 | *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = |
1324 | csum_fold(csum); | |
a430a43d | 1325 | out_set_summed: |
1da177e4 | 1326 | skb->ip_summed = CHECKSUM_NONE; |
4ec93edb | 1327 | out: |
1da177e4 LT |
1328 | return ret; |
1329 | } | |
1330 | ||
f6a78bfc HX |
1331 | /** |
1332 | * skb_gso_segment - Perform segmentation on skb. | |
1333 | * @skb: buffer to segment | |
576a30eb | 1334 | * @features: features for the output path (see dev->features) |
f6a78bfc HX |
1335 | * |
1336 | * This function segments the given skb and returns a list of segments. | |
576a30eb HX |
1337 | * |
1338 | * It may return NULL if the skb requires no segmentation. This is | |
1339 | * only possible when GSO is used for verifying header integrity. | |
f6a78bfc | 1340 | */ |
576a30eb | 1341 | struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) |
f6a78bfc HX |
1342 | { |
1343 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); | |
1344 | struct packet_type *ptype; | |
252e3346 | 1345 | __be16 type = skb->protocol; |
a430a43d | 1346 | int err; |
f6a78bfc HX |
1347 | |
1348 | BUG_ON(skb_shinfo(skb)->frag_list); | |
f6a78bfc | 1349 | |
459a98ed | 1350 | skb_reset_mac_header(skb); |
b0e380b1 | 1351 | skb->mac_len = skb->network_header - skb->mac_header; |
f6a78bfc HX |
1352 | __skb_pull(skb, skb->mac_len); |
1353 | ||
f9d106a6 | 1354 | if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) { |
a430a43d HX |
1355 | if (skb_header_cloned(skb) && |
1356 | (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) | |
1357 | return ERR_PTR(err); | |
1358 | } | |
1359 | ||
f6a78bfc HX |
1360 | rcu_read_lock(); |
1361 | list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { | |
1362 | if (ptype->type == type && !ptype->dev && ptype->gso_segment) { | |
84fa7933 | 1363 | if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { |
a430a43d HX |
1364 | err = ptype->gso_send_check(skb); |
1365 | segs = ERR_PTR(err); | |
1366 | if (err || skb_gso_ok(skb, features)) | |
1367 | break; | |
d56f90a7 ACM |
1368 | __skb_push(skb, (skb->data - |
1369 | skb_network_header(skb))); | |
a430a43d | 1370 | } |
576a30eb | 1371 | segs = ptype->gso_segment(skb, features); |
f6a78bfc HX |
1372 | break; |
1373 | } | |
1374 | } | |
1375 | rcu_read_unlock(); | |
1376 | ||
98e399f8 | 1377 | __skb_push(skb, skb->data - skb_mac_header(skb)); |
576a30eb | 1378 | |
f6a78bfc HX |
1379 | return segs; |
1380 | } | |
1381 | ||
1382 | EXPORT_SYMBOL(skb_gso_segment); | |
1383 | ||
fb286bb2 HX |
1384 | /* Take action when hardware reception checksum errors are detected. */ |
1385 | #ifdef CONFIG_BUG | |
1386 | void netdev_rx_csum_fault(struct net_device *dev) | |
1387 | { | |
1388 | if (net_ratelimit()) { | |
4ec93edb | 1389 | printk(KERN_ERR "%s: hw csum failure.\n", |
246a4212 | 1390 | dev ? dev->name : "<unknown>"); |
fb286bb2 HX |
1391 | dump_stack(); |
1392 | } | |
1393 | } | |
1394 | EXPORT_SYMBOL(netdev_rx_csum_fault); | |
1395 | #endif | |
1396 | ||
1da177e4 LT |
1397 | /* Actually, we should eliminate this check as soon as we know, that: |
1398 | * 1. IOMMU is present and allows to map all the memory. | |
1399 | * 2. No high memory really exists on this machine. | |
1400 | */ | |
1401 | ||
1402 | static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) | |
1403 | { | |
3d3a8533 | 1404 | #ifdef CONFIG_HIGHMEM |
1da177e4 LT |
1405 | int i; |
1406 | ||
1407 | if (dev->features & NETIF_F_HIGHDMA) | |
1408 | return 0; | |
1409 | ||
1410 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | |
1411 | if (PageHighMem(skb_shinfo(skb)->frags[i].page)) | |
1412 | return 1; | |
1413 | ||
3d3a8533 | 1414 | #endif |
1da177e4 LT |
1415 | return 0; |
1416 | } | |
1da177e4 | 1417 | |
f6a78bfc HX |
1418 | struct dev_gso_cb { |
1419 | void (*destructor)(struct sk_buff *skb); | |
1420 | }; | |
1421 | ||
1422 | #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) | |
1423 | ||
1424 | static void dev_gso_skb_destructor(struct sk_buff *skb) | |
1425 | { | |
1426 | struct dev_gso_cb *cb; | |
1427 | ||
1428 | do { | |
1429 | struct sk_buff *nskb = skb->next; | |
1430 | ||
1431 | skb->next = nskb->next; | |
1432 | nskb->next = NULL; | |
1433 | kfree_skb(nskb); | |
1434 | } while (skb->next); | |
1435 | ||
1436 | cb = DEV_GSO_CB(skb); | |
1437 | if (cb->destructor) | |
1438 | cb->destructor(skb); | |
1439 | } | |
1440 | ||
1441 | /** | |
1442 | * dev_gso_segment - Perform emulated hardware segmentation on skb. | |
1443 | * @skb: buffer to segment | |
1444 | * | |
1445 | * This function segments the given skb and stores the list of segments | |
1446 | * in skb->next. | |
1447 | */ | |
1448 | static int dev_gso_segment(struct sk_buff *skb) | |
1449 | { | |
1450 | struct net_device *dev = skb->dev; | |
1451 | struct sk_buff *segs; | |
576a30eb HX |
1452 | int features = dev->features & ~(illegal_highdma(dev, skb) ? |
1453 | NETIF_F_SG : 0); | |
1454 | ||
1455 | segs = skb_gso_segment(skb, features); | |
1456 | ||
1457 | /* Verifying header integrity only. */ | |
1458 | if (!segs) | |
1459 | return 0; | |
f6a78bfc | 1460 | |
f6a78bfc HX |
1461 | if (unlikely(IS_ERR(segs))) |
1462 | return PTR_ERR(segs); | |
1463 | ||
1464 | skb->next = segs; | |
1465 | DEV_GSO_CB(skb)->destructor = skb->destructor; | |
1466 | skb->destructor = dev_gso_skb_destructor; | |
1467 | ||
1468 | return 0; | |
1469 | } | |
1470 | ||
1471 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1472 | { | |
1473 | if (likely(!skb->next)) { | |
9be9a6b9 | 1474 | if (!list_empty(&ptype_all)) |
f6a78bfc HX |
1475 | dev_queue_xmit_nit(skb, dev); |
1476 | ||
576a30eb HX |
1477 | if (netif_needs_gso(dev, skb)) { |
1478 | if (unlikely(dev_gso_segment(skb))) | |
1479 | goto out_kfree_skb; | |
1480 | if (skb->next) | |
1481 | goto gso; | |
1482 | } | |
f6a78bfc | 1483 | |
576a30eb | 1484 | return dev->hard_start_xmit(skb, dev); |
f6a78bfc HX |
1485 | } |
1486 | ||
576a30eb | 1487 | gso: |
f6a78bfc HX |
1488 | do { |
1489 | struct sk_buff *nskb = skb->next; | |
1490 | int rc; | |
1491 | ||
1492 | skb->next = nskb->next; | |
1493 | nskb->next = NULL; | |
1494 | rc = dev->hard_start_xmit(nskb, dev); | |
1495 | if (unlikely(rc)) { | |
f54d9e8d | 1496 | nskb->next = skb->next; |
f6a78bfc HX |
1497 | skb->next = nskb; |
1498 | return rc; | |
1499 | } | |
f25f4e44 PWJ |
1500 | if (unlikely((netif_queue_stopped(dev) || |
1501 | netif_subqueue_stopped(dev, skb->queue_mapping)) && | |
1502 | skb->next)) | |
f54d9e8d | 1503 | return NETDEV_TX_BUSY; |
f6a78bfc | 1504 | } while (skb->next); |
4ec93edb | 1505 | |
f6a78bfc HX |
1506 | skb->destructor = DEV_GSO_CB(skb)->destructor; |
1507 | ||
1508 | out_kfree_skb: | |
1509 | kfree_skb(skb); | |
1510 | return 0; | |
1511 | } | |
1512 | ||
1da177e4 LT |
1513 | #define HARD_TX_LOCK(dev, cpu) { \ |
1514 | if ((dev->features & NETIF_F_LLTX) == 0) { \ | |
932ff279 | 1515 | netif_tx_lock(dev); \ |
1da177e4 LT |
1516 | } \ |
1517 | } | |
1518 | ||
1519 | #define HARD_TX_UNLOCK(dev) { \ | |
1520 | if ((dev->features & NETIF_F_LLTX) == 0) { \ | |
932ff279 | 1521 | netif_tx_unlock(dev); \ |
1da177e4 LT |
1522 | } \ |
1523 | } | |
1524 | ||
1525 | /** | |
1526 | * dev_queue_xmit - transmit a buffer | |
1527 | * @skb: buffer to transmit | |
1528 | * | |
1529 | * Queue a buffer for transmission to a network device. The caller must | |
1530 | * have set the device and priority and built the buffer before calling | |
1531 | * this function. The function can be called from an interrupt. | |
1532 | * | |
1533 | * A negative errno code is returned on a failure. A success does not | |
1534 | * guarantee the frame will be transmitted as it may be dropped due | |
1535 | * to congestion or traffic shaping. | |
af191367 BG |
1536 | * |
1537 | * ----------------------------------------------------------------------------------- | |
1538 | * I notice this method can also return errors from the queue disciplines, | |
1539 | * including NET_XMIT_DROP, which is a positive value. So, errors can also | |
1540 | * be positive. | |
1541 | * | |
1542 | * Regardless of the return value, the skb is consumed, so it is currently | |
1543 | * difficult to retry a send to this method. (You can bump the ref count | |
1544 | * before sending to hold a reference for retry if you are careful.) | |
1545 | * | |
1546 | * When calling this method, interrupts MUST be enabled. This is because | |
1547 | * the BH enable code must have IRQs enabled so that it will not deadlock. | |
1548 | * --BLG | |
1da177e4 LT |
1549 | */ |
1550 | ||
1551 | int dev_queue_xmit(struct sk_buff *skb) | |
1552 | { | |
1553 | struct net_device *dev = skb->dev; | |
1554 | struct Qdisc *q; | |
1555 | int rc = -ENOMEM; | |
1556 | ||
f6a78bfc HX |
1557 | /* GSO will handle the following emulations directly. */ |
1558 | if (netif_needs_gso(dev, skb)) | |
1559 | goto gso; | |
1560 | ||
1da177e4 LT |
1561 | if (skb_shinfo(skb)->frag_list && |
1562 | !(dev->features & NETIF_F_FRAGLIST) && | |
364c6bad | 1563 | __skb_linearize(skb)) |
1da177e4 LT |
1564 | goto out_kfree_skb; |
1565 | ||
1566 | /* Fragmented skb is linearized if device does not support SG, | |
1567 | * or if at least one of fragments is in highmem and device | |
1568 | * does not support DMA from it. | |
1569 | */ | |
1570 | if (skb_shinfo(skb)->nr_frags && | |
1571 | (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) && | |
364c6bad | 1572 | __skb_linearize(skb)) |
1da177e4 LT |
1573 | goto out_kfree_skb; |
1574 | ||
1575 | /* If packet is not checksummed and device does not support | |
1576 | * checksumming for this protocol, complete checksumming here. | |
1577 | */ | |
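	/*
	 * (For CHECKSUM_PARTIAL, skb->csum_start is the offset from
	 * skb->head at which checksumming should start, so subtracting
	 * the headroom turns it into a transport header offset.  If the
	 * device cannot do the job, skb_checksum_help() below computes
	 * the sum in software.)
	 */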
663ead3b HX |
1578 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1579 | skb_set_transport_header(skb, skb->csum_start - | |
1580 | skb_headroom(skb)); | |
1581 | ||
a298830c HX |
1582 | if (!(dev->features & NETIF_F_GEN_CSUM) && |
1583 | !((dev->features & NETIF_F_IP_CSUM) && | |
1584 | skb->protocol == htons(ETH_P_IP)) && | |
1585 | !((dev->features & NETIF_F_IPV6_CSUM) && | |
1586 | skb->protocol == htons(ETH_P_IPV6))) | |
663ead3b HX |
1587 | if (skb_checksum_help(skb)) |
1588 | goto out_kfree_skb; | |
1589 | } | |
1da177e4 | 1590 | |
f6a78bfc | 1591 | gso: |
2d7ceece ED |
1592 | spin_lock_prefetch(&dev->queue_lock); |
1593 | ||
4ec93edb YH |
1594 | /* Disable soft irqs for various locks below. Also |
1595 | * stops preemption for RCU. | |
1da177e4 | 1596 | */ |
4ec93edb | 1597 | rcu_read_lock_bh(); |
1da177e4 | 1598 | |
4ec93edb YH |
1599 | /* Updates of qdisc are serialized by queue_lock. |
1600 | * The struct Qdisc which is pointed to by qdisc is now an |
1601 | * RCU structure - it may be accessed without acquiring |
1da177e4 | 1602 | * a lock (but the structure may be stale.) The freeing of the |
4ec93edb | 1603 | * qdisc will be deferred until it's known that there are no |
1da177e4 | 1604 | * more references to it. |
4ec93edb YH |
1605 | * |
1606 | * If the qdisc has an enqueue function, we still need to | |
1da177e4 LT |
1607 | * hold the queue_lock before calling it, since queue_lock |
1608 | * also serializes access to the device queue. | |
1609 | */ | |
1610 | ||
1611 | q = rcu_dereference(dev->qdisc); | |
1612 | #ifdef CONFIG_NET_CLS_ACT | |
1613 | skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); | |
1614 | #endif | |
1615 | if (q->enqueue) { | |
1616 | /* Grab device queue */ | |
1617 | spin_lock(&dev->queue_lock); | |
85670cc1 PM |
1618 | q = dev->qdisc; |
1619 | if (q->enqueue) { | |
f25f4e44 PWJ |
1620 | /* reset queue_mapping to zero */ |
1621 | skb->queue_mapping = 0; | |
85670cc1 PM |
1622 | rc = q->enqueue(skb, q); |
1623 | qdisc_run(dev); | |
1624 | spin_unlock(&dev->queue_lock); | |
1da177e4 | 1625 | |
85670cc1 PM |
1626 | rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc; |
1627 | goto out; | |
1628 | } | |
1da177e4 | 1629 | spin_unlock(&dev->queue_lock); |
1da177e4 LT |
1630 | } |
1631 | ||
1632 | /* The device has no queue. Common case for software devices: | |
1633 | loopback and all sorts of tunnels... |
1634 | ||
932ff279 HX |
1635 | Really, it is unlikely that netif_tx_lock protection is necessary |
1636 | here. (E.g. loopback and IP tunnels are clean, ignoring statistics |
1da177e4 LT |
1637 | counters.) |
1638 | However, it is possible that they rely on the protection |
1639 | we provide here. |
1640 | ||
1641 | Check this and shoot the lock: it is not prone to deadlocks. |
1642 | Or shoot the noqueue qdisc instead, it is even simpler 8) |
1643 | */ | |
1644 | if (dev->flags & IFF_UP) { | |
1645 | int cpu = smp_processor_id(); /* ok because BHs are off */ | |
1646 | ||
1647 | if (dev->xmit_lock_owner != cpu) { | |
1648 | ||
1649 | HARD_TX_LOCK(dev, cpu); | |
1650 | ||
f25f4e44 PWJ |
1651 | if (!netif_queue_stopped(dev) && |
1652 | !netif_subqueue_stopped(dev, skb->queue_mapping)) { | |
1da177e4 | 1653 | rc = 0; |
f6a78bfc | 1654 | if (!dev_hard_start_xmit(skb, dev)) { |
1da177e4 LT |
1655 | HARD_TX_UNLOCK(dev); |
1656 | goto out; | |
1657 | } | |
1658 | } | |
1659 | HARD_TX_UNLOCK(dev); | |
1660 | if (net_ratelimit()) | |
1661 | printk(KERN_CRIT "Virtual device %s asks to " | |
1662 | "queue packet!\n", dev->name); | |
1663 | } else { | |
1664 | /* Recursion detected! It is possible, |
1665 | * unfortunately */ |
1666 | if (net_ratelimit()) | |
1667 | printk(KERN_CRIT "Dead loop on virtual device " | |
1668 | "%s, fix it urgently!\n", dev->name); | |
1669 | } | |
1670 | } | |
1671 | ||
1672 | rc = -ENETDOWN; | |
d4828d85 | 1673 | rcu_read_unlock_bh(); |
1da177e4 LT |
1674 | |
1675 | out_kfree_skb: | |
1676 | kfree_skb(skb); | |
1677 | return rc; | |
1678 | out: | |
d4828d85 | 1679 | rcu_read_unlock_bh(); |
1da177e4 LT |
1680 | return rc; |
1681 | } | |
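/*
 * Illustrative sketch, not part of the original file: a protocol or
 * tunnel layer typically uses dev_queue_xmit() like this ("my_dev" is
 * a hypothetical egress device):
 *
 *	skb->dev = my_dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	rc = dev_queue_xmit(skb);
 *	(the skb is consumed whatever rc says - do not touch it again)
 */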
1682 | ||
1683 | ||
1684 | /*======================================================================= | |
1685 | Receiver routines | |
1686 | =======================================================================*/ | |
1687 | ||
6b2bedc3 SH |
1688 | int netdev_max_backlog __read_mostly = 1000; |
1689 | int netdev_budget __read_mostly = 300; | |
1690 | int weight_p __read_mostly = 64; /* old backlog weight */ | |
1da177e4 LT |
1691 | |
1692 | DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | |
1693 | ||
1694 | ||
1da177e4 LT |
1695 | /** |
1696 | * netif_rx - post buffer to the network code | |
1697 | * @skb: buffer to post | |
1698 | * | |
1699 | * This function receives a packet from a device driver and queues it for | |
1700 | * the upper (protocol) levels to process. It always succeeds. The buffer | |
1701 | * may be dropped during processing for congestion control or by the | |
1702 | * protocol layers. | |
1703 | * | |
1704 | * return values: | |
1705 | * NET_RX_SUCCESS (no congestion) | |
1706 | * NET_RX_CN_LOW (low congestion) | |
1707 | * NET_RX_CN_MOD (moderate congestion) | |
1708 | * NET_RX_CN_HIGH (high congestion) | |
1709 | * NET_RX_DROP (packet was dropped) | |
1710 | * | |
1711 | */ | |
1712 | ||
1713 | int netif_rx(struct sk_buff *skb) | |
1714 | { | |
1da177e4 LT |
1715 | struct softnet_data *queue; |
1716 | unsigned long flags; | |
1717 | ||
1718 | /* if netpoll wants it, pretend we never saw it */ | |
1719 | if (netpoll_rx(skb)) | |
1720 | return NET_RX_DROP; | |
1721 | ||
b7aa0bf7 | 1722 | if (!skb->tstamp.tv64) |
a61bbcf2 | 1723 | net_timestamp(skb); |
1da177e4 LT |
1724 | |
1725 | /* | |
1726 | * The code is arranged so that the path is shortest when the |
1727 | * CPU is congested but still operating. |
1728 | */ | |
1729 | local_irq_save(flags); | |
1da177e4 LT |
1730 | queue = &__get_cpu_var(softnet_data); |
1731 | ||
1732 | __get_cpu_var(netdev_rx_stat).total++; | |
1733 | if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { | |
1734 | if (queue->input_pkt_queue.qlen) { | |
1da177e4 LT |
1735 | enqueue: |
1736 | dev_hold(skb->dev); | |
1737 | __skb_queue_tail(&queue->input_pkt_queue, skb); | |
1da177e4 | 1738 | local_irq_restore(flags); |
34008d8c | 1739 | return NET_RX_SUCCESS; |
1da177e4 LT |
1740 | } |
1741 | ||
bea3348e | 1742 | napi_schedule(&queue->backlog); |
1da177e4 LT |
1743 | goto enqueue; |
1744 | } | |
1745 | ||
1da177e4 LT |
1746 | __get_cpu_var(netdev_rx_stat).dropped++; |
1747 | local_irq_restore(flags); | |
1748 | ||
1749 | kfree_skb(skb); | |
1750 | return NET_RX_DROP; | |
1751 | } | |
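/*
 * Illustrative sketch, not from the original file: a non-NAPI driver
 * would feed netif_rx() from its interrupt handler roughly like this
 * ("priv" and rx_copy_frame() are hypothetical):
 *
 *	skb = dev_alloc_skb(len + NET_IP_ALIGN);
 *	if (skb) {
 *		skb_reserve(skb, NET_IP_ALIGN);
 *		rx_copy_frame(priv, skb_put(skb, len));
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */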
1752 | ||
1753 | int netif_rx_ni(struct sk_buff *skb) | |
1754 | { | |
1755 | int err; | |
1756 | ||
1757 | preempt_disable(); | |
1758 | err = netif_rx(skb); | |
1759 | if (local_softirq_pending()) | |
1760 | do_softirq(); | |
1761 | preempt_enable(); | |
1762 | ||
1763 | return err; | |
1764 | } | |
1765 | ||
1766 | EXPORT_SYMBOL(netif_rx_ni); | |
1767 | ||
f2ccd8fa | 1768 | static inline struct net_device *skb_bond(struct sk_buff *skb) |
1da177e4 LT |
1769 | { |
1770 | struct net_device *dev = skb->dev; | |
1771 | ||
8f903c70 | 1772 | if (dev->master) { |
7ea49ed7 | 1773 | if (skb_bond_should_drop(skb)) { |
8f903c70 JV |
1774 | kfree_skb(skb); |
1775 | return NULL; | |
1776 | } | |
1da177e4 | 1777 | skb->dev = dev->master; |
8f903c70 | 1778 | } |
f2ccd8fa DM |
1779 | |
1780 | return dev; | |
1da177e4 LT |
1781 | } |
1782 | ||
bea3348e | 1783 | |
1da177e4 LT |
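/*
 * net_tx_action() is the NET_TX_SOFTIRQ handler: it frees the skbs
 * that dev_kfree_skb_irq() parked on the per-cpu completion queue and
 * restarts the qdiscs that netif_schedule() queued for output.
 */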
1784 | static void net_tx_action(struct softirq_action *h) |
1785 | { | |
1786 | struct softnet_data *sd = &__get_cpu_var(softnet_data); | |
1787 | ||
1788 | if (sd->completion_queue) { | |
1789 | struct sk_buff *clist; | |
1790 | ||
1791 | local_irq_disable(); | |
1792 | clist = sd->completion_queue; | |
1793 | sd->completion_queue = NULL; | |
1794 | local_irq_enable(); | |
1795 | ||
1796 | while (clist) { | |
1797 | struct sk_buff *skb = clist; | |
1798 | clist = clist->next; | |
1799 | ||
1800 | BUG_TRAP(!atomic_read(&skb->users)); | |
1801 | __kfree_skb(skb); | |
1802 | } | |
1803 | } | |
1804 | ||
1805 | if (sd->output_queue) { | |
1806 | struct net_device *head; | |
1807 | ||
1808 | local_irq_disable(); | |
1809 | head = sd->output_queue; | |
1810 | sd->output_queue = NULL; | |
1811 | local_irq_enable(); | |
1812 | ||
1813 | while (head) { | |
1814 | struct net_device *dev = head; | |
1815 | head = head->next_sched; | |
1816 | ||
1817 | smp_mb__before_clear_bit(); | |
1818 | clear_bit(__LINK_STATE_SCHED, &dev->state); | |
1819 | ||
1820 | if (spin_trylock(&dev->queue_lock)) { | |
1821 | qdisc_run(dev); | |
1822 | spin_unlock(&dev->queue_lock); | |
1823 | } else { | |
1824 | netif_schedule(dev); | |
1825 | } | |
1826 | } | |
1827 | } | |
1828 | } | |
1829 | ||
6f05f629 SH |
1830 | static inline int deliver_skb(struct sk_buff *skb, |
1831 | struct packet_type *pt_prev, | |
1832 | struct net_device *orig_dev) | |
1da177e4 LT |
1833 | { |
1834 | atomic_inc(&skb->users); | |
f2ccd8fa | 1835 | return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
1da177e4 LT |
1836 | } |
1837 | ||
1838 | #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE) | |
6229e362 | 1839 | /* These hooks are defined here for ATM */ |
1da177e4 LT |
1840 | struct net_bridge; |
1841 | struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br, | |
1842 | unsigned char *addr); | |
6229e362 | 1843 | void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly; |
1da177e4 | 1844 | |
6229e362 SH |
1845 | /* |
1846 | * If the bridge module is loaded, call the bridging hook. |
1847 | * Returns NULL if the packet was consumed. |
1848 | */ | |
1849 | struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, | |
1850 | struct sk_buff *skb) __read_mostly; | |
1851 | static inline struct sk_buff *handle_bridge(struct sk_buff *skb, | |
1852 | struct packet_type **pt_prev, int *ret, | |
1853 | struct net_device *orig_dev) | |
1da177e4 LT |
1854 | { |
1855 | struct net_bridge_port *port; | |
1856 | ||
6229e362 SH |
1857 | if (skb->pkt_type == PACKET_LOOPBACK || |
1858 | (port = rcu_dereference(skb->dev->br_port)) == NULL) | |
1859 | return skb; | |
1da177e4 LT |
1860 | |
1861 | if (*pt_prev) { | |
6229e362 | 1862 | *ret = deliver_skb(skb, *pt_prev, orig_dev); |
1da177e4 | 1863 | *pt_prev = NULL; |
4ec93edb YH |
1864 | } |
1865 | ||
6229e362 | 1866 | return br_handle_frame_hook(port, skb); |
1da177e4 LT |
1867 | } |
1868 | #else | |
6229e362 | 1869 | #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb) |
1da177e4 LT |
1870 | #endif |
1871 | ||
b863ceb7 PM |
1872 | #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE) |
1873 | struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly; | |
1874 | EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook); | |
1875 | ||
1876 | static inline struct sk_buff *handle_macvlan(struct sk_buff *skb, | |
1877 | struct packet_type **pt_prev, | |
1878 | int *ret, | |
1879 | struct net_device *orig_dev) | |
1880 | { | |
1881 | if (skb->dev->macvlan_port == NULL) | |
1882 | return skb; | |
1883 | ||
1884 | if (*pt_prev) { | |
1885 | *ret = deliver_skb(skb, *pt_prev, orig_dev); | |
1886 | *pt_prev = NULL; | |
1887 | } | |
1888 | return macvlan_handle_frame_hook(skb); | |
1889 | } | |
1890 | #else | |
1891 | #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb) | |
1892 | #endif | |
1893 | ||
1da177e4 LT |
1894 | #ifdef CONFIG_NET_CLS_ACT |
1895 | /* TODO: Maybe we should just force sch_ingress to be compiled in |
1896 | * when CONFIG_NET_CLS_ACT is? Otherwise we execute some useless |
1897 | * instructions (a compare and two extra stores) when it is off |
1898 | * but CONFIG_NET_CLS_ACT is on. |
4ec93edb | 1899 | * NOTE: This doesn't stop any functionality; if you don't have |
1da177e4 LT |
1900 | * the ingress scheduler, you just can't add policies on ingress. |
1901 | * | |
1902 | */ | |
4ec93edb | 1903 | static int ing_filter(struct sk_buff *skb) |
1da177e4 LT |
1904 | { |
1905 | struct Qdisc *q; | |
1906 | struct net_device *dev = skb->dev; | |
1907 | int result = TC_ACT_OK; | |
4ec93edb | 1908 | |
1da177e4 LT |
1909 | if (dev->qdisc_ingress) { |
1910 | __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd); | |
1911 | if (MAX_RED_LOOP < ttl++) { | |
c01003c2 PM |
1912 | printk(KERN_WARNING "Redir loop detected Dropping packet (%d->%d)\n", |
1913 | skb->iif, skb->dev->ifindex); | |
1da177e4 LT |
1914 | return TC_ACT_SHOT; |
1915 | } | |
1916 | ||
1917 | skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl); | |
1918 | ||
1919 | skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS); | |
86e65da9 | 1920 | |
fd44de7c | 1921 | spin_lock(&dev->ingress_lock); |
1da177e4 LT |
1922 | if ((q = dev->qdisc_ingress) != NULL) |
1923 | result = q->enqueue(skb, q); | |
fd44de7c | 1924 | spin_unlock(&dev->ingress_lock); |
1da177e4 LT |
1925 | |
1926 | } | |
1927 | ||
1928 | return result; | |
1929 | } | |
1930 | #endif | |
1931 | ||
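/*
 * netif_receive_skb() is the main receive data processing function,
 * called from softirq context (e.g. from a NAPI poll routine).  It
 * runs the packet past the taps, ingress filtering and the bridge and
 * macvlan hooks before handing it to the matching protocol handlers.
 */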
1932 | int netif_receive_skb(struct sk_buff *skb) | |
1933 | { | |
1934 | struct packet_type *ptype, *pt_prev; | |
f2ccd8fa | 1935 | struct net_device *orig_dev; |
1da177e4 | 1936 | int ret = NET_RX_DROP; |
252e3346 | 1937 | __be16 type; |
1da177e4 LT |
1938 | |
1939 | /* if we've gotten here through NAPI, check netpoll */ | |
bea3348e | 1940 | if (netpoll_receive_skb(skb)) |
1da177e4 LT |
1941 | return NET_RX_DROP; |
1942 | ||
b7aa0bf7 | 1943 | if (!skb->tstamp.tv64) |
a61bbcf2 | 1944 | net_timestamp(skb); |
1da177e4 | 1945 | |
c01003c2 PM |
1946 | if (!skb->iif) |
1947 | skb->iif = skb->dev->ifindex; | |
86e65da9 | 1948 | |
f2ccd8fa | 1949 | orig_dev = skb_bond(skb); |
1da177e4 | 1950 | |
8f903c70 JV |
1951 | if (!orig_dev) |
1952 | return NET_RX_DROP; | |
1953 | ||
1da177e4 LT |
1954 | __get_cpu_var(netdev_rx_stat).total++; |
1955 | ||
c1d2bbe1 | 1956 | skb_reset_network_header(skb); |
badff6d0 | 1957 | skb_reset_transport_header(skb); |
b0e380b1 | 1958 | skb->mac_len = skb->network_header - skb->mac_header; |
1da177e4 LT |
1959 | |
1960 | pt_prev = NULL; | |
1961 | ||
1962 | rcu_read_lock(); | |
1963 | ||
1964 | #ifdef CONFIG_NET_CLS_ACT | |
1965 | if (skb->tc_verd & TC_NCLS) { | |
1966 | skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); | |
1967 | goto ncls; | |
1968 | } | |
1969 | #endif | |
1970 | ||
1971 | list_for_each_entry_rcu(ptype, &ptype_all, list) { | |
1972 | if (!ptype->dev || ptype->dev == skb->dev) { | |
4ec93edb | 1973 | if (pt_prev) |
f2ccd8fa | 1974 | ret = deliver_skb(skb, pt_prev, orig_dev); |
1da177e4 LT |
1975 | pt_prev = ptype; |
1976 | } | |
1977 | } | |
1978 | ||
1979 | #ifdef CONFIG_NET_CLS_ACT | |
1980 | if (pt_prev) { | |
f2ccd8fa | 1981 | ret = deliver_skb(skb, pt_prev, orig_dev); |
1da177e4 LT |
1982 | pt_prev = NULL; /* no one else should process this after us */ |
1983 | } else { | |
1984 | skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); | |
1985 | } | |
1986 | ||
1987 | ret = ing_filter(skb); | |
1988 | ||
1989 | if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) { | |
1990 | kfree_skb(skb); | |
1991 | goto out; | |
1992 | } | |
1993 | ||
1994 | skb->tc_verd = 0; | |
1995 | ncls: | |
1996 | #endif | |
1997 | ||
6229e362 | 1998 | skb = handle_bridge(skb, &pt_prev, &ret, orig_dev); |
b863ceb7 PM |
1999 | if (!skb) |
2000 | goto out; | |
2001 | skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev); | |
6229e362 | 2002 | if (!skb) |
1da177e4 LT |
2003 | goto out; |
2004 | ||
2005 | type = skb->protocol; | |
2006 | list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) { | |
2007 | if (ptype->type == type && | |
2008 | (!ptype->dev || ptype->dev == skb->dev)) { | |
4ec93edb | 2009 | if (pt_prev) |
f2ccd8fa | 2010 | ret = deliver_skb(skb, pt_prev, orig_dev); |
1da177e4 LT |
2011 | pt_prev = ptype; |
2012 | } | |
2013 | } | |
2014 | ||
2015 | if (pt_prev) { | |
f2ccd8fa | 2016 | ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
1da177e4 LT |
2017 | } else { |
2018 | kfree_skb(skb); | |
2019 | /* Jamal, now you will not be able to escape explaining |
2020 | * to me how you were going to use this. :-) |
2021 | */ | |
2022 | ret = NET_RX_DROP; | |
2023 | } | |
2024 | ||
2025 | out: | |
2026 | rcu_read_unlock(); | |
2027 | return ret; | |
2028 | } | |
2029 | ||
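/*
 * process_backlog() is the NAPI poll routine behind netif_rx(): it
 * drains up to "quota" packets from this cpu's input_pkt_queue and
 * pushes each one through netif_receive_skb().
 */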
bea3348e | 2030 | static int process_backlog(struct napi_struct *napi, int quota) |
1da177e4 LT |
2031 | { |
2032 | int work = 0; | |
1da177e4 LT |
2033 | struct softnet_data *queue = &__get_cpu_var(softnet_data); |
2034 | unsigned long start_time = jiffies; | |
2035 | ||
bea3348e SH |
2036 | napi->weight = weight_p; |
2037 | do { | |
1da177e4 LT |
2038 | struct sk_buff *skb; |
2039 | struct net_device *dev; | |
2040 | ||
2041 | local_irq_disable(); | |
2042 | skb = __skb_dequeue(&queue->input_pkt_queue); | |
bea3348e SH |
2043 | if (!skb) { |
2044 | __napi_complete(napi); | |
2045 | local_irq_enable(); | |
2046 | break; | |
2047 | } | |
2048 | ||
1da177e4 LT |
2049 | local_irq_enable(); |
2050 | ||
2051 | dev = skb->dev; | |
2052 | ||
2053 | netif_receive_skb(skb); | |
2054 | ||
2055 | dev_put(dev); | |
bea3348e | 2056 | } while (++work < quota && jiffies == start_time); |
1da177e4 | 2057 | |
bea3348e SH |
2058 | return work; |
2059 | } | |
1da177e4 | 2060 | |
bea3348e SH |
2061 | /** |
2062 | * __napi_schedule - schedule for receive | |
2063 | * @napi: entry to schedule | |
2064 | * | |
2065 | * The entry's receive function will be scheduled to run | |
2066 | */ | |
2067 | void fastcall __napi_schedule(struct napi_struct *n) | |
2068 | { | |
2069 | unsigned long flags; | |
1da177e4 | 2070 | |
bea3348e SH |
2071 | local_irq_save(flags); |
2072 | list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list); | |
2073 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | |
2074 | local_irq_restore(flags); | |
1da177e4 | 2075 | } |
bea3348e SH |
2076 | EXPORT_SYMBOL(__napi_schedule); |
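/*
 * Illustrative sketch, not from the original file: a NAPI driver's
 * interrupt handler normally pairs the prep and schedule helpers like
 * this ("priv" and my_disable_rx_irq() are hypothetical):
 *
 *	if (napi_schedule_prep(&priv->napi)) {
 *		my_disable_rx_irq(priv);
 *		__napi_schedule(&priv->napi);
 *	}
 */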
2077 | ||
1da177e4 LT |
2078 | |
2079 | static void net_rx_action(struct softirq_action *h) | |
2080 | { | |
bea3348e | 2081 | struct list_head *list = &__get_cpu_var(softnet_data).poll_list; |
1da177e4 | 2082 | unsigned long start_time = jiffies; |
51b0bded | 2083 | int budget = netdev_budget; |
53fb95d3 MM |
2084 | void *have; |
2085 | ||
1da177e4 LT |
2086 | local_irq_disable(); |
2087 | ||
bea3348e SH |
2088 | while (!list_empty(list)) { |
2089 | struct napi_struct *n; | |
2090 | int work, weight; | |
1da177e4 | 2091 | |
bea3348e SH |
2092 | /* If the softirq window is exhausted then punt. |
2093 | * | |
2094 | * Note that this is a slight policy change from the | |
2095 | * previous NAPI code, which would allow up to 2 | |
2096 | * jiffies to pass before breaking out. The test | |
2097 | * used to be "jiffies - start_time > 1". | |
2098 | */ | |
2099 | if (unlikely(budget <= 0 || jiffies != start_time)) | |
1da177e4 LT |
2100 | goto softnet_break; |
2101 | ||
2102 | local_irq_enable(); | |
2103 | ||
bea3348e SH |
2104 | /* Even though interrupts have been re-enabled, this |
2105 | * access is safe because interrupts can only add new | |
2106 | * entries to the tail of this list, and only ->poll() | |
2107 | * calls can remove this head entry from the list. | |
2108 | */ | |
2109 | n = list_entry(list->next, struct napi_struct, poll_list); | |
1da177e4 | 2110 | |
bea3348e SH |
2111 | have = netpoll_poll_lock(n); |
2112 | ||
2113 | weight = n->weight; | |
2114 | ||
2115 | work = n->poll(n, weight); | |
2116 | ||
2117 | WARN_ON_ONCE(work > weight); | |
2118 | ||
2119 | budget -= work; | |
2120 | ||
2121 | local_irq_disable(); | |
2122 | ||
2123 | /* Drivers must not modify the NAPI state if they | |
2124 | * consume the entire weight. In such cases this code | |
2125 | * still "owns" the NAPI instance and therefore can | |
2126 | * move the instance around on the list at-will. | |
2127 | */ | |
2128 | if (unlikely(work == weight)) | |
2129 | list_move_tail(&n->poll_list, list); | |
2130 | ||
2131 | netpoll_poll_unlock(have); | |
1da177e4 LT |
2132 | } |
2133 | out: | |
515e06c4 | 2134 | local_irq_enable(); |
bea3348e | 2135 | |
db217334 CL |
2136 | #ifdef CONFIG_NET_DMA |
2137 | /* | |
2138 | * There may not be any more sk_buffs coming right now, so push | |
2139 | * any pending DMA copies to hardware | |
2140 | */ | |
d379b01e DW |
2141 | if (!cpus_empty(net_dma.channel_mask)) { |
2142 | int chan_idx; | |
2143 | for_each_cpu_mask(chan_idx, net_dma.channel_mask) { | |
2144 | struct dma_chan *chan = net_dma.channels[chan_idx]; | |
2145 | if (chan) | |
2146 | dma_async_memcpy_issue_pending(chan); | |
2147 | } | |
db217334 CL |
2148 | } |
2149 | #endif | |
bea3348e | 2150 | |
1da177e4 LT |
2151 | return; |
2152 | ||
2153 | softnet_break: | |
2154 | __get_cpu_var(netdev_rx_stat).time_squeeze++; | |
2155 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | |
2156 | goto out; | |
2157 | } | |
2158 | ||
2159 | static gifconf_func_t * gifconf_list [NPROTO]; | |
2160 | ||
2161 | /** | |
2162 | * register_gifconf - register a SIOCGIF handler | |
2163 | * @family: Address family | |
2164 | * @gifconf: Function handler | |
2165 | * | |
2166 | * Register protocol dependent address dumping routines. The handler | |
2167 | * that is passed must not be freed or reused until it has been replaced | |
2168 | * by another handler. | |
2169 | */ | |
2170 | int register_gifconf(unsigned int family, gifconf_func_t * gifconf) | |
2171 | { | |
2172 | if (family >= NPROTO) | |
2173 | return -EINVAL; | |
2174 | gifconf_list[family] = gifconf; | |
2175 | return 0; | |
2176 | } | |
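/*
 * Illustrative sketch: IPv4 hooks up its SIOCGIFCONF helper roughly
 * like this at boot (see inet_gifconf() in net/ipv4/devinet.c):
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */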
2177 | ||
2178 | ||
2179 | /* | |
2180 | * Map an interface index to its name (SIOCGIFNAME) | |
2181 | */ | |
2182 | ||
2183 | /* | |
2184 | * We need this ioctl for efficient implementation of the | |
2185 | * if_indextoname() function required by the IPv6 API. Without | |
2186 | * it, we would have to search all the interfaces to find a | |
2187 | * match. --pb | |
2188 | */ | |
2189 | ||
2190 | static int dev_ifname(struct ifreq __user *arg) | |
2191 | { | |
2192 | struct net_device *dev; | |
2193 | struct ifreq ifr; | |
2194 | ||
2195 | /* | |
2196 | * Fetch the caller's info block. | |
2197 | */ | |
2198 | ||
2199 | if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) | |
2200 | return -EFAULT; | |
2201 | ||
2202 | read_lock(&dev_base_lock); | |
2203 | dev = __dev_get_by_index(ifr.ifr_ifindex); | |
2204 | if (!dev) { | |
2205 | read_unlock(&dev_base_lock); | |
2206 | return -ENODEV; | |
2207 | } | |
2208 | ||
2209 | strcpy(ifr.ifr_name, dev->name); | |
2210 | read_unlock(&dev_base_lock); | |
2211 | ||
2212 | if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) | |
2213 | return -EFAULT; | |
2214 | return 0; | |
2215 | } | |
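/*
 * Illustrative sketch (userspace side, not part of this file): this
 * ioctl is what if_indextoname() boils down to:
 *
 *	struct ifreq ifr;
 *	ifr.ifr_ifindex = idx;
 *	if (ioctl(sockfd, SIOCGIFNAME, &ifr) == 0)
 *		printf("%s\n", ifr.ifr_name);
 */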
2216 | ||
2217 | /* | |
2218 | * Perform a SIOCGIFCONF call. This structure will change | |
2219 | * size eventually, and there is nothing I can do about it. | |
2220 | * Thus we will need a 'compatibility mode'. | |
2221 | */ | |
2222 | ||
2223 | static int dev_ifconf(char __user *arg) | |
2224 | { | |
2225 | struct ifconf ifc; | |
2226 | struct net_device *dev; | |
2227 | char __user *pos; | |
2228 | int len; | |
2229 | int total; | |
2230 | int i; | |
2231 | ||
2232 | /* | |
2233 | * Fetch the caller's info block. | |
2234 | */ | |
2235 | ||
2236 | if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) | |
2237 | return -EFAULT; | |
2238 | ||
2239 | pos = ifc.ifc_buf; | |
2240 | len = ifc.ifc_len; | |
2241 | ||
2242 | /* | |
2243 | * Loop over the interfaces, and write an info block for each. | |
2244 | */ | |
2245 | ||
2246 | total = 0; | |
7562f876 | 2247 | for_each_netdev(dev) { |
1da177e4 LT |
2248 | for (i = 0; i < NPROTO; i++) { |
2249 | if (gifconf_list[i]) { | |
2250 | int done; | |
2251 | if (!pos) | |
2252 | done = gifconf_list[i](dev, NULL, 0); | |
2253 | else | |
2254 | done = gifconf_list[i](dev, pos + total, | |
2255 | len - total); | |
2256 | if (done < 0) | |
2257 | return -EFAULT; | |
2258 | total += done; | |
2259 | } | |
2260 | } | |
4ec93edb | 2261 | } |
1da177e4 LT |
2262 | |
2263 | /* | |
2264 | * All done. Write the updated control block back to the caller. | |
2265 | */ | |
2266 | ifc.ifc_len = total; | |
2267 | ||
2268 | /* | |
2269 | * Both BSD and Solaris return 0 here, so we do too. | |
2270 | */ | |
2271 | return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0; | |
2272 | } | |
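/*
 * Illustrative sketch (userspace side): the classic two-step use of
 * SIOCGIFCONF - probe with a NULL buffer to learn the size, then
 * fetch the addresses:
 *
 *	ifc.ifc_buf = NULL;
 *	ioctl(sockfd, SIOCGIFCONF, &ifc);	(kernel sets ifc.ifc_len)
 *	ifc.ifc_buf = malloc(ifc.ifc_len);
 *	ioctl(sockfd, SIOCGIFCONF, &ifc);	(buffer now holds ifreqs)
 */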
2273 | ||
2274 | #ifdef CONFIG_PROC_FS | |
2275 | /* | |
2276 | * This is invoked by the /proc filesystem handler to display a device | |
2277 | * in detail. | |
2278 | */ | |
7562f876 | 2279 | void *dev_seq_start(struct seq_file *seq, loff_t *pos) |
1da177e4 | 2280 | { |
7562f876 | 2281 | loff_t off; |
1da177e4 | 2282 | struct net_device *dev; |
1da177e4 | 2283 | |
7562f876 PE |
2284 | read_lock(&dev_base_lock); |
2285 | if (!*pos) | |
2286 | return SEQ_START_TOKEN; | |
1da177e4 | 2287 | |
7562f876 PE |
2288 | off = 1; |
2289 | for_each_netdev(dev) | |
2290 | if (off++ == *pos) | |
2291 | return dev; | |
1da177e4 | 2292 | |
7562f876 | 2293 | return NULL; |
1da177e4 LT |
2294 | } |
2295 | ||
2296 | void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
2297 | { | |
2298 | ++*pos; | |
7562f876 PE |
2299 | return v == SEQ_START_TOKEN ? |
2300 | first_net_device() : next_net_device((struct net_device *)v); | |
1da177e4 LT |
2301 | } |
2302 | ||
2303 | void dev_seq_stop(struct seq_file *seq, void *v) | |
2304 | { | |
2305 | read_unlock(&dev_base_lock); | |
2306 | } | |
2307 | ||
2308 | static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) | |
2309 | { | |
c45d286e | 2310 | struct net_device_stats *stats = dev->get_stats(dev); |
1da177e4 | 2311 | |
5a1b5898 RR |
2312 | seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " |
2313 | "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", | |
2314 | dev->name, stats->rx_bytes, stats->rx_packets, | |
2315 | stats->rx_errors, | |
2316 | stats->rx_dropped + stats->rx_missed_errors, | |
2317 | stats->rx_fifo_errors, | |
2318 | stats->rx_length_errors + stats->rx_over_errors + | |
2319 | stats->rx_crc_errors + stats->rx_frame_errors, | |
2320 | stats->rx_compressed, stats->multicast, | |
2321 | stats->tx_bytes, stats->tx_packets, | |
2322 | stats->tx_errors, stats->tx_dropped, | |
2323 | stats->tx_fifo_errors, stats->collisions, | |
2324 | stats->tx_carrier_errors + | |
2325 | stats->tx_aborted_errors + | |
2326 | stats->tx_window_errors + | |
2327 | stats->tx_heartbeat_errors, | |
2328 | stats->tx_compressed); | |
1da177e4 LT |
2329 | } |
2330 | ||
2331 | /* | |
2332 | * Called from the PROCfs module. This now uses the new arbitrary sized | |
2333 | * /proc/net interface to create /proc/net/dev | |
2334 | */ | |
2335 | static int dev_seq_show(struct seq_file *seq, void *v) | |
2336 | { | |
2337 | if (v == SEQ_START_TOKEN) | |
2338 | seq_puts(seq, "Inter-| Receive " | |
2339 | " | Transmit\n" | |
2340 | " face |bytes packets errs drop fifo frame " | |
2341 | "compressed multicast|bytes packets errs " | |
2342 | "drop fifo colls carrier compressed\n"); | |
2343 | else | |
2344 | dev_seq_printf_stats(seq, v); | |
2345 | return 0; | |
2346 | } | |
2347 | ||
2348 | static struct netif_rx_stats *softnet_get_online(loff_t *pos) | |
2349 | { | |
2350 | struct netif_rx_stats *rc = NULL; | |
2351 | ||
2352 | while (*pos < NR_CPUS) | |
4ec93edb | 2353 | if (cpu_online(*pos)) { |
1da177e4 LT |
2354 | rc = &per_cpu(netdev_rx_stat, *pos); |
2355 | break; | |
2356 | } else | |
2357 | ++*pos; | |
2358 | return rc; | |
2359 | } | |
2360 | ||
2361 | static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) | |
2362 | { | |
2363 | return softnet_get_online(pos); | |
2364 | } | |
2365 | ||
2366 | static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
2367 | { | |
2368 | ++*pos; | |
2369 | return softnet_get_online(pos); | |
2370 | } | |
2371 | ||
2372 | static void softnet_seq_stop(struct seq_file *seq, void *v) | |
2373 | { | |
2374 | } | |
2375 | ||
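/*
 * Each /proc/net/softnet_stat line describes one online cpu: packets
 * processed, packets dropped, softirq budget exhaustions
 * (time_squeeze), zero placeholders kept where older kernels reported
 * throttling and fastroute statistics, and cpu_collision.
 */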
2376 | static int softnet_seq_show(struct seq_file *seq, void *v) | |
2377 | { | |
2378 | struct netif_rx_stats *s = v; | |
2379 | ||
2380 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", | |
31aa02c5 | 2381 | s->total, s->dropped, s->time_squeeze, 0, |
c1ebcdb8 SH |
2382 | 0, 0, 0, 0, /* was fastroute */ |
2383 | s->cpu_collision ); | |
1da177e4 LT |
2384 | return 0; |
2385 | } | |
2386 | ||
f690808e | 2387 | static const struct seq_operations dev_seq_ops = { |
1da177e4 LT |
2388 | .start = dev_seq_start, |
2389 | .next = dev_seq_next, | |
2390 | .stop = dev_seq_stop, | |
2391 | .show = dev_seq_show, | |
2392 | }; | |
2393 | ||
2394 | static int dev_seq_open(struct inode *inode, struct file *file) | |
2395 | { | |
2396 | return seq_open(file, &dev_seq_ops); | |
2397 | } | |
2398 | ||
9a32144e | 2399 | static const struct file_operations dev_seq_fops = { |
1da177e4 LT |
2400 | .owner = THIS_MODULE, |
2401 | .open = dev_seq_open, | |
2402 | .read = seq_read, | |
2403 | .llseek = seq_lseek, | |
2404 | .release = seq_release, | |
2405 | }; | |
2406 | ||
f690808e | 2407 | static const struct seq_operations softnet_seq_ops = { |
1da177e4 LT |
2408 | .start = softnet_seq_start, |
2409 | .next = softnet_seq_next, | |
2410 | .stop = softnet_seq_stop, | |
2411 | .show = softnet_seq_show, | |
2412 | }; | |
2413 | ||
2414 | static int softnet_seq_open(struct inode *inode, struct file *file) | |
2415 | { | |
2416 | return seq_open(file, &softnet_seq_ops); | |
2417 | } | |
2418 | ||
9a32144e | 2419 | static const struct file_operations softnet_seq_fops = { |
1da177e4 LT |
2420 | .owner = THIS_MODULE, |
2421 | .open = softnet_seq_open, | |
2422 | .read = seq_read, | |
2423 | .llseek = seq_lseek, | |
2424 | .release = seq_release, | |
2425 | }; | |
2426 | ||
0e1256ff SH |
2427 | static void *ptype_get_idx(loff_t pos) |
2428 | { | |
2429 | struct packet_type *pt = NULL; | |
2430 | loff_t i = 0; | |
2431 | int t; | |
2432 | ||
2433 | list_for_each_entry_rcu(pt, &ptype_all, list) { | |
2434 | if (i == pos) | |
2435 | return pt; | |
2436 | ++i; | |
2437 | } | |
2438 | ||
2439 | for (t = 0; t < 16; t++) { | |
2440 | list_for_each_entry_rcu(pt, &ptype_base[t], list) { | |
2441 | if (i == pos) | |
2442 | return pt; | |
2443 | ++i; | |
2444 | } | |
2445 | } | |
2446 | return NULL; | |
2447 | } | |
2448 | ||
2449 | static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) | |
2450 | { | |
2451 | rcu_read_lock(); | |
2452 | return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; | |
2453 | } | |
2454 | ||
2455 | static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
2456 | { | |
2457 | struct packet_type *pt; | |
2458 | struct list_head *nxt; | |
2459 | int hash; | |
2460 | ||
2461 | ++*pos; | |
2462 | if (v == SEQ_START_TOKEN) | |
2463 | return ptype_get_idx(0); | |
2464 | ||
2465 | pt = v; | |
2466 | nxt = pt->list.next; | |
2467 | if (pt->type == htons(ETH_P_ALL)) { | |
2468 | if (nxt != &ptype_all) | |
2469 | goto found; | |
2470 | hash = 0; | |
2471 | nxt = ptype_base[0].next; | |
2472 | } else | |
2473 | hash = ntohs(pt->type) & 15; | |
2474 | ||
2475 | while (nxt == &ptype_base[hash]) { | |
2476 | if (++hash >= 16) | |
2477 | return NULL; | |
2478 | nxt = ptype_base[hash].next; | |
2479 | } | |
2480 | found: | |
2481 | return list_entry(nxt, struct packet_type, list); | |
2482 | } | |
2483 | ||
2484 | static void ptype_seq_stop(struct seq_file *seq, void *v) | |
2485 | { | |
2486 | rcu_read_unlock(); | |
2487 | } | |
2488 | ||
2489 | static void ptype_seq_decode(struct seq_file *seq, void *sym) | |
2490 | { | |
2491 | #ifdef CONFIG_KALLSYMS | |
2492 | unsigned long offset = 0, symsize; | |
2493 | const char *symname; | |
2494 | char *modname; | |
2495 | char namebuf[128]; | |
2496 | ||
2497 | symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset, | |
2498 | &modname, namebuf); | |
2499 | ||
2500 | if (symname) { | |
2501 | char *delim = ":"; | |
2502 | ||
2503 | if (!modname) | |
2504 | modname = delim = ""; | |
2505 | seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim, | |
2506 | symname, offset); | |
2507 | return; | |
2508 | } | |
2509 | #endif | |
2510 | ||
2511 | seq_printf(seq, "[%p]", sym); | |
2512 | } | |
2513 | ||
2514 | static int ptype_seq_show(struct seq_file *seq, void *v) | |
2515 | { | |
2516 | struct packet_type *pt = v; | |
2517 | ||
2518 | if (v == SEQ_START_TOKEN) | |
2519 | seq_puts(seq, "Type Device Function\n"); | |
2520 | else { | |
2521 | if (pt->type == htons(ETH_P_ALL)) | |
2522 | seq_puts(seq, "ALL "); | |
2523 | else | |
2524 | seq_printf(seq, "%04x", ntohs(pt->type)); | |
2525 | ||
2526 | seq_printf(seq, " %-8s ", | |
2527 | pt->dev ? pt->dev->name : ""); | |
2528 | ptype_seq_decode(seq, pt->func); | |
2529 | seq_putc(seq, '\n'); | |
2530 | } | |
2531 | ||
2532 | return 0; | |
2533 | } | |
2534 | ||
2535 | static const struct seq_operations ptype_seq_ops = { | |
2536 | .start = ptype_seq_start, | |
2537 | .next = ptype_seq_next, | |
2538 | .stop = ptype_seq_stop, | |
2539 | .show = ptype_seq_show, | |
2540 | }; | |
2541 | ||
2542 | static int ptype_seq_open(struct inode *inode, struct file *file) | |
2543 | { | |
2544 | return seq_open(file, &ptype_seq_ops); | |
2545 | } | |
2546 | ||
2547 | static const struct file_operations ptype_seq_fops = { | |
2548 | .owner = THIS_MODULE, | |
2549 | .open = ptype_seq_open, | |
2550 | .read = seq_read, | |
2551 | .llseek = seq_lseek, | |
2552 | .release = seq_release, | |
2553 | }; | |
2554 | ||
2555 | ||
1da177e4 LT |
2556 | static int __init dev_proc_init(void) |
2557 | { | |
2558 | int rc = -ENOMEM; | |
2559 | ||
457c4cbc | 2560 | if (!proc_net_fops_create(&init_net, "dev", S_IRUGO, &dev_seq_fops)) |
1da177e4 | 2561 | goto out; |
457c4cbc | 2562 | if (!proc_net_fops_create(&init_net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) |
1da177e4 | 2563 | goto out_dev; |
457c4cbc EB |
2564 | if (!proc_net_fops_create(&init_net, "ptype", S_IRUGO, &ptype_seq_fops)) |
2565 | goto out_softnet; | |
0e1256ff | 2566 | |
295f4a1f | 2567 | if (wext_proc_init()) |
457c4cbc | 2568 | goto out_ptype; |
1da177e4 LT |
2569 | rc = 0; |
2570 | out: | |
2571 | return rc; | |
457c4cbc EB |
2572 | out_ptype: |
2573 | proc_net_remove(&init_net, "ptype"); | |
1da177e4 | 2574 | out_softnet: |
457c4cbc | 2575 | proc_net_remove(&init_net, "softnet_stat"); |
1da177e4 | 2576 | out_dev: |
457c4cbc | 2577 | proc_net_remove(&init_net, "dev"); |
1da177e4 LT |
2578 | goto out; |
2579 | } | |
2580 | #else | |
2581 | #define dev_proc_init() 0 | |
2582 | #endif /* CONFIG_PROC_FS */ | |
2583 | ||
2584 | ||
2585 | /** | |
2586 | * netdev_set_master - set up master/slave pair | |
2587 | * @slave: slave device | |
2588 | * @master: new master device | |
2589 | * | |
2590 | * Changes the master device of the slave. Pass %NULL to break the | |
2591 | * bonding. The caller must hold the RTNL semaphore. On a failure | |
2592 | * a negative errno code is returned. On success the reference counts | |
2593 | * are adjusted, %RTM_NEWLINK is sent to the routing socket and the | |
2594 | * function returns zero. | |
2595 | */ | |
2596 | int netdev_set_master(struct net_device *slave, struct net_device *master) | |
2597 | { | |
2598 | struct net_device *old = slave->master; | |
2599 | ||
2600 | ASSERT_RTNL(); | |
2601 | ||
2602 | if (master) { | |
2603 | if (old) | |
2604 | return -EBUSY; | |
2605 | dev_hold(master); | |
2606 | } | |
2607 | ||
2608 | slave->master = master; | |
4ec93edb | 2609 | |
1da177e4 LT |
2610 | synchronize_net(); |
2611 | ||
2612 | if (old) | |
2613 | dev_put(old); | |
2614 | ||
2615 | if (master) | |
2616 | slave->flags |= IFF_SLAVE; | |
2617 | else | |
2618 | slave->flags &= ~IFF_SLAVE; | |
2619 | ||
2620 | rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); | |
2621 | return 0; | |
2622 | } | |
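/*
 * Illustrative sketch: the bonding driver enslaves and releases
 * devices through this call, under RTNL ("bond_dev" and "slave_dev"
 * are hypothetical):
 *
 *	err = netdev_set_master(slave_dev, bond_dev);	(enslave)
 *	...
 *	netdev_set_master(slave_dev, NULL);		(break the bond)
 */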
2623 | ||
4417da66 | 2624 | static void __dev_set_promiscuity(struct net_device *dev, int inc) |
1da177e4 LT |
2625 | { |
2626 | unsigned short old_flags = dev->flags; | |
2627 | ||
24023451 PM |
2628 | ASSERT_RTNL(); |
2629 | ||
1da177e4 LT |
2630 | if ((dev->promiscuity += inc) == 0) |
2631 | dev->flags &= ~IFF_PROMISC; | |
52609c0b DC |
2632 | else |
2633 | dev->flags |= IFF_PROMISC; | |
2634 | if (dev->flags != old_flags) { | |
1da177e4 LT |
2635 | printk(KERN_INFO "device %s %s promiscuous mode\n", |
2636 | dev->name, (dev->flags & IFF_PROMISC) ? "entered" : | |
4ec93edb | 2637 | "left"); |
5bdb9886 SG |
2638 | audit_log(current->audit_context, GFP_ATOMIC, |
2639 | AUDIT_ANOM_PROMISCUOUS, | |
2640 | "dev=%s prom=%d old_prom=%d auid=%u", | |
2641 | dev->name, (dev->flags & IFF_PROMISC), | |
2642 | (old_flags & IFF_PROMISC), | |
4ec93edb | 2643 | audit_get_loginuid(current->audit_context)); |
24023451 PM |
2644 | |
2645 | if (dev->change_rx_flags) | |
2646 | dev->change_rx_flags(dev, IFF_PROMISC); | |
1da177e4 LT |
2647 | } |
2648 | } | |
2649 | ||
4417da66 PM |
2650 | /** |
2651 | * dev_set_promiscuity - update promiscuity count on a device | |
2652 | * @dev: device | |
2653 | * @inc: modifier | |
2654 | * | |
2655 | * Add or remove promiscuity from a device. While the count in the device | |
2656 | * remains above zero the interface remains promiscuous. Once it hits zero | |
2657 | * the device reverts back to normal filtering operation. A negative inc | |
2658 | * value is used to drop promiscuity on the device. | |
2659 | */ | |
2660 | void dev_set_promiscuity(struct net_device *dev, int inc) | |
2661 | { | |
2662 | unsigned short old_flags = dev->flags; | |
2663 | ||
2664 | __dev_set_promiscuity(dev, inc); | |
2665 | if (dev->flags != old_flags) | |
2666 | dev_set_rx_mode(dev); | |
2667 | } | |
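/*
 * Illustrative sketch: a capture socket bumps the count while bound
 * and drops it again on close, so several independent users can
 * overlap safely:
 *
 *	dev_set_promiscuity(dev, 1);	(enter promiscuous mode)
 *	...
 *	dev_set_promiscuity(dev, -1);	(last user gone - leave it)
 */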
2668 | ||
1da177e4 LT |
2669 | /** |
2670 | * dev_set_allmulti - update allmulti count on a device | |
2671 | * @dev: device | |
2672 | * @inc: modifier | |
2673 | * | |
2674 | * Add or remove reception of all multicast frames to a device. While the | |
2675 | * count in the device remains above zero the interface remains listening | |
2676 | * to all multicast frames. Once it hits zero the device reverts back to normal |
2677 | * filtering operation. A negative @inc value is used to drop the counter | |
2678 | * when releasing a resource needing all multicasts. | |
2679 | */ | |
2680 | ||
2681 | void dev_set_allmulti(struct net_device *dev, int inc) | |
2682 | { | |
2683 | unsigned short old_flags = dev->flags; | |
2684 | ||
24023451 PM |
2685 | ASSERT_RTNL(); |
2686 | ||
1da177e4 LT |
2687 | dev->flags |= IFF_ALLMULTI; |
2688 | if ((dev->allmulti += inc) == 0) | |
2689 | dev->flags &= ~IFF_ALLMULTI; | |
24023451 PM |
2690 | if (dev->flags ^ old_flags) { |
2691 | if (dev->change_rx_flags) | |
2692 | dev->change_rx_flags(dev, IFF_ALLMULTI); | |
4417da66 | 2693 | dev_set_rx_mode(dev); |
24023451 | 2694 | } |
4417da66 PM |
2695 | } |
2696 | ||
2697 | /* | |
2698 | * Upload unicast and multicast address lists to device and | |
2699 | * configure RX filtering. When the device doesn't support unicast | |
2700 | * filtering it is put in promiscuous mode while unicast addresses |
2701 | * are present. | |
2702 | */ | |
2703 | void __dev_set_rx_mode(struct net_device *dev) | |
2704 | { | |
2705 | /* dev_open will call this function so the list will stay sane. */ | |
2706 | if (!(dev->flags&IFF_UP)) | |
2707 | return; | |
2708 | ||
2709 | if (!netif_device_present(dev)) | |
40b77c94 | 2710 | return; |
4417da66 PM |
2711 | |
2712 | if (dev->set_rx_mode) | |
2713 | dev->set_rx_mode(dev); | |
2714 | else { | |
2715 | /* Unicast addresses changes may only happen under the rtnl, | |
2716 | * therefore calling __dev_set_promiscuity here is safe. | |
2717 | */ | |
2718 | if (dev->uc_count > 0 && !dev->uc_promisc) { | |
2719 | __dev_set_promiscuity(dev, 1); | |
2720 | dev->uc_promisc = 1; | |
2721 | } else if (dev->uc_count == 0 && dev->uc_promisc) { | |
2722 | __dev_set_promiscuity(dev, -1); | |
2723 | dev->uc_promisc = 0; | |
2724 | } | |
2725 | ||
2726 | if (dev->set_multicast_list) | |
2727 | dev->set_multicast_list(dev); | |
2728 | } | |
2729 | } | |
2730 | ||
2731 | void dev_set_rx_mode(struct net_device *dev) | |
2732 | { | |
2733 | netif_tx_lock_bh(dev); | |
2734 | __dev_set_rx_mode(dev); | |
2735 | netif_tx_unlock_bh(dev); | |
1da177e4 LT |
2736 | } |
2737 | ||
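/*
 * Reference-counted address list helpers: __dev_addr_add() and
 * __dev_addr_delete() keep one dev_addr_list node per address and
 * bump da_users on duplicate adds; "glbl" marks a global (da_gusers)
 * reference that is installed and torn down as a unit.
 */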
61cbc2fc PM |
2738 | int __dev_addr_delete(struct dev_addr_list **list, int *count, |
2739 | void *addr, int alen, int glbl) | |
bf742482 PM |
2740 | { |
2741 | struct dev_addr_list *da; | |
2742 | ||
2743 | for (; (da = *list) != NULL; list = &da->next) { | |
2744 | if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && | |
2745 | alen == da->da_addrlen) { | |
2746 | if (glbl) { | |
2747 | int old_glbl = da->da_gusers; | |
2748 | da->da_gusers = 0; | |
2749 | if (old_glbl == 0) | |
2750 | break; | |
2751 | } | |
2752 | if (--da->da_users) | |
2753 | return 0; | |
2754 | ||
2755 | *list = da->next; | |
2756 | kfree(da); | |
61cbc2fc | 2757 | (*count)--; |
bf742482 PM |
2758 | return 0; |
2759 | } | |
2760 | } | |
2761 | return -ENOENT; | |
2762 | } | |
2763 | ||
61cbc2fc PM |
2764 | int __dev_addr_add(struct dev_addr_list **list, int *count, |
2765 | void *addr, int alen, int glbl) | |
bf742482 PM |
2766 | { |
2767 | struct dev_addr_list *da; | |
2768 | ||
2769 | for (da = *list; da != NULL; da = da->next) { | |
2770 | if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && | |
2771 | da->da_addrlen == alen) { | |
2772 | if (glbl) { | |
2773 | int old_glbl = da->da_gusers; | |
2774 | da->da_gusers = 1; | |
2775 | if (old_glbl) | |
2776 | return 0; | |
2777 | } | |
2778 | da->da_users++; | |
2779 | return 0; | |
2780 | } | |
2781 | } | |
2782 | ||
2783 | da = kmalloc(sizeof(*da), GFP_ATOMIC); | |
2784 | if (da == NULL) | |
2785 | return -ENOMEM; | |
2786 | memcpy(da->da_addr, addr, alen); | |
2787 | da->da_addrlen = alen; | |
2788 | da->da_users = 1; | |
2789 | da->da_gusers = glbl ? 1 : 0; | |
2790 | da->next = *list; | |
2791 | *list = da; | |
61cbc2fc | 2792 | (*count)++; |
bf742482 PM |
2793 | return 0; |
2794 | } | |
2795 | ||
4417da66 PM |
2796 | /** |
2797 | * dev_unicast_delete - Release secondary unicast address. | |
2798 | * @dev: device | |
0ed72ec4 RD |
2799 | * @addr: address to delete |
2800 | * @alen: length of @addr | |
4417da66 PM |
2801 | * |
2802 | * Release reference to a secondary unicast address and remove it | |
0ed72ec4 | 2803 | * from the device if the reference count drops to zero. |
4417da66 PM |
2804 | * |
2805 | * The caller must hold the rtnl_mutex. | |
2806 | */ | |
2807 | int dev_unicast_delete(struct net_device *dev, void *addr, int alen) | |
2808 | { | |
2809 | int err; | |
2810 | ||
2811 | ASSERT_RTNL(); | |
2812 | ||
2813 | netif_tx_lock_bh(dev); | |
61cbc2fc PM |
2814 | err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0); |
2815 | if (!err) | |
4417da66 | 2816 | __dev_set_rx_mode(dev); |
4417da66 PM |
2817 | netif_tx_unlock_bh(dev); |
2818 | return err; | |
2819 | } | |
2820 | EXPORT_SYMBOL(dev_unicast_delete); | |
2821 | ||
2822 | /** | |
2823 | * dev_unicast_add - add a secondary unicast address | |
2824 | * @dev: device | |
0ed72ec4 RD |
2825 | * @addr: address to add |
2826 | * @alen: length of @addr | |
4417da66 PM |
2827 | * |
2828 | * Add a secondary unicast address to the device or increase | |
2829 | * the reference count if it already exists. | |
2830 | * | |
2831 | * The caller must hold the rtnl_mutex. | |
2832 | */ | |
2833 | int dev_unicast_add(struct net_device *dev, void *addr, int alen) | |
2834 | { | |
2835 | int err; | |
2836 | ||
2837 | ASSERT_RTNL(); | |
2838 | ||
2839 | netif_tx_lock_bh(dev); | |
61cbc2fc PM |
2840 | err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0); |
2841 | if (!err) | |
4417da66 | 2842 | __dev_set_rx_mode(dev); |
4417da66 PM |
2843 | netif_tx_unlock_bh(dev); |
2844 | return err; | |
2845 | } | |
2846 | EXPORT_SYMBOL(dev_unicast_add); | |
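/*
 * Illustrative sketch: a virtual device stacked on a lower device can
 * listen on an extra MAC address by adding it to the lower device's
 * secondary unicast list, as macvlan does on open ("lowerdev" is
 * hypothetical here):
 *
 *	err = dev_unicast_add(lowerdev, dev->dev_addr, ETH_ALEN);
 */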
2847 | ||
12972621 DC |
2848 | static void __dev_addr_discard(struct dev_addr_list **list) |
2849 | { | |
2850 | struct dev_addr_list *tmp; | |
2851 | ||
2852 | while (*list != NULL) { | |
2853 | tmp = *list; | |
2854 | *list = tmp->next; | |
2855 | if (tmp->da_users > tmp->da_gusers) | |
2856 | printk("__dev_addr_discard: address leakage! " | |
2857 | "da_users=%d\n", tmp->da_users); | |
2858 | kfree(tmp); | |
2859 | } | |
2860 | } | |
2861 | ||
26cc2522 | 2862 | static void dev_addr_discard(struct net_device *dev) |
4417da66 PM |
2863 | { |
2864 | netif_tx_lock_bh(dev); | |
26cc2522 | 2865 | |
4417da66 PM |
2866 | __dev_addr_discard(&dev->uc_list); |
2867 | dev->uc_count = 0; | |
4417da66 | 2868 | |
456ad75c DC |
2869 | __dev_addr_discard(&dev->mc_list); |
2870 | dev->mc_count = 0; | |
26cc2522 | 2871 | |
456ad75c DC |
2872 | netif_tx_unlock_bh(dev); |
2873 | } | |
2874 | ||
1da177e4 LT |
2875 | unsigned dev_get_flags(const struct net_device *dev) |
2876 | { | |
2877 | unsigned flags; | |
2878 | ||
2879 | flags = (dev->flags & ~(IFF_PROMISC | | |
2880 | IFF_ALLMULTI | | |
b00055aa SR |
2881 | IFF_RUNNING | |
2882 | IFF_LOWER_UP | | |
2883 | IFF_DORMANT)) | | |
1da177e4 LT |
2884 | (dev->gflags & (IFF_PROMISC | |
2885 | IFF_ALLMULTI)); | |
2886 | ||
b00055aa SR |
2887 | if (netif_running(dev)) { |
2888 | if (netif_oper_up(dev)) | |
2889 | flags |= IFF_RUNNING; | |
2890 | if (netif_carrier_ok(dev)) | |
2891 | flags |= IFF_LOWER_UP; | |
2892 | if (netif_dormant(dev)) | |
2893 | flags |= IFF_DORMANT; | |
2894 | } | |
1da177e4 LT |
2895 | |
2896 | return flags; | |
2897 | } | |
2898 | ||
2899 | int dev_change_flags(struct net_device *dev, unsigned flags) | |
2900 | { | |
7c355f53 | 2901 | int ret, changes; |
1da177e4 LT |
2902 | int old_flags = dev->flags; |
2903 | ||
24023451 PM |
2904 | ASSERT_RTNL(); |
2905 | ||
1da177e4 LT |
2906 | /* |
2907 | * Set the flags on our device. | |
2908 | */ | |
2909 | ||
2910 | dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | | |
2911 | IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | | |
2912 | IFF_AUTOMEDIA)) | | |
2913 | (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | | |
2914 | IFF_ALLMULTI)); | |
2915 | ||
2916 | /* | |
2917 | * Load in the correct multicast list now the flags have changed. | |
2918 | */ | |
2919 | ||
24023451 PM |
2920 | if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST) |
2921 | dev->change_rx_flags(dev, IFF_MULTICAST); | |
2922 | ||
4417da66 | 2923 | dev_set_rx_mode(dev); |
1da177e4 LT |
2924 | |
2925 | /* | |
2926 | * Have we downed the interface. We handle IFF_UP ourselves | |
2927 | * according to user attempts to set it, rather than blindly | |
2928 | * setting it. | |
2929 | */ | |
2930 | ||
2931 | ret = 0; | |
2932 | if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ | |
2933 | ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev); | |
2934 | ||
2935 | if (!ret) | |
4417da66 | 2936 | dev_set_rx_mode(dev); |
1da177e4 LT |
2937 | } |
2938 | ||
2939 | if (dev->flags & IFF_UP && | |
2940 | ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | | |
2941 | IFF_VOLATILE))) | |
f07d5b94 | 2942 | raw_notifier_call_chain(&netdev_chain, |
e041c683 | 2943 | NETDEV_CHANGE, dev); |
1da177e4 LT |
2944 | |
2945 | if ((flags ^ dev->gflags) & IFF_PROMISC) { | |
2946 | int inc = (flags & IFF_PROMISC) ? +1 : -1; | |
2947 | dev->gflags ^= IFF_PROMISC; | |
2948 | dev_set_promiscuity(dev, inc); | |
2949 | } | |
2950 | ||
2951 | /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI | |
2952 | is important. Some (broken) drivers set IFF_PROMISC, when | |
2953 | IFF_ALLMULTI is requested not asking us and not reporting. | |
2954 | */ | |
2955 | if ((flags ^ dev->gflags) & IFF_ALLMULTI) { | |
2956 | int inc = (flags & IFF_ALLMULTI) ? +1 : -1; | |
2957 | dev->gflags ^= IFF_ALLMULTI; | |
2958 | dev_set_allmulti(dev, inc); | |
2959 | } | |
2960 | ||
7c355f53 TG |
2961 | /* Exclude state transition flags, already notified */ |
2962 | changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING); | |
2963 | if (changes) | |
2964 | rtmsg_ifinfo(RTM_NEWLINK, dev, changes); | |
1da177e4 LT |
2965 | |
2966 | return ret; | |
2967 | } | |
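/*
 * Illustrative sketch: bringing an interface up from inside the
 * kernel (under RTNL) amounts to:
 *
 *	dev_change_flags(dev, dev->flags | IFF_UP);
 */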
2968 | ||
2969 | int dev_set_mtu(struct net_device *dev, int new_mtu) | |
2970 | { | |
2971 | int err; | |
2972 | ||
2973 | if (new_mtu == dev->mtu) | |
2974 | return 0; | |
2975 | ||
2976 | /* MTU must be positive. */ | |
2977 | if (new_mtu < 0) | |
2978 | return -EINVAL; | |
2979 | ||
2980 | if (!netif_device_present(dev)) | |
2981 | return -ENODEV; | |
2982 | ||
2983 | err = 0; | |
2984 | if (dev->change_mtu) | |
2985 | err = dev->change_mtu(dev, new_mtu); | |
2986 | else | |
2987 | dev->mtu = new_mtu; | |
2988 | if (!err && dev->flags & IFF_UP) | |
f07d5b94 | 2989 | raw_notifier_call_chain(&netdev_chain, |
e041c683 | 2990 | NETDEV_CHANGEMTU, dev); |
1da177e4 LT |
2991 | return err; |
2992 | } | |
2993 | ||
2994 | int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) | |
2995 | { | |
2996 | int err; | |
2997 | ||
2998 | if (!dev->set_mac_address) | |
2999 | return -EOPNOTSUPP; | |
3000 | if (sa->sa_family != dev->type) | |
3001 | return -EINVAL; | |
3002 | if (!netif_device_present(dev)) | |
3003 | return -ENODEV; | |
3004 | err = dev->set_mac_address(dev, sa); | |
3005 | if (!err) | |
f07d5b94 | 3006 | raw_notifier_call_chain(&netdev_chain, |
e041c683 | 3007 | NETDEV_CHANGEADDR, dev); |
1da177e4 LT |
3008 | return err; |
3009 | } | |
3010 | ||
3011 | /* | |
3012 | * Perform the SIOCxIFxxx calls. | |
3013 | */ | |
3014 | static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) | |
3015 | { | |
3016 | int err; | |
3017 | struct net_device *dev = __dev_get_by_name(ifr->ifr_name); | |
3018 | ||
3019 | if (!dev) | |
3020 | return -ENODEV; | |
3021 | ||
3022 | switch (cmd) { | |
3023 | case SIOCGIFFLAGS: /* Get interface flags */ | |
3024 | ifr->ifr_flags = dev_get_flags(dev); | |
3025 | return 0; | |
3026 | ||
3027 | case SIOCSIFFLAGS: /* Set interface flags */ | |
3028 | return dev_change_flags(dev, ifr->ifr_flags); | |
3029 | ||
3030 | case SIOCGIFMETRIC: /* Get the metric on the interface | |
3031 | (currently unused) */ | |
3032 | ifr->ifr_metric = 0; | |
3033 | return 0; | |
3034 | ||
3035 | case SIOCSIFMETRIC: /* Set the metric on the interface | |
3036 | (currently unused) */ | |
3037 | return -EOPNOTSUPP; | |
3038 | ||
3039 | case SIOCGIFMTU: /* Get the MTU of a device */ | |
3040 | ifr->ifr_mtu = dev->mtu; | |
3041 | return 0; | |
3042 | ||
3043 | case SIOCSIFMTU: /* Set the MTU of a device */ | |
3044 | return dev_set_mtu(dev, ifr->ifr_mtu); | |
3045 | ||
3046 | case SIOCGIFHWADDR: | |
3047 | if (!dev->addr_len) | |
3048 | memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); | |
3049 | else | |
3050 | memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, | |
3051 | min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); | |
3052 | ifr->ifr_hwaddr.sa_family = dev->type; | |
3053 | return 0; | |
3054 | ||
3055 | case SIOCSIFHWADDR: | |
3056 | return dev_set_mac_address(dev, &ifr->ifr_hwaddr); | |
3057 | ||
3058 | case SIOCSIFHWBROADCAST: | |
3059 | if (ifr->ifr_hwaddr.sa_family != dev->type) | |
3060 | return -EINVAL; | |
3061 | memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, | |
3062 | min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); | |
f07d5b94 | 3063 | raw_notifier_call_chain(&netdev_chain, |
1da177e4 LT |
3064 | NETDEV_CHANGEADDR, dev); |
3065 | return 0; | |
3066 | ||
3067 | case SIOCGIFMAP: | |
3068 | ifr->ifr_map.mem_start = dev->mem_start; | |
3069 | ifr->ifr_map.mem_end = dev->mem_end; | |
3070 | ifr->ifr_map.base_addr = dev->base_addr; | |
3071 | ifr->ifr_map.irq = dev->irq; | |
3072 | ifr->ifr_map.dma = dev->dma; | |
3073 | ifr->ifr_map.port = dev->if_port; | |
3074 | return 0; | |
3075 | ||
3076 | case SIOCSIFMAP: | |
3077 | if (dev->set_config) { | |
3078 | if (!netif_device_present(dev)) | |
3079 | return -ENODEV; | |
3080 | return dev->set_config(dev, &ifr->ifr_map); | |
3081 | } | |
3082 | return -EOPNOTSUPP; | |
3083 | ||
3084 | case SIOCADDMULTI: | |
3085 | if (!dev->set_multicast_list || | |
3086 | ifr->ifr_hwaddr.sa_family != AF_UNSPEC) | |
3087 | return -EINVAL; | |
3088 | if (!netif_device_present(dev)) | |
3089 | return -ENODEV; | |
3090 | return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, | |
3091 | dev->addr_len, 1); | |
3092 | ||
3093 | case SIOCDELMULTI: | |
3094 | if (!dev->set_multicast_list || | |
3095 | ifr->ifr_hwaddr.sa_family != AF_UNSPEC) | |
3096 | return -EINVAL; | |
3097 | if (!netif_device_present(dev)) | |
3098 | return -ENODEV; | |
3099 | return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, | |
3100 | dev->addr_len, 1); | |
3101 | ||
3102 | case SIOCGIFINDEX: | |
3103 | ifr->ifr_ifindex = dev->ifindex; | |
3104 | return 0; | |
3105 | ||
3106 | case SIOCGIFTXQLEN: | |
3107 | ifr->ifr_qlen = dev->tx_queue_len; | |
3108 | return 0; | |
3109 | ||
3110 | case SIOCSIFTXQLEN: | |
3111 | if (ifr->ifr_qlen < 0) | |
3112 | return -EINVAL; | |
3113 | dev->tx_queue_len = ifr->ifr_qlen; | |
3114 | return 0; | |
3115 | ||
3116 | case SIOCSIFNAME: | |
3117 | ifr->ifr_newname[IFNAMSIZ-1] = '\0'; | |
3118 | return dev_change_name(dev, ifr->ifr_newname); | |
3119 | ||
3120 | /* | |
3121 | * Unknown or private ioctl | |
3122 | */ | |
3123 | ||
3124 | default: | |
3125 | if ((cmd >= SIOCDEVPRIVATE && | |
3126 | cmd <= SIOCDEVPRIVATE + 15) || | |
3127 | cmd == SIOCBONDENSLAVE || | |
3128 | cmd == SIOCBONDRELEASE || | |
3129 | cmd == SIOCBONDSETHWADDR || | |
3130 | cmd == SIOCBONDSLAVEINFOQUERY || | |
3131 | cmd == SIOCBONDINFOQUERY || | |
3132 | cmd == SIOCBONDCHANGEACTIVE || | |
3133 | cmd == SIOCGMIIPHY || | |
3134 | cmd == SIOCGMIIREG || | |
3135 | cmd == SIOCSMIIREG || | |
3136 | cmd == SIOCBRADDIF || | |
3137 | cmd == SIOCBRDELIF || | |
3138 | cmd == SIOCWANDEV) { | |
3139 | err = -EOPNOTSUPP; | |
3140 | if (dev->do_ioctl) { | |
3141 | if (netif_device_present(dev)) | |
3142 | err = dev->do_ioctl(dev, ifr, | |
3143 | cmd); | |
3144 | else | |
3145 | err = -ENODEV; | |
3146 | } | |
3147 | } else | |
3148 | err = -EINVAL; | |
3149 | ||
3150 | } | |
3151 | return err; | |
3152 | } | |
3153 | ||
3154 | /* | |
3155 | * This function handles all "interface"-type I/O control requests. The actual | |
3156 | * 'doing' part of this is dev_ifsioc above. | |
3157 | */ | |
3158 | ||
3159 | /** | |
3160 | * dev_ioctl - network device ioctl | |
3161 | * @cmd: command to issue | |
3162 | * @arg: pointer to a struct ifreq in user space | |
3163 | * | |
3164 | * Issue ioctl functions to devices. This is normally called by the | |
3165 | * user space syscall interfaces but can sometimes be useful for | |
3166 | * other purposes. The return value is the return from the syscall if | |
3167 | * positive or a negative errno code on error. | |
3168 | */ | |
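/*
 * Illustrative sketch (userspace side): any socket fd serves as a
 * handle for these requests, e.g. to read an MTU:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr;
 *	strcpy(ifr.ifr_name, "eth0");
 *	ioctl(fd, SIOCGIFMTU, &ifr);	(ifr.ifr_mtu now holds the MTU)
 */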
3169 | ||
3170 | int dev_ioctl(unsigned int cmd, void __user *arg) | |
3171 | { | |
3172 | struct ifreq ifr; | |
3173 | int ret; | |
3174 | char *colon; | |
3175 | ||
3176 | /* One special case: SIOCGIFCONF takes ifconf argument | |
3177 | and requires shared lock, because it sleeps writing | |
3178 | to user space. | |
3179 | */ | |
3180 | ||
3181 | if (cmd == SIOCGIFCONF) { | |
6756ae4b | 3182 | rtnl_lock(); |
1da177e4 | 3183 | ret = dev_ifconf((char __user *) arg); |
6756ae4b | 3184 | rtnl_unlock(); |
1da177e4 LT |
3185 | return ret; |
3186 | } | |
3187 | if (cmd == SIOCGIFNAME) | |
3188 | return dev_ifname((struct ifreq __user *)arg); | |
3189 | ||
3190 | if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) | |
3191 | return -EFAULT; | |
3192 | ||
3193 | ifr.ifr_name[IFNAMSIZ-1] = 0; | |
3194 | ||
3195 | colon = strchr(ifr.ifr_name, ':'); | |
3196 | if (colon) | |
3197 | *colon = 0; | |
3198 | ||
3199 | /* | |
3200 | * See which interface the caller is talking about. | |
3201 | */ | |
3202 | ||
3203 | switch (cmd) { | |
3204 | /* | |
3205 | * These ioctl calls: | |
3206 | * - can be done by all. | |
3207 | * - atomic and do not require locking. | |
3208 | * - return a value | |
3209 | */ | |
3210 | case SIOCGIFFLAGS: | |
3211 | case SIOCGIFMETRIC: | |
3212 | case SIOCGIFMTU: | |
3213 | case SIOCGIFHWADDR: | |
3214 | case SIOCGIFSLAVE: | |
3215 | case SIOCGIFMAP: | |
3216 | case SIOCGIFINDEX: | |
3217 | case SIOCGIFTXQLEN: | |
3218 | dev_load(ifr.ifr_name); | |
3219 | read_lock(&dev_base_lock); | |
3220 | ret = dev_ifsioc(&ifr, cmd); | |
3221 | read_unlock(&dev_base_lock); | |
3222 | if (!ret) { | |
3223 | if (colon) | |
3224 | *colon = ':'; | |
3225 | if (copy_to_user(arg, &ifr, | |
3226 | sizeof(struct ifreq))) | |
3227 | ret = -EFAULT; | |
3228 | } | |
3229 | return ret; | |
3230 | ||
3231 | case SIOCETHTOOL: | |
3232 | dev_load(ifr.ifr_name); | |
3233 | rtnl_lock(); | |
3234 | ret = dev_ethtool(&ifr); | |
3235 | rtnl_unlock(); | |
3236 | if (!ret) { | |
3237 | if (colon) | |
3238 | *colon = ':'; | |
3239 | if (copy_to_user(arg, &ifr, | |
3240 | sizeof(struct ifreq))) | |
3241 | ret = -EFAULT; | |
3242 | } | |
3243 | return ret; | |
3244 | ||
3245 | /* | |
3246 | * These ioctl calls: | |
3247 | * - require superuser power. | |
3248 | * - require strict serialization. | |
3249 | * - return a value | |
3250 | */ | |
3251 | case SIOCGMIIPHY: | |
3252 | case SIOCGMIIREG: | |
3253 | case SIOCSIFNAME: | |
3254 | if (!capable(CAP_NET_ADMIN)) | |
3255 | return -EPERM; | |
3256 | dev_load(ifr.ifr_name); | |
3257 | rtnl_lock(); | |
3258 | ret = dev_ifsioc(&ifr, cmd); | |
3259 | rtnl_unlock(); | |
3260 | if (!ret) { | |
3261 | if (colon) | |
3262 | *colon = ':'; | |
3263 | if (copy_to_user(arg, &ifr, | |
3264 | sizeof(struct ifreq))) | |
3265 | ret = -EFAULT; | |
3266 | } | |
3267 | return ret; | |
3268 | ||
3269 | /* | |
3270 | * These ioctl calls: | |
3271 | * - require superuser power. | |
3272 | * - require strict serialization. | |
3273 | * - do not return a value | |
3274 | */ | |
3275 | case SIOCSIFFLAGS: | |
3276 | case SIOCSIFMETRIC: | |
3277 | case SIOCSIFMTU: | |
3278 | case SIOCSIFMAP: | |
3279 | case SIOCSIFHWADDR: | |
3280 | case SIOCSIFSLAVE: | |
3281 | case SIOCADDMULTI: | |
3282 | case SIOCDELMULTI: | |
3283 | case SIOCSIFHWBROADCAST: | |
3284 | case SIOCSIFTXQLEN: | |
3285 | case SIOCSMIIREG: | |
3286 | case SIOCBONDENSLAVE: | |
3287 | case SIOCBONDRELEASE: | |
3288 | case SIOCBONDSETHWADDR: | |
1da177e4 LT |
3289 | case SIOCBONDCHANGEACTIVE: |
3290 | case SIOCBRADDIF: | |
3291 | case SIOCBRDELIF: | |
3292 | if (!capable(CAP_NET_ADMIN)) | |
3293 | return -EPERM; | |
cabcac0b TG |
3294 | /* fall through */ |
3295 | case SIOCBONDSLAVEINFOQUERY: | |
3296 | case SIOCBONDINFOQUERY: | |
1da177e4 LT |
3297 | dev_load(ifr.ifr_name); |
3298 | rtnl_lock(); | |
3299 | ret = dev_ifsioc(&ifr, cmd); | |
3300 | rtnl_unlock(); | |
3301 | return ret; | |
3302 | ||
3303 | case SIOCGIFMEM: | |
3304 | /* Get the per-device memory space. We could add this but | |
3305 | * currently do not support it */ | |
3306 | case SIOCSIFMEM: | |
3307 | /* Set the per-device memory buffer space. | |
3308 | * Not applicable in our case */ | |
3309 | case SIOCSIFLINK: | |
3310 | return -EINVAL; | |
3311 | ||
3312 | /* | |
3313 | * Unknown or private ioctl. | |
3314 | */ | |
3315 | default: | |
3316 | if (cmd == SIOCWANDEV || | |
3317 | (cmd >= SIOCDEVPRIVATE && | |
3318 | cmd <= SIOCDEVPRIVATE + 15)) { | |
3319 | dev_load(ifr.ifr_name); | |
3320 | rtnl_lock(); | |
3321 | ret = dev_ifsioc(&ifr, cmd); | |
3322 | rtnl_unlock(); | |
3323 | if (!ret && copy_to_user(arg, &ifr, | |
3324 | sizeof(struct ifreq))) | |
3325 | ret = -EFAULT; | |
3326 | return ret; | |
3327 | } | |
1da177e4 | 3328 | /* Take care of Wireless Extensions */ |
295f4a1f JB |
3329 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) |
3330 | return wext_handle_ioctl(&ifr, cmd, arg); | |
1da177e4 LT |
3331 | return -EINVAL; |
3332 | } | |
3333 | } | |
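/*
 * Illustrative user-space sketch (not part of this file): exercising
 * the SIOCGIFINDEX branch of dev_ioctl() above through a socket fd.
 * Wrapped in #if 0 because it is not kernel code; "eth0" is only an
 * example name.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (fd >= 0 && ioctl(fd, SIOCGIFINDEX, &ifr) == 0)
		printf("%s: ifindex %d\n", ifr.ifr_name, ifr.ifr_ifindex);
	close(fd);
	return 0;
}
#endif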
3334 | ||
3335 | ||
3336 | /** | |
3337 | * dev_new_index - allocate an ifindex | |
3338 | * | |
3339 | * Returns a suitable unique value for a new device interface | |
3340 | * number. The caller must hold the rtnl semaphore or the | |
3341 | * dev_base_lock to be sure it remains unique. | |
3342 | */ | |
3343 | static int dev_new_index(void) | |
3344 | { | |
3345 | static int ifindex; | |
3346 | for (;;) { | |
3347 | if (++ifindex <= 0) | |
3348 | ifindex = 1; | |
3349 | if (!__dev_get_by_index(ifindex)) | |
3350 | return ifindex; | |
3351 | } | |
3352 | } | |
3353 | ||
3354 | static int dev_boot_phase = 1; | |
3355 | ||
3356 | /* Delayed registration/unregistration */ | |
3357 | static DEFINE_SPINLOCK(net_todo_list_lock); | |
3358 | static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list); | |
3359 | ||
6f05f629 | 3360 | static void net_set_todo(struct net_device *dev) |
1da177e4 LT |
3361 | { |
3362 | spin_lock(&net_todo_list_lock); | |
3363 | list_add_tail(&dev->todo_list, &net_todo_list); | |
3364 | spin_unlock(&net_todo_list_lock); | |
3365 | } | |
3366 | ||
3367 | /** | |
3368 | * register_netdevice - register a network device | |
3369 | * @dev: device to register | |
3370 | * | |
3371 | * Take a completed network device structure and add it to the kernel | |
3372 | * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier | |
3373 | * chain. 0 is returned on success. A negative errno code is returned | |
3374 | * on a failure to set up the device, or if the name is a duplicate. | |
3375 | * | |
3376 | * Callers must hold the rtnl semaphore. You may want | |
3377 | * register_netdev() instead of this. | |
3378 | * | |
3379 | * BUGS: | |
3380 | * The locking appears insufficient to guarantee two parallel registers | |
3381 | * will not get the same name. | |
3382 | */ | |
3383 | ||
3384 | int register_netdevice(struct net_device *dev) | |
3385 | { | |
3386 | struct hlist_head *head; | |
3387 | struct hlist_node *p; | |
3388 | int ret; | |
3389 | ||
3390 | BUG_ON(dev_boot_phase); | |
3391 | ASSERT_RTNL(); | |
3392 | ||
b17a7c17 SH |
3393 | might_sleep(); |
3394 | ||
1da177e4 LT |
3395 | /* When net_devices are persistent, this will be fatal. */ | |
3396 | BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); | |
3397 | ||
3398 | spin_lock_init(&dev->queue_lock); | |
932ff279 | 3399 | spin_lock_init(&dev->_xmit_lock); |
723e98b7 | 3400 | netdev_set_lockdep_class(&dev->_xmit_lock, dev->type); |
1da177e4 | 3401 | dev->xmit_lock_owner = -1; |
1da177e4 | 3402 | spin_lock_init(&dev->ingress_lock); |
1da177e4 | 3403 | |
1da177e4 LT |
3404 | dev->iflink = -1; |
3405 | ||
3406 | /* Init, if this function is available */ | |
3407 | if (dev->init) { | |
3408 | ret = dev->init(dev); | |
3409 | if (ret) { | |
3410 | if (ret > 0) | |
3411 | ret = -EIO; | |
90833aa4 | 3412 | goto out; |
1da177e4 LT |
3413 | } |
3414 | } | |
4ec93edb | 3415 | |
1da177e4 LT |
3416 | if (!dev_valid_name(dev->name)) { |
3417 | ret = -EINVAL; | |
7ce1b0ed | 3418 | goto err_uninit; |
1da177e4 LT |
3419 | } |
3420 | ||
3421 | dev->ifindex = dev_new_index(); | |
3422 | if (dev->iflink == -1) | |
3423 | dev->iflink = dev->ifindex; | |
3424 | ||
3425 | /* Check for existence of name */ | |
3426 | head = dev_name_hash(dev->name); | |
3427 | hlist_for_each(p, head) { | |
3428 | struct net_device *d | |
3429 | = hlist_entry(p, struct net_device, name_hlist); | |
3430 | if (!strncmp(d->name, dev->name, IFNAMSIZ)) { | |
3431 | ret = -EEXIST; | |
7ce1b0ed | 3432 | goto err_uninit; |
1da177e4 | 3433 | } |
4ec93edb | 3434 | } |
1da177e4 | 3435 | |
d212f87b SH |
3436 | /* Fix illegal checksum combinations */ |
3437 | if ((dev->features & NETIF_F_HW_CSUM) && | |
3438 | (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | |
3439 | printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n", | |
3440 | dev->name); | |
3441 | dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); | |
3442 | } | |
3443 | ||
3444 | if ((dev->features & NETIF_F_NO_CSUM) && | |
3445 | (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | |
3446 | printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n", | |
3447 | dev->name); | |
3448 | dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); | |
3449 | } | |
3450 | ||
3451 | ||
1da177e4 LT |
3452 | /* Fix illegal SG+CSUM combinations. */ |
3453 | if ((dev->features & NETIF_F_SG) && | |
8648b305 | 3454 | !(dev->features & NETIF_F_ALL_CSUM)) { |
5a8da02b | 3455 | printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n", |
1da177e4 LT |
3456 | dev->name); |
3457 | dev->features &= ~NETIF_F_SG; | |
3458 | } | |
3459 | ||
3460 | /* TSO requires that SG is present as well. */ | |
3461 | if ((dev->features & NETIF_F_TSO) && | |
3462 | !(dev->features & NETIF_F_SG)) { | |
5a8da02b | 3463 | printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n", |
1da177e4 LT |
3464 | dev->name); |
3465 | dev->features &= ~NETIF_F_TSO; | |
3466 | } | |
e89e9cf5 AR |
3467 | if (dev->features & NETIF_F_UFO) { |
3468 | if (!(dev->features & NETIF_F_HW_CSUM)) { | |
3469 | printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no " | |
3470 | "NETIF_F_HW_CSUM feature.\n", | |
3471 | dev->name); | |
3472 | dev->features &= ~NETIF_F_UFO; | |
3473 | } | |
3474 | if (!(dev->features & NETIF_F_SG)) { | |
3475 | printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no " | |
3476 | "NETIF_F_SG feature.\n", | |
3477 | dev->name); | |
3478 | dev->features &= ~NETIF_F_UFO; | |
3479 | } | |
3480 | } | |
1da177e4 LT |
3481 | |
3482 | /* | |
3483 | * Install a nil rebuild_header routine that should never be | |
3484 | * called; it is used just as a bug trap. | |
3485 | */ | |
3486 | ||
3487 | if (!dev->rebuild_header) | |
3488 | dev->rebuild_header = default_rebuild_header; | |
3489 | ||
b17a7c17 SH |
3490 | ret = netdev_register_sysfs(dev); |
3491 | if (ret) | |
7ce1b0ed | 3492 | goto err_uninit; |
b17a7c17 SH |
3493 | dev->reg_state = NETREG_REGISTERED; |
3494 | ||
1da177e4 LT |
3495 | /* |
3496 | * Default initial state at registry is that the | |
3497 | * device is present. | |
3498 | */ | |
3499 | ||
3500 | set_bit(__LINK_STATE_PRESENT, &dev->state); | |
3501 | ||
1da177e4 LT |
3502 | dev_init_scheduler(dev); |
3503 | write_lock_bh(&dev_base_lock); | |
7562f876 | 3504 | list_add_tail(&dev->dev_list, &dev_base_head); |
1da177e4 LT |
3505 | hlist_add_head(&dev->name_hlist, head); |
3506 | hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex)); | |
3507 | dev_hold(dev); | |
1da177e4 LT |
3508 | write_unlock_bh(&dev_base_lock); |
3509 | ||
3510 | /* Notify protocols that a new device has appeared. */ | |
fcc5a03a HX |
3511 | ret = raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev); |
3512 | ret = notifier_to_errno(ret); | |
3513 | if (ret) | |
3514 | unregister_netdevice(dev); | |
1da177e4 LT |
3515 | |
3516 | out: | |
3517 | return ret; | |
7ce1b0ed HX |
3518 | |
3519 | err_uninit: | |
3520 | if (dev->uninit) | |
3521 | dev->uninit(dev); | |
3522 | goto out; | |
1da177e4 LT |
3523 | } |
3524 | ||
3525 | /** | |
3526 | * register_netdev - register a network device | |
3527 | * @dev: device to register | |
3528 | * | |
3529 | * Take a completed network device structure and add it to the kernel | |
3530 | * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier | |
3531 | * chain. 0 is returned on success. A negative errno code is returned | |
3532 | * on a failure to set up the device, or if the name is a duplicate. | |
3533 | * | |
38b4da38 | 3534 | * This is a wrapper around register_netdevice that takes the rtnl semaphore |
1da177e4 LT |
3535 | * and expands the device name if you passed a format string to |
3536 | * alloc_netdev. | |
3537 | */ | |
3538 | int register_netdev(struct net_device *dev) | |
3539 | { | |
3540 | int err; | |
3541 | ||
3542 | rtnl_lock(); | |
3543 | ||
3544 | /* | |
3545 | * If the name is a format string the caller wants us to do a | |
3546 | * name allocation. | |
3547 | */ | |
3548 | if (strchr(dev->name, '%')) { | |
3549 | err = dev_alloc_name(dev, dev->name); | |
3550 | if (err < 0) | |
3551 | goto out; | |
3552 | } | |
4ec93edb | 3553 | |
1da177e4 LT |
3554 | err = register_netdevice(dev); |
3555 | out: | |
3556 | rtnl_unlock(); | |
3557 | return err; | |
3558 | } | |
3559 | EXPORT_SYMBOL(register_netdev); | |
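/*
 * Illustrative sketch (not part of this file): the usual driver-side
 * pairing of alloc_netdev() with register_netdev(). foo_setup and
 * foo_probe are hypothetical names.
 */
static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);	/* fill in sane Ethernet defaults */
}

static int foo_probe(void)
{
	struct net_device *dev;
	int err;

	/* "eth%d" asks register_netdev() to pick the next free unit */
	dev = alloc_netdev(0, "eth%d", foo_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);	/* never registered, plain free is safe */
	return err;
}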
3560 | ||
3561 | /* | |
3562 | * netdev_wait_allrefs - wait until all references are gone. | |
3563 | * | |
3564 | * This is called when unregistering network devices. | |
3565 | * | |
3566 | * Any protocol or device that holds a reference should register | |
3567 | * for netdevice notification, and clean up and release the | |
3568 | * reference if it receives an UNREGISTER event. | |
3569 | * We can get stuck here if buggy protocols don't correctly | |
4ec93edb | 3570 | * call dev_put. |
1da177e4 LT |
3571 | */ |
3572 | static void netdev_wait_allrefs(struct net_device *dev) | |
3573 | { | |
3574 | unsigned long rebroadcast_time, warning_time; | |
3575 | ||
3576 | rebroadcast_time = warning_time = jiffies; | |
3577 | while (atomic_read(&dev->refcnt) != 0) { | |
3578 | if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { | |
6756ae4b | 3579 | rtnl_lock(); |
1da177e4 LT |
3580 | |
3581 | /* Rebroadcast unregister notification */ | |
f07d5b94 | 3582 | raw_notifier_call_chain(&netdev_chain, |
1da177e4 LT |
3583 | NETDEV_UNREGISTER, dev); |
3584 | ||
3585 | if (test_bit(__LINK_STATE_LINKWATCH_PENDING, | |
3586 | &dev->state)) { | |
3587 | /* We must not have linkwatch events | |
3588 | * pending on unregister. If this | |
3589 | * happens, we simply run the queue | |
3590 | * unscheduled, resulting in a noop | |
3591 | * for this device. | |
3592 | */ | |
3593 | linkwatch_run_queue(); | |
3594 | } | |
3595 | ||
6756ae4b | 3596 | __rtnl_unlock(); |
1da177e4 LT |
3597 | |
3598 | rebroadcast_time = jiffies; | |
3599 | } | |
3600 | ||
3601 | msleep(250); | |
3602 | ||
3603 | if (time_after(jiffies, warning_time + 10 * HZ)) { | |
3604 | printk(KERN_EMERG "unregister_netdevice: " | |
3605 | "waiting for %s to become free. Usage " | |
3606 | "count = %d\n", | |
3607 | dev->name, atomic_read(&dev->refcnt)); | |
3608 | warning_time = jiffies; | |
3609 | } | |
3610 | } | |
3611 | } | |
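/*
 * Illustrative sketch (not part of this file): the kind of notifier a
 * reference holder registers so that netdev_wait_allrefs() above can
 * make progress. foo_netdev_event and foo_nb are hypothetical names.
 */
static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER) {
		/* tear down any per-device state here, then ... */
		dev_put(dev);	/* ... release the reference we held */
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_netdev_event,
};
/* paired with register_netdevice_notifier(&foo_nb) at init time */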
3612 | ||
3613 | /* The sequence is: | |
3614 | * | |
3615 | * rtnl_lock(); | |
3616 | * ... | |
3617 | * register_netdevice(x1); | |
3618 | * register_netdevice(x2); | |
3619 | * ... | |
3620 | * unregister_netdevice(y1); | |
3621 | * unregister_netdevice(y2); | |
3622 | * ... | |
3623 | * rtnl_unlock(); | |
3624 | * free_netdev(y1); | |
3625 | * free_netdev(y2); | |
3626 | * | |
3627 | * We are invoked by rtnl_unlock() after it drops the semaphore. | |
3628 | * This allows us to deal with problems: | |
b17a7c17 | 3629 | * 1) We can delete sysfs objects which invoke hotplug |
1da177e4 LT |
3630 | * without deadlocking with linkwatch via keventd. |
3631 | * 2) Since we run with the RTNL semaphore not held, we can sleep | |
3632 | * safely in order to wait for the netdev refcnt to drop to zero. | |
3633 | */ | |
4a3e2f71 | 3634 | static DEFINE_MUTEX(net_todo_run_mutex); |
1da177e4 LT |
3635 | void netdev_run_todo(void) |
3636 | { | |
626ab0e6 | 3637 | struct list_head list; |
1da177e4 LT |
3638 | |
3639 | /* Need to guard against multiple CPUs getting out of order. */ | |
4a3e2f71 | 3640 | mutex_lock(&net_todo_run_mutex); |
1da177e4 LT |
3641 | |
3642 | /* Not safe to do outside the semaphore. We must not return | |
3643 | * until all unregister events invoked by the local processor | |
3644 | * have been completed (either by this todo run, or one on | |
3645 | * another cpu). | |
3646 | */ | |
3647 | if (list_empty(&net_todo_list)) | |
3648 | goto out; | |
3649 | ||
3650 | /* Snapshot list, allow later requests */ | |
3651 | spin_lock(&net_todo_list_lock); | |
626ab0e6 | 3652 | list_replace_init(&net_todo_list, &list); |
1da177e4 | 3653 | spin_unlock(&net_todo_list_lock); |
626ab0e6 | 3654 | |
1da177e4 LT |
3655 | while (!list_empty(&list)) { |
3656 | struct net_device *dev | |
3657 | = list_entry(list.next, struct net_device, todo_list); | |
3658 | list_del(&dev->todo_list); | |
3659 | ||
b17a7c17 SH |
3660 | if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { |
3661 | printk(KERN_ERR "network todo '%s' but state %d\n", | |
3662 | dev->name, dev->reg_state); | |
3663 | dump_stack(); | |
3664 | continue; | |
3665 | } | |
1da177e4 | 3666 | |
b17a7c17 | 3667 | dev->reg_state = NETREG_UNREGISTERED; |
1da177e4 | 3668 | |
b17a7c17 | 3669 | netdev_wait_allrefs(dev); |
1da177e4 | 3670 | |
b17a7c17 SH |
3671 | /* paranoia */ |
3672 | BUG_ON(atomic_read(&dev->refcnt)); | |
3673 | BUG_TRAP(!dev->ip_ptr); | |
3674 | BUG_TRAP(!dev->ip6_ptr); | |
3675 | BUG_TRAP(!dev->dn_ptr); | |
1da177e4 | 3676 | |
b17a7c17 SH |
3677 | if (dev->destructor) |
3678 | dev->destructor(dev); | |
9093bbb2 SH |
3679 | |
3680 | /* Free network device */ | |
3681 | kobject_put(&dev->dev.kobj); | |
1da177e4 LT |
3682 | } |
3683 | ||
3684 | out: | |
4a3e2f71 | 3685 | mutex_unlock(&net_todo_run_mutex); |
1da177e4 LT |
3686 | } |
3687 | ||
5a1b5898 | 3688 | static struct net_device_stats *internal_stats(struct net_device *dev) |
c45d286e | 3689 | { |
5a1b5898 | 3690 | return &dev->stats; |
c45d286e RR |
3691 | } |
3692 | ||
1da177e4 | 3693 | /** |
f25f4e44 | 3694 | * alloc_netdev_mq - allocate network device |
1da177e4 LT |
3695 | * @sizeof_priv: size of private data to allocate space for |
3696 | * @name: device name format string | |
3697 | * @setup: callback to initialize device | |
f25f4e44 | 3698 | * @queue_count: the number of subqueues to allocate |
1da177e4 LT |
3699 | * |
3700 | * Allocates a struct net_device with private data area for driver use | |
f25f4e44 PWJ |
3701 | * and performs basic initialization. Also allocates subqueue structs | |
3702 | * for each queue on the device at the end of the netdevice. | |
1da177e4 | 3703 | */ |
f25f4e44 PWJ |
3704 | struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, |
3705 | void (*setup)(struct net_device *), unsigned int queue_count) | |
1da177e4 LT |
3706 | { |
3707 | void *p; | |
3708 | struct net_device *dev; | |
3709 | int alloc_size; | |
3710 | ||
b6fe17d6 SH |
3711 | BUG_ON(strlen(name) >= sizeof(dev->name)); |
3712 | ||
1da177e4 | 3713 | /* ensure 32-byte alignment of both the device and private area */ |
f25f4e44 | 3714 | alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST + |
31ce72a6 | 3715 | (sizeof(struct net_device_subqueue) * (queue_count - 1))) & |
f25f4e44 | 3716 | ~NETDEV_ALIGN_CONST; |
1da177e4 LT |
3717 | alloc_size += sizeof_priv + NETDEV_ALIGN_CONST; |
3718 | ||
31380de9 | 3719 | p = kzalloc(alloc_size, GFP_KERNEL); |
1da177e4 | 3720 | if (!p) { |
b6fe17d6 | 3721 | printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n"); |
1da177e4 LT |
3722 | return NULL; |
3723 | } | |
1da177e4 LT |
3724 | |
3725 | dev = (struct net_device *) | |
3726 | (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); | |
3727 | dev->padded = (char *)dev - (char *)p; | |
3728 | ||
f25f4e44 PWJ |
3729 | if (sizeof_priv) { |
3730 | dev->priv = ((char *)dev + | |
3731 | ((sizeof(struct net_device) + | |
3732 | (sizeof(struct net_device_subqueue) * | |
31ce72a6 | 3733 | (queue_count - 1)) + NETDEV_ALIGN_CONST) |
f25f4e44 PWJ |
3734 | & ~NETDEV_ALIGN_CONST)); |
3735 | } | |
3736 | ||
3737 | dev->egress_subqueue_count = queue_count; | |
1da177e4 | 3738 | |
5a1b5898 | 3739 | dev->get_stats = internal_stats; |
bea3348e | 3740 | netpoll_netdev_init(dev); |
1da177e4 LT |
3741 | setup(dev); |
3742 | strcpy(dev->name, name); | |
3743 | return dev; | |
3744 | } | |
f25f4e44 | 3745 | EXPORT_SYMBOL(alloc_netdev_mq); |
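/*
 * Illustrative sketch (not part of this file): allocating a multiqueue
 * device with alloc_netdev_mq(). struct foo_priv and the queue count
 * of 4 are assumptions for illustration only.
 */
struct foo_priv {
	int foo_state;	/* driver-private state, reachable via dev->priv */
};

static struct net_device *foo_alloc_mq(void)
{
	/* private area sized for foo_priv, plus 4 subqueue structs */
	return alloc_netdev_mq(sizeof(struct foo_priv), "eth%d",
			       ether_setup, 4);
}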
1da177e4 LT |
3746 | |
3747 | /** | |
3748 | * free_netdev - free network device | |
3749 | * @dev: device | |
3750 | * | |
4ec93edb YH |
3751 | * This function does the last stage of destroying an allocated device |
3752 | * interface. The reference to the device object is released. | |
1da177e4 LT |
3753 | * If this is the last reference then it will be freed. |
3754 | */ | |
3755 | void free_netdev(struct net_device *dev) | |
3756 | { | |
3757 | #ifdef CONFIG_SYSFS | |
3041a069 | 3758 | /* Compatibility with error handling in drivers */ |
1da177e4 LT |
3759 | if (dev->reg_state == NETREG_UNINITIALIZED) { |
3760 | kfree((char *)dev - dev->padded); | |
3761 | return; | |
3762 | } | |
3763 | ||
3764 | BUG_ON(dev->reg_state != NETREG_UNREGISTERED); | |
3765 | dev->reg_state = NETREG_RELEASED; | |
3766 | ||
43cb76d9 GKH |
3767 | /* will free via device release */ |
3768 | put_device(&dev->dev); | |
1da177e4 LT |
3769 | #else |
3770 | kfree((char *)dev - dev->padded); | |
3771 | #endif | |
3772 | } | |
4ec93edb | 3773 | |
1da177e4 | 3774 | /* Synchronize with packet receive processing. */ |
4ec93edb | 3775 | void synchronize_net(void) |
1da177e4 LT |
3776 | { |
3777 | might_sleep(); | |
fbd568a3 | 3778 | synchronize_rcu(); |
1da177e4 LT |
3779 | } |
3780 | ||
3781 | /** | |
3782 | * unregister_netdevice - remove device from the kernel | |
3783 | * @dev: device | |
3784 | * | |
3785 | * This function shuts down a device interface and removes it | |
3786 | * from the kernel tables. The device is then queued for the | |
3787 | * final cleanup pass run by netdev_run_todo(). | |
3788 | * | |
3789 | * Callers must hold the rtnl semaphore. You may want | |
3790 | * unregister_netdev() instead of this. | |
3791 | */ | |
3792 | ||
22f8cde5 | 3793 | void unregister_netdevice(struct net_device *dev) |
1da177e4 | 3794 | { |
1da177e4 LT |
3795 | BUG_ON(dev_boot_phase); |
3796 | ASSERT_RTNL(); | |
3797 | ||
3798 | /* Some devices call this without having registered, for initialization unwind. */ | |
3799 | if (dev->reg_state == NETREG_UNINITIALIZED) { | |
3800 | printk(KERN_DEBUG "unregister_netdevice: device %s/%p never " | |
3801 | "was registered\n", dev->name, dev); | |
22f8cde5 SH |
3802 | |
3803 | WARN_ON(1); | |
3804 | return; | |
1da177e4 LT |
3805 | } |
3806 | ||
3807 | BUG_ON(dev->reg_state != NETREG_REGISTERED); | |
3808 | ||
3809 | /* If device is running, close it first. */ | |
3810 | if (dev->flags & IFF_UP) | |
3811 | dev_close(dev); | |
3812 | ||
3813 | /* And unlink it from device chain. */ | |
7562f876 PE |
3814 | write_lock_bh(&dev_base_lock); |
3815 | list_del(&dev->dev_list); | |
3816 | hlist_del(&dev->name_hlist); | |
3817 | hlist_del(&dev->index_hlist); | |
3818 | write_unlock_bh(&dev_base_lock); | |
1da177e4 LT |
3819 | |
3820 | dev->reg_state = NETREG_UNREGISTERING; | |
3821 | ||
3822 | synchronize_net(); | |
3823 | ||
3824 | /* Shutdown queueing discipline. */ | |
3825 | dev_shutdown(dev); | |
3826 | ||
4ec93edb | 3827 | |
1da177e4 LT |
3828 | /* Notify protocols that we are about to destroy | |
3829 | this device. They should clean up all of their state. | |
3830 | */ | |
f07d5b94 | 3831 | raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); |
4ec93edb | 3832 | |
1da177e4 | 3833 | /* |
4417da66 | 3834 | * Flush the unicast and multicast chains |
1da177e4 | 3835 | */ |
26cc2522 | 3836 | dev_addr_discard(dev); |
1da177e4 LT |
3837 | |
3838 | if (dev->uninit) | |
3839 | dev->uninit(dev); | |
3840 | ||
3841 | /* Notifier chain MUST detach us from master device. */ | |
3842 | BUG_TRAP(!dev->master); | |
3843 | ||
9093bbb2 SH |
3844 | /* Remove entries from sysfs */ |
3845 | netdev_unregister_sysfs(dev); | |
3846 | ||
1da177e4 LT |
3847 | /* Finish processing unregister after unlock */ |
3848 | net_set_todo(dev); | |
3849 | ||
3850 | synchronize_net(); | |
3851 | ||
3852 | dev_put(dev); | |
1da177e4 LT |
3853 | } |
3854 | ||
3855 | /** | |
3856 | * unregister_netdev - remove device from the kernel | |
3857 | * @dev: device | |
3858 | * | |
3859 | * This function shuts down a device interface and removes it | |
3860 | * from the kernel tables; it is the teardown entry point that | |
3861 | * most drivers should use. | |
3862 | * | |
3863 | * This is just a wrapper for unregister_netdevice that takes | |
3864 | * the rtnl semaphore. In general you want to use this and not | |
3865 | * unregister_netdevice. | |
3866 | */ | |
3867 | void unregister_netdev(struct net_device *dev) | |
3868 | { | |
3869 | rtnl_lock(); | |
3870 | unregister_netdevice(dev); | |
3871 | rtnl_unlock(); | |
3872 | } | |
3873 | ||
3874 | EXPORT_SYMBOL(unregister_netdev); | |
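/*
 * Illustrative sketch (not part of this file): the teardown ordering
 * implied by the todo-list sequence documented above: unregister
 * first, free only afterwards, once netdev_run_todo() has finished
 * with the device. foo_remove is a hypothetical name.
 */
static void foo_remove(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes rtnl, queues the todo work */
	free_netdev(dev);	/* drops the final kobject reference */
}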
3875 | ||
1da177e4 LT |
3876 | static int dev_cpu_callback(struct notifier_block *nfb, |
3877 | unsigned long action, | |
3878 | void *ocpu) | |
3879 | { | |
3880 | struct sk_buff **list_skb; | |
3881 | struct net_device **list_net; | |
3882 | struct sk_buff *skb; | |
3883 | unsigned int cpu, oldcpu = (unsigned long)ocpu; | |
3884 | struct softnet_data *sd, *oldsd; | |
3885 | ||
8bb78442 | 3886 | if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) |
1da177e4 LT |
3887 | return NOTIFY_OK; |
3888 | ||
3889 | local_irq_disable(); | |
3890 | cpu = smp_processor_id(); | |
3891 | sd = &per_cpu(softnet_data, cpu); | |
3892 | oldsd = &per_cpu(softnet_data, oldcpu); | |
3893 | ||
3894 | /* Find end of our completion_queue. */ | |
3895 | list_skb = &sd->completion_queue; | |
3896 | while (*list_skb) | |
3897 | list_skb = &(*list_skb)->next; | |
3898 | /* Append completion queue from offline CPU. */ | |
3899 | *list_skb = oldsd->completion_queue; | |
3900 | oldsd->completion_queue = NULL; | |
3901 | ||
3902 | /* Find end of our output_queue. */ | |
3903 | list_net = &sd->output_queue; | |
3904 | while (*list_net) | |
3905 | list_net = &(*list_net)->next_sched; | |
3906 | /* Append output queue from offline CPU. */ | |
3907 | *list_net = oldsd->output_queue; | |
3908 | oldsd->output_queue = NULL; | |
3909 | ||
3910 | raise_softirq_irqoff(NET_TX_SOFTIRQ); | |
3911 | local_irq_enable(); | |
3912 | ||
3913 | /* Process offline CPU's input_pkt_queue */ | |
3914 | while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) | |
3915 | netif_rx(skb); | |
3916 | ||
3917 | return NOTIFY_OK; | |
3918 | } | |
1da177e4 | 3919 | |
db217334 CL |
3920 | #ifdef CONFIG_NET_DMA |
3921 | /** | |
0ed72ec4 RD |
3922 | * net_dma_rebalance - try to maintain one DMA channel per CPU |
3923 | * @net_dma: DMA client and associated data (lock, channels, channel_mask) | |
3924 | * | |
3925 | * This is called when the number of channels allocated to the net_dma client | |
3926 | * changes. The net_dma client tries to have one DMA channel per CPU. | |
db217334 | 3927 | */ |
d379b01e DW |
3928 | |
3929 | static void net_dma_rebalance(struct net_dma *net_dma) | |
db217334 | 3930 | { |
d379b01e | 3931 | unsigned int cpu, i, n, chan_idx; |
db217334 CL |
3932 | struct dma_chan *chan; |
3933 | ||
d379b01e | 3934 | if (cpus_empty(net_dma->channel_mask)) { |
db217334 | 3935 | for_each_online_cpu(cpu) |
29bbd72d | 3936 | rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL); |
db217334 CL |
3937 | return; |
3938 | } | |
3939 | ||
3940 | i = 0; | |
3941 | cpu = first_cpu(cpu_online_map); | |
3942 | ||
d379b01e DW |
3943 | for_each_cpu_mask(chan_idx, net_dma->channel_mask) { |
3944 | chan = net_dma->channels[chan_idx]; | |
3945 | ||
3946 | n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) | |
3947 | + (i < (num_online_cpus() % | |
3948 | cpus_weight(net_dma->channel_mask)) ? 1 : 0)); | |
db217334 CL |
3949 | |
3950 | while (n) { | |
29bbd72d | 3951 | per_cpu(softnet_data, cpu).net_dma = chan; |
db217334 CL |
3952 | cpu = next_cpu(cpu, cpu_online_map); |
3953 | n--; | |
3954 | } | |
3955 | i++; | |
3956 | } | |
db217334 CL |
3957 | } |
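/*
 * Worked example of the split computed above (illustrative): with 3
 * channels in channel_mask and 8 online CPUs, each channel gets
 * 8 / 3 = 2 CPUs and the first 8 % 3 = 2 channels get one extra, so
 * the three channels serve 3, 3 and 2 CPUs respectively.
 */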
3958 | ||
3959 | /** | |
3960 | * netdev_dma_event - event callback for the net_dma_client | |
3961 | * @client: should always be net_dma_client | |
f4b8ea78 | 3962 | * @chan: DMA channel for the event |
0ed72ec4 | 3963 | * @state: DMA state to be handled |
db217334 | 3964 | */ |
d379b01e DW |
3965 | static enum dma_state_client |
3966 | netdev_dma_event(struct dma_client *client, struct dma_chan *chan, | |
3967 | enum dma_state state) | |
3968 | { | |
3969 | int i, found = 0, pos = -1; | |
3970 | struct net_dma *net_dma = | |
3971 | container_of(client, struct net_dma, client); | |
3972 | enum dma_state_client ack = DMA_DUP; /* default: take no action */ | |
3973 | ||
3974 | spin_lock(&net_dma->lock); | |
3975 | switch (state) { | |
3976 | case DMA_RESOURCE_AVAILABLE: | |
3977 | for (i = 0; i < NR_CPUS; i++) | |
3978 | if (net_dma->channels[i] == chan) { | |
3979 | found = 1; | |
3980 | break; | |
3981 | } else if (net_dma->channels[i] == NULL && pos < 0) | |
3982 | pos = i; | |
3983 | ||
3984 | if (!found && pos >= 0) { | |
3985 | ack = DMA_ACK; | |
3986 | net_dma->channels[pos] = chan; | |
3987 | cpu_set(pos, net_dma->channel_mask); | |
3988 | net_dma_rebalance(net_dma); | |
3989 | } | |
db217334 CL |
3990 | break; |
3991 | case DMA_RESOURCE_REMOVED: | |
d379b01e DW |
3992 | for (i = 0; i < NR_CPUS; i++) |
3993 | if (net_dma->channels[i] == chan) { | |
3994 | found = 1; | |
3995 | pos = i; | |
3996 | break; | |
3997 | } | |
3998 | ||
3999 | if (found) { | |
4000 | ack = DMA_ACK; | |
4001 | cpu_clear(pos, net_dma->channel_mask); | |
4002 | net_dma->channels[i] = NULL; | |
4003 | net_dma_rebalance(net_dma); | |
4004 | } | |
db217334 CL |
4005 | break; |
4006 | default: | |
4007 | break; | |
4008 | } | |
d379b01e DW |
4009 | spin_unlock(&net_dma->lock); |
4010 | ||
4011 | return ack; | |
db217334 CL |
4012 | } |
4013 | ||
4014 | /** | |
4015 | * netdev_dma_register - register the networking subsystem as a DMA client | |
4016 | */ | |
4017 | static int __init netdev_dma_register(void) | |
4018 | { | |
d379b01e DW |
4019 | spin_lock_init(&net_dma.lock); |
4020 | dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask); | |
4021 | dma_async_client_register(&net_dma.client); | |
4022 | dma_async_client_chan_request(&net_dma.client); | |
db217334 CL |
4023 | return 0; |
4024 | } | |
4025 | ||
4026 | #else | |
4027 | static int __init netdev_dma_register(void) { return -ENODEV; } | |
4028 | #endif /* CONFIG_NET_DMA */ | |
1da177e4 | 4029 | |
7f353bf2 HX |
4030 | /** |
4031 | * netdev_compute_features - compute conjunction of two feature sets | |
4032 | * @all: first feature set | |
4033 | * @one: second feature set | |
4034 | * | |
4035 | * Computes a new feature set after adding a device with feature set | |
4036 | * @one to the master device with current feature set @all. Returns | |
4037 | * the new feature set. | |
4038 | */ | |
4039 | int netdev_compute_features(unsigned long all, unsigned long one) | |
4040 | { | |
4041 | /* if device needs checksumming, downgrade to hw checksumming */ | |
4042 | if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) | |
4043 | all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM; | |
4044 | ||
4045 | /* if device can't do all checksum, downgrade to ipv4/ipv6 */ | |
4046 | if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM)) | |
4047 | all ^= NETIF_F_HW_CSUM | |
4048 | | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | |
4049 | ||
4050 | if (one & NETIF_F_GSO) | |
4051 | one |= NETIF_F_GSO_SOFTWARE; | |
4052 | one |= NETIF_F_GSO; | |
4053 | ||
4054 | /* If even one device supports robust GSO, enable it for all. */ | |
4055 | if (one & NETIF_F_GSO_ROBUST) | |
4056 | all |= NETIF_F_GSO_ROBUST; | |
4057 | ||
4058 | all &= one | NETIF_F_LLTX; | |
4059 | ||
4060 | if (!(all & NETIF_F_ALL_CSUM)) | |
4061 | all &= ~NETIF_F_SG; | |
4062 | if (!(all & NETIF_F_SG)) | |
4063 | all &= ~NETIF_F_GSO_MASK; | |
4064 | ||
4065 | return all; | |
4066 | } | |
4067 | EXPORT_SYMBOL(netdev_compute_features); | |
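/*
 * Illustrative sketch (not part of this file): how a master device
 * might fold its slaves' feature sets with the helper above. The
 * permissive starting mask and foo_master_features are assumptions,
 * not the bonding driver's actual code.
 */
static unsigned long foo_master_features(struct net_device *slaves[], int n)
{
	/* start permissive, then narrow to what every slave supports */
	unsigned long features = NETIF_F_SG | NETIF_F_NO_CSUM |
				 NETIF_F_GSO_MASK;
	int i;

	for (i = 0; i < n; i++)
		features = netdev_compute_features(features,
						   slaves[i]->features);
	return features;
}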
4068 | ||
1da177e4 LT |
4069 | /* |
4070 | * Initialize the DEV module. At boot time this walks the device list and | |
4071 | * unhooks any devices that fail to initialize (normally hardware not | |
4072 | * present) and leaves us with a valid list of present and active devices. | |
4073 | * | |
4074 | */ | |
4075 | ||
4076 | /* | |
4077 | * This is called single threaded during boot, so no need | |
4078 | * to take the rtnl semaphore. | |
4079 | */ | |
4080 | static int __init net_dev_init(void) | |
4081 | { | |
4082 | int i, rc = -ENOMEM; | |
4083 | ||
4084 | BUG_ON(!dev_boot_phase); | |
4085 | ||
1da177e4 LT |
4086 | if (dev_proc_init()) |
4087 | goto out; | |
4088 | ||
4089 | if (netdev_sysfs_init()) | |
4090 | goto out; | |
4091 | ||
4092 | INIT_LIST_HEAD(&ptype_all); | |
4ec93edb | 4093 | for (i = 0; i < 16; i++) |
1da177e4 LT |
4094 | INIT_LIST_HEAD(&ptype_base[i]); |
4095 | ||
4096 | for (i = 0; i < ARRAY_SIZE(dev_name_head); i++) | |
4097 | INIT_HLIST_HEAD(&dev_name_head[i]); | |
4098 | ||
4099 | for (i = 0; i < ARRAY_SIZE(dev_index_head); i++) | |
4100 | INIT_HLIST_HEAD(&dev_index_head[i]); | |
4101 | ||
4102 | /* | |
4103 | * Initialise the packet receive queues. | |
4104 | */ | |
4105 | ||
6f912042 | 4106 | for_each_possible_cpu(i) { |
1da177e4 LT |
4107 | struct softnet_data *queue; |
4108 | ||
4109 | queue = &per_cpu(softnet_data, i); | |
4110 | skb_queue_head_init(&queue->input_pkt_queue); | |
1da177e4 LT |
4111 | queue->completion_queue = NULL; |
4112 | INIT_LIST_HEAD(&queue->poll_list); | |
bea3348e SH |
4113 | |
4114 | queue->backlog.poll = process_backlog; | |
4115 | queue->backlog.weight = weight_p; | |
1da177e4 LT |
4116 | } |
4117 | ||
db217334 CL |
4118 | netdev_dma_register(); |
4119 | ||
1da177e4 LT |
4120 | dev_boot_phase = 0; |
4121 | ||
4122 | open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL); | |
4123 | open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL); | |
4124 | ||
4125 | hotcpu_notifier(dev_cpu_callback, 0); | |
4126 | dst_init(); | |
4127 | dev_mcast_init(); | |
4128 | rc = 0; | |
4129 | out: | |
4130 | return rc; | |
4131 | } | |
4132 | ||
4133 | subsys_initcall(net_dev_init); | |
4134 | ||
4135 | EXPORT_SYMBOL(__dev_get_by_index); | |
4136 | EXPORT_SYMBOL(__dev_get_by_name); | |
4137 | EXPORT_SYMBOL(__dev_remove_pack); | |
c2373ee9 | 4138 | EXPORT_SYMBOL(dev_valid_name); |
1da177e4 LT |
4139 | EXPORT_SYMBOL(dev_add_pack); |
4140 | EXPORT_SYMBOL(dev_alloc_name); | |
4141 | EXPORT_SYMBOL(dev_close); | |
4142 | EXPORT_SYMBOL(dev_get_by_flags); | |
4143 | EXPORT_SYMBOL(dev_get_by_index); | |
4144 | EXPORT_SYMBOL(dev_get_by_name); | |
1da177e4 LT |
4145 | EXPORT_SYMBOL(dev_open); |
4146 | EXPORT_SYMBOL(dev_queue_xmit); | |
4147 | EXPORT_SYMBOL(dev_remove_pack); | |
4148 | EXPORT_SYMBOL(dev_set_allmulti); | |
4149 | EXPORT_SYMBOL(dev_set_promiscuity); | |
4150 | EXPORT_SYMBOL(dev_change_flags); | |
4151 | EXPORT_SYMBOL(dev_set_mtu); | |
4152 | EXPORT_SYMBOL(dev_set_mac_address); | |
4153 | EXPORT_SYMBOL(free_netdev); | |
4154 | EXPORT_SYMBOL(netdev_boot_setup_check); | |
4155 | EXPORT_SYMBOL(netdev_set_master); | |
4156 | EXPORT_SYMBOL(netdev_state_change); | |
4157 | EXPORT_SYMBOL(netif_receive_skb); | |
4158 | EXPORT_SYMBOL(netif_rx); | |
4159 | EXPORT_SYMBOL(register_gifconf); | |
4160 | EXPORT_SYMBOL(register_netdevice); | |
4161 | EXPORT_SYMBOL(register_netdevice_notifier); | |
4162 | EXPORT_SYMBOL(skb_checksum_help); | |
4163 | EXPORT_SYMBOL(synchronize_net); | |
4164 | EXPORT_SYMBOL(unregister_netdevice); | |
4165 | EXPORT_SYMBOL(unregister_netdevice_notifier); | |
4166 | EXPORT_SYMBOL(net_enable_timestamp); | |
4167 | EXPORT_SYMBOL(net_disable_timestamp); | |
4168 | EXPORT_SYMBOL(dev_get_flags); | |
4169 | ||
4170 | #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) | |
4171 | EXPORT_SYMBOL(br_handle_frame_hook); | |
4172 | EXPORT_SYMBOL(br_fdb_get_hook); | |
4173 | EXPORT_SYMBOL(br_fdb_put_hook); | |
4174 | #endif | |
4175 | ||
4176 | #ifdef CONFIG_KMOD | |
4177 | EXPORT_SYMBOL(dev_load); | |
4178 | #endif | |
4179 | ||
4180 | EXPORT_PER_CPU_SYMBOL(softnet_data); |