/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

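/* A minimal sketch (not part of this header, kept out of the build with
 * #if 0) of how the mixed return-code namespaces above are typically
 * handled by a virtual device that re-queues its skb to another device:
 * net_xmit_eval() collapses NET_XMIT_CN to success as described above.
 * All ex_* names are hypothetical.
 */
#if 0	/* illustration only */
static netdev_tx_t ex_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = dev_queue_xmit(skb);	/* consumes the skb */

	if (net_xmit_eval(err))		/* real loss, not mere congestion */
		dev->stats.tx_errors++;
	else
		dev->stats.tx_packets++;
	return NETDEV_TX_OK;		/* skb was consumed either way */
}
#endif
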
/*
 * Compute the worst case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

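/* A minimal sketch of the point made above: because the fields are native
 * words, a driver can update these legacy counters from its own paths and
 * readers can sample dev->stats without extra locking. ex_* names are
 * hypothetical; illustration only.
 */
#if 0	/* illustration only */
static void ex_update_tx_stats(struct net_device *dev, unsigned int len)
{
	dev->stats.tx_packets++;	/* plain word-sized stores */
	dev->stats.tx_bytes += len;
}
#endif
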
#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)

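/* A minimal sketch of walking the device multicast list with the helpers
 * above, as a driver's ndo_set_rx_mode() implementation commonly does.
 * ex_program_mc_filter() is a hypothetical hardware hook; illustration only.
 */
#if 0	/* illustration only */
static void ex_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if (netdev_mc_empty(dev))
		return;
	netdev_for_each_mc_addr(ha, dev)
		ex_program_mc_filter(dev, ha->addr);	/* hypothetical */
}
#endif
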
struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

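/* A minimal sketch of reserving link-layer headroom with the macro above
 * when allocating an skb destined for a given output device; this mirrors
 * the pattern used by e.g. the ARP code. ex_* names are hypothetical.
 */
#if 0	/* illustration only */
static struct sk_buff *ex_alloc_for_dev(struct net_device *dev, int payload)
{
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	struct sk_buff *skb = alloc_skb(hlen + tlen + payload, GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, hlen);	/* leave room for the hard header */
	return skb;
}
#endif
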
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register an rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister an rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, the rx_handler is expected to tell __netif_receive_skb() what
 * to do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

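/* A minimal rx_handler sketch following the contract above: pass most
 * traffic through, consume what we claim. Registration happens via
 * netdev_rx_handler_register()/_unregister() as noted; ex_claim() is a
 * hypothetical predicate, illustration only.
 */
#if 0	/* illustration only */
static rx_handler_result_t ex_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (!ex_claim(skb))		/* hypothetical filter */
		return RX_HANDLER_PASS;

	consume_skb(skb);		/* we took ownership */
	return RX_HANDLER_CONSUMED;
}
#endif
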
void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if the NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 * napi_schedule - schedule NAPI poll
 * @n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 * napi_schedule_irqoff - schedule NAPI poll
 * @n: napi context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

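/* A minimal sketch of the canonical interrupt-handler shape built from the
 * helpers above: mask device interrupts, then hand the rest of the work to
 * NAPI. struct ex_priv and the ex_* hooks are hypothetical driver pieces;
 * illustration only.
 */
#if 0	/* illustration only */
static irqreturn_t ex_isr(int irq, void *data)
{
	struct ex_priv *priv = data;	/* hypothetical private struct */

	ex_disable_irqs(priv);		/* hypothetical hw masking */
	napi_schedule(&priv->napi);	/* poll callback will run soon */
	return IRQ_HANDLED;
}
#endif
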
void __napi_complete(struct napi_struct *n);
void napi_complete_done(struct napi_struct *n, int work_done);
/**
 * napi_complete - NAPI processing complete
 * @n: napi context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 */
static inline void napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}

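/* A minimal NAPI poll sketch: process up to @budget packets and only call
 * napi_complete_done() (then re-enable interrupts) when the budget was not
 * exhausted, so the core keeps polling while traffic is heavy. ex_* names
 * are hypothetical; illustration only.
 */
#if 0	/* illustration only */
static int ex_poll(struct napi_struct *napi, int budget)
{
	struct ex_priv *priv = container_of(napi, struct ex_priv, napi);
	int done = ex_rx_clean(priv, budget);	/* hypothetical rx loop */

	if (done < budget) {
		napi_complete_done(napi, done);
		ex_enable_irqs(priv);		/* hypothetical hw unmasking */
	}
	return done;
}
#endif
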
/**
 * napi_by_id - look up a NAPI by napi_id
 * @napi_id: hashed napi_id
 *
 * Look up @napi_id in the napi_hash table.
 * Must be called under rcu_read_lock().
 */
struct napi_struct *napi_by_id(unsigned int napi_id);

/**
 * napi_hash_add - add a NAPI to the global hashtable
 * @napi: napi context
 *
 * Generate a new napi_id and store @napi under it in napi_hash.
 */
void napi_hash_add(struct napi_struct *napi);

/**
 * napi_hash_del - remove a NAPI from the global table
 * @napi: napi context
 *
 * Warning: the caller must observe an RCU grace period
 * before freeing the memory containing @napi.
 */
void napi_hash_del(struct napi_struct *napi);

/**
 * napi_disable - prevent NAPI from scheduling
 * @n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

/**
 * napi_enable - enable NAPI scheduling
 * @n: napi context
 *
 * Allow NAPI to be scheduled on this context again.
 * Must be paired with napi_disable().
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 * napi_synchronize - wait until NAPI is not running
 * @n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped() functions below are called
 * to check if the queue has been stopped by the driver or the stack (either
 * of the XOFF bits is set in the state).  Drivers should not need to call
 * the netif_xmit_*stopped() functions; they should only be using netif_tx_*.
 */

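/* A minimal sketch of the driver-side protocol described above: stop the
 * queue when the TX ring fills, wake it from the completion path. The
 * netif_stop_queue()/netif_wake_queue() helpers set and clear
 * __QUEUE_STATE_DRV_XOFF; all ex_* names are hypothetical, illustration only.
 */
#if 0	/* illustration only */
static netdev_tx_t ex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ex_priv *priv = netdev_priv(dev);

	ex_post_to_ring(priv, skb);		/* hypothetical DMA post */
	if (ex_ring_full(priv))			/* hypothetical */
		netif_stop_queue(dev);		/* sets DRV_XOFF */
	return NETDEV_TX_OK;
}

static void ex_tx_complete(struct ex_priv *priv)
{
	ex_reclaim_ring(priv);			/* hypothetical */
	if (netif_queue_stopped(priv->dev) && !ex_ring_full(priv))
		netif_wake_queue(priv->dev);	/* clears DRV_XOFF */
}
#endif
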
struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32bit value.  The upper part holds the high-order bits
 * of the flow hash, the lower part the CPU number.
 * rps_cpu_mask is used to partition the space, depending on the number of
 * possible CPUs: rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32-6=26 bits for the hash.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[0] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint; preemption can change the CPU under us */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}

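/* A minimal sketch of sizing and allocating the socket flow table with the
 * macro above. The mask must be table_size - 1: the lookup in
 * rps_record_sock_flow() relies on a power-of-two table. This loosely
 * mirrors what the rps_sock_flow_entries sysctl handler does; illustration
 * only, assuming <linux/vmalloc.h> for vmalloc().
 */
#if 0	/* illustration only */
static struct rps_sock_flow_table *ex_alloc_sock_flow_table(u32 size)
{
	struct rps_sock_flow_table *table;

	size = roundup_pow_of_two(size);
	table = vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
	if (table)
		table->mask = size - 1;
	return table;
}
#endif
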
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
    / sizeof(u16))

/*
 * This structure holds all XPS maps for a device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb);

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late-stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev().
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when a device is unregistered or when
 *	registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         void *accel_priv, select_queue_fallback_t fallback);
 *	Called to decide which queue to use when the device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to its configuration when multicast or promiscuous mode is
 *	enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device's address list filtering
 *	changes. If the driver handles unicast address filtering, it should
 *	set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	"not supported" error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set a network device's bus interface parameters. This
 *	interface is retained for legacy reasons; new devices should use the
 *	bus interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                                              struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *			  int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 *	Called to set up 'tc' number of traffic classes in the net device. This
 *	is always called from the stack with the rtnl lock held and netif tx
 *	queues stopped. This allows the netdevice to perform queue management
 *	safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with the Fiber Channel management service as per
 *	the FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override the default World
 *	Wide Name (WWN) generation mechanism in the FCoE protocol stack to
 *	pass its own World Wide Port Name (WWPN) or World Wide Node Name
 *	(WWNN) to the FCoE protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release a previously enslaved netdev.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, struct net_device *filter_dev,
 *		       int idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask)
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_item_id *ppid);
 *	Called to get the ID of the physical port of this device. If the
 *	driver does not implement this, it is assumed that the hw is not
 *	able to have multiple net devices on a single physical port.
 *
 * void (*ndo_add_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify a driver about the UDP port and socket
 *	address family that vxlan is listening to. It is called only when
 *	a new port starts listening. The operation is protected by the
 *	vxlan_net->sock_lock.
 *
 * void (*ndo_del_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify the driver about a UDP port and socket
 *	address family that vxlan is not listening to anymore. The operation
 *	is protected by the vxlan_net->sock_lock.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
 *				      struct net_device *dev,
 *				      void *priv);
 *	Callback to use for xmit over the accelerated station. This
 *	is used in place of ndo_start_xmit on accelerated net
 *	devices.
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *					   struct net_device *dev,
 *					   netdev_features_t features);
 *	Called by the core transmit path to determine if the device is
 *	capable of performing offload operations on a given packet. This is
 *	to give the device an opportunity to implement any restrictions that
 *	cannot be otherwise expressed by feature flags. The check is called
 *	with the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 *
 * int (*ndo_switch_parent_id_get)(struct net_device *dev,
 *				   struct netdev_phys_item_id *psid);
 *	Called to get an ID of the switch chip this port is part of.
 *	If the driver implements this, it indicates that it represents a port
 *	of a switch chip.
 * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
 *	Called to notify the switch device port of a bridge port STP
 *	state change.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    void *accel_priv,
						    select_queue_fallback_t fallback);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	int			(*ndo_busy_poll)(struct napi_struct *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	void			(*ndo_add_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);
	void			(*ndo_del_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);

	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
							struct net_device *dev,
							void *priv);
	int			(*ndo_get_lock_subclass)(struct net_device *dev);
	netdev_features_t	(*ndo_features_check) (struct sk_buff *skb,
						       struct net_device *dev,
						       netdev_features_t features);
#ifdef CONFIG_NET_SWITCHDEV
	int			(*ndo_switch_parent_id_get)(struct net_device *dev,
							    struct netdev_phys_item_id *psid);
	int			(*ndo_switch_port_stp_update)(struct net_device *dev,
							      u8 state);
#endif
};

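/* A minimal sketch of wiring up this ops table for a simple Ethernet-style
 * driver: per the comment above the struct, ndo_start_xmit is required and
 * the rest are optional. ex_* names are hypothetical; the eth_* helpers are
 * the stock implementations from <linux/etherdevice.h>. Illustration only.
 */
#if 0	/* illustration only */
static const struct net_device_ops ex_netdev_ops = {
	.ndo_open		= ex_open,		/* hypothetical */
	.ndo_stop		= ex_stop,		/* hypothetical */
	.ndo_start_xmit		= ex_start_xmit,	/* required */
	.ndo_set_rx_mode	= ex_set_rx_mode,	/* hypothetical */
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
#endif
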
/**
 * enum net_device_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device priv_flags; they are only set internally
 * by drivers and used in the kernel. These flags are invisible to
 * userspace, which means that the order of these flags can change
 * during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
 * @IFF_MASTER_8023AD: bonding master, 802.3ad
 * @IFF_MASTER_ALB: bonding master, balance-alb
 * @IFF_BONDING: bonding master or slave
 * @IFF_SLAVE_NEEDARP: need ARPs for validation
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 *	underlying stacked devices
 * @IFF_IPVLAN_MASTER: IPvlan master device
 * @IFF_IPVLAN_SLAVE: IPvlan slave device
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_SLAVE_INACTIVE		= 1<<2,
	IFF_MASTER_8023AD		= 1<<3,
	IFF_MASTER_ALB			= 1<<4,
	IFF_BONDING			= 1<<5,
	IFF_SLAVE_NEEDARP		= 1<<6,
	IFF_ISATAP			= 1<<7,
	IFF_MASTER_ARPMON		= 1<<8,
	IFF_WAN_HDLC			= 1<<9,
	IFF_XMIT_DST_RELEASE		= 1<<10,
	IFF_DONT_BRIDGE			= 1<<11,
	IFF_DISABLE_NETPOLL		= 1<<12,
	IFF_MACVLAN_PORT		= 1<<13,
	IFF_BRIDGE_PORT			= 1<<14,
	IFF_OVS_DATAPATH		= 1<<15,
	IFF_TX_SKB_SHARING		= 1<<16,
	IFF_UNICAST_FLT			= 1<<17,
	IFF_TEAM_PORT			= 1<<18,
	IFF_SUPP_NOFCS			= 1<<19,
	IFF_LIVE_ADDR_CHANGE		= 1<<20,
	IFF_MACVLAN			= 1<<21,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<22,
	IFF_IPVLAN_MASTER		= 1<<23,
	IFF_IPVLAN_SLAVE		= 1<<24,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_SLAVE_INACTIVE		IFF_SLAVE_INACTIVE
#define IFF_MASTER_8023AD		IFF_MASTER_8023AD
#define IFF_MASTER_ALB			IFF_MASTER_ALB
#define IFF_BONDING			IFF_BONDING
#define IFF_SLAVE_NEEDARP		IFF_SLAVE_NEEDARP
#define IFF_ISATAP			IFF_ISATAP
#define IFF_MASTER_ARPMON		IFF_MASTER_ARPMON
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
#define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE

/**
 * struct net_device - The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	@name:	This is the first field of the "visible" part of this structure
 *		(i.e. as seen by users in the "Space.c" file).  It is the name
 *		of the interface.
 *
 *	@name_hlist:	Device name hash chain, please keep it close to name[]
 *	@ifalias:	SNMP alias
 *	@mem_end:	Shared memory end
 *	@mem_start:	Shared memory start
 *	@base_addr:	Device I/O address
 *	@irq:		Device IRQ number
 *
 *	@state:		Generic network queuing layer state, see netdev_state_t
 *	@dev_list:	The global list of network devices
 *	@napi_list:	List entry that is used for polling napi devices
 *	@unreg_list:	List entry that is used when we are unregistering the
 *			device; see the function unregister_netdev
 *	@close_list:	List entry that is used when we are closing the device
 *
 *	@adj_list:	Directly linked devices, like slaves for bonding
 *	@all_adj_list:	All linked devices, *including* neighbours
 *	@features:	Currently active device features
 *	@hw_features:	User-changeable features
 *
 *	@wanted_features:	User-requested features
 *	@vlan_features:		Mask of features inheritable by VLAN devices
 *
 *	@hw_enc_features:	Mask of features inherited by encapsulating devices
 *				This field indicates what encapsulation
 *				offloads the hardware is capable of doing,
 *				and drivers will need to set them appropriately.
 *
 *	@mpls_features:	Mask of features inheritable by MPLS
 *
 *	@ifindex:	interface index
 *	@iflink:	unique device identifier
 *
 *	@stats:		Statistics struct, which was left as a legacy, use
 *			rtnl_link_stats64 instead
 *
 *	@rx_dropped:	Dropped packets by core network,
 *			do not use this in drivers
 *	@tx_dropped:	Dropped packets by core network,
 *			do not use this in drivers
 *
 *	@carrier_changes:	Stats to monitor carrier on<->off transitions
 *
 *	@wireless_handlers:	List of functions to handle Wireless Extensions,
 *				instead of ioctl,
 *				see <net/iw_handler.h> for details.
 *	@wireless_data:	Instance data managed by the core of wireless extensions
 *
 *	@netdev_ops:	Includes several pointers to callbacks,
 *			if one wants to override the ndo_*() functions
 *	@ethtool_ops:	Management operations
 *	@fwd_ops:	Management operations
 *	@header_ops:	Includes callbacks for creating, parsing, rebuilding, etc
 *			of Layer 2 headers.
 *
 *	@flags:		Interface flags (a la BSD)
 *	@priv_flags:	Like 'flags' but invisible to userspace,
 *			see if.h for the definitions
 *	@gflags:	Global flags ( kept as legacy )
 *	@padded:	How much padding added by alloc_netdev()
 *	@operstate:	RFC2863 operstate
 *	@link_mode:	Mapping policy to operstate
 *	@if_port:	Selectable AUI, TP, ...
 *	@dma:		DMA channel
 *	@mtu:		Interface MTU value
 *	@type:		Interface hardware type
 *	@hard_header_len: Hardware header length
 *
 *	@needed_headroom: Extra headroom the hardware may need, but not in all
 *			  cases can this be guaranteed
 *	@needed_tailroom: Extra tailroom the hardware may need, but not in all
 *			  cases can this be guaranteed. Some cases also use
 *			  LL_MAX_HEADER instead to allocate the skb
 *
 *	interface address info:
 *
 *	@perm_addr:		Permanent hw address
 *	@addr_assign_type:	Hw address assignment type
 *	@addr_len:		Hardware address length
 *	@neigh_priv_len:	Used in neigh_alloc(),
 *				initialized only in atm/clip.c
 *	@dev_id:		Used to differentiate devices that share
 *				the same link layer address
 *	@dev_port:		Used to differentiate devices that share
 *				the same function
 *	@addr_list_lock:	XXX: need comments on this one
 *	@uc:			unicast mac addresses
 *	@mc:			multicast mac addresses
 *	@dev_addrs:		list of device hw addresses
 *	@queues_kset:		Group of all Kobjects in the Tx and RX queues
 *	@uc_promisc:		Counter that indicates promiscuous mode
 *				has been enabled due to the need to listen to
 *				additional unicast addresses in a device that
 *				does not implement ndo_set_rx_mode()
 *	@promiscuity:		Number of times the NIC is told to work in
 *				promiscuous mode; if it becomes 0 the NIC will
 *				exit promiscuous mode
 *	@allmulti:		Counter, enables or disables allmulticast mode
 *
 *	@vlan_info:	VLAN info
 *	@dsa_ptr:	dsa specific data
 *	@tipc_ptr:	TIPC specific data
 *	@atalk_ptr:	AppleTalk link
 *	@ip_ptr:	IPv4 specific data
 *	@dn_ptr:	DECnet specific data
 *	@ip6_ptr:	IPv6 specific data
 *	@ax25_ptr:	AX.25 specific data
 *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
 *
 *	@last_rx:	Time of last Rx
 *	@dev_addr:	Hw address (before bcast,
 *			because most packets are unicast)
 *
 *	@_rx:			Array of RX queues
 *	@num_rx_queues:		Number of RX queues
 *				allocated at register_netdev() time
 *	@real_num_rx_queues:	Number of RX queues currently active in device
 *
 *	@rx_handler:		handler for received packets
 *	@rx_handler_data:	XXX: need comments on this one
 *	@ingress_queue:		XXX: need comments on this one
 *	@broadcast:		hw bcast address
 *
 *	@_tx:			Array of TX queues
 *	@num_tx_queues:		Number of TX queues allocated at alloc_netdev_mq() time
 *	@real_num_tx_queues:	Number of TX queues currently active in device
 *	@qdisc:			Root qdisc from userspace point of view
 *	@tx_queue_len:		Max frames per queue allowed
 *	@tx_global_lock:	XXX: need comments on this one
 *
 *	@xps_maps:	XXX: need comments on this one
 *
 *	@rx_cpu_rmap:	CPU reverse-mapping for RX completion interrupts,
 *			indexed by RX queue number. Assigned by driver.
 *			This must only be set if the ndo_rx_flow_steer
 *			operation is defined
 *
 *	@trans_start:		Time (in jiffies) of last Tx
 *	@watchdog_timeo:	Represents the timeout that is used by
 *				the watchdog ( see dev_watchdog() )
 *	@watchdog_timer:	List of timers
 *
 *	@pcpu_refcnt:		Number of references to this device
 *	@todo_list:		Delayed register/unregister
 *	@index_hlist:		Device index hash chain
 *	@link_watch_list:	XXX: need comments on this one
 *
 *	@reg_state:		Register/unregister state machine
 *	@dismantle:		Device is going to be freed
 *	@rtnl_link_state:	This enum represents the phases of creating
 *				a new link
 *
 *	@destructor:		Called from unregister,
 *				can be used to call free_netdev
 *	@npinfo:		XXX: need comments on this one
 *	@nd_net:		Network namespace this network device is inside
 *
 *	@ml_priv:	Mid-layer private
 *	@lstats:	Loopback statistics
 *	@tstats:	Tunnel statistics
 *	@dstats:	Dummy statistics
 *	@vstats:	Virtual ethernet statistics
 *
 *	@garp_port:	GARP
 *	@mrp_port:	MRP
 *
 *	@dev:		Class/net/name entry
 *	@sysfs_groups:	Space for optional device, statistics and wireless
 *			sysfs groups
 *
 *	@sysfs_rx_queue_group:	Space for optional per-rx queue attributes
 *	@rtnl_link_ops:	Rtnl_link_ops
 *
 *	@gso_max_size:	Maximum size of generic segmentation offload
 *	@gso_max_segs:	Maximum number of segments that can be passed to the
 *			NIC for GSO
 *	@gso_min_segs:	Minimum number of segments that can be passed to the
 *			NIC for GSO
 *
 *	@dcbnl_ops:	Data Center Bridging netlink ops
 *	@num_tc:	Number of traffic classes in the net device
 *	@tc_to_txq:	XXX: need comments on this one
 *	@prio_tc_map:	XXX: need comments on this one
 *
 *	@fcoe_ddp_xid:	Max exchange id for FCoE LRO by ddp
 *
 *	@priomap:	XXX: need comments on this one
 *	@phydev:	Physical device may attach itself
 *			for hardware timestamping
 *
 *	@qdisc_tx_busylock:	XXX: need comments on this one
 *
 *	@group:		The group the device belongs to
 *	@pm_qos_req:	Power Management QoS object
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {
	char			name[IFNAMSIZ];
	struct hlist_node	name_hlist;
	char			*ifalias;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;
	unsigned long		mem_start;
	unsigned long		base_addr;
	int			irq;

	/*
	 *	Some hardware also needs these fields (state,
	 *	dev_list, napi_list, unreg_list, close_list),
	 *	but they are not part of the usual set specified
	 *	in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;
	struct list_head	ptype_all;
	struct list_head	ptype_specific;

	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	struct {
		struct list_head upper;
		struct list_head lower;
	} all_adj_list;

	netdev_features_t	features;
	netdev_features_t	hw_features;
	netdev_features_t	wanted_features;
	netdev_features_t	vlan_features;
	netdev_features_t	hw_enc_features;
	netdev_features_t	mpls_features;

	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;

	atomic_long_t		rx_dropped;
	atomic_long_t		tx_dropped;

	atomic_t		carrier_changes;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *wireless_handlers;
	struct iw_public_data	*wireless_data;
#endif
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
	const struct forwarding_accel_ops *fwd_ops;

	const struct header_ops *header_ops;

	unsigned int		flags;
	unsigned int		priv_flags;

	unsigned short		gflags;
	unsigned short		padded;

	unsigned char		operstate;
	unsigned char		link_mode;

	unsigned char		if_port;
	unsigned char		dma;

	unsigned int		mtu;
	unsigned short		type;
	unsigned short		hard_header_len;

	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN];
	unsigned char		addr_assign_type;
	unsigned char		addr_len;
	unsigned short		neigh_priv_len;
	unsigned short		dev_id;
	unsigned short		dev_port;
	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;
	struct netdev_hw_addr_list	mc;
	struct netdev_hw_addr_list	dev_addrs;

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif

	unsigned char		name_assign_type;

	bool			uc_promisc;
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
	void			*atalk_ptr;
	struct in_device __rcu	*ip_ptr;
	struct dn_dev __rcu	*dn_ptr;
	struct inet6_dev __rcu	*ip6_ptr;
	void			*ax25_ptr;
	struct wireless_dev	*ieee80211_ptr;
	struct wpan_dev		*ieee802154_ptr;

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;


#ifdef CONFIG_SYSFS
	struct netdev_rx_queue	*_rx;

	unsigned int		num_rx_queues;
	unsigned int		real_num_rx_queues;

#endif

	unsigned long		gro_flush_timeout;
	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;
	unsigned char		broadcast[MAX_ADDR_LEN];


/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
	unsigned int		num_tx_queues;
	unsigned int		real_num_tx_queues;
	struct Qdisc		*qdisc;
	unsigned long		tx_queue_len;
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rx_cpu_rmap;
#endif

	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;

	int			watchdog_timeo;
	struct timer_list	watchdog_timer;

	int __percpu		*pcpu_refcnt;
	struct list_head	todo_list;

	struct hlist_node	index_hlist;
1679 struct list_head link_watch_list;
1680
1681 enum { NETREG_UNINITIALIZED=0,
1682 NETREG_REGISTERED, /* completed register_netdevice */
1683 NETREG_UNREGISTERING, /* called unregister_netdevice */
1684 NETREG_UNREGISTERED, /* completed unregister todo */
1685 NETREG_RELEASED, /* called free_netdev */
1686 NETREG_DUMMY, /* dummy device for NAPI poll */
1687 } reg_state:8;
1688
1689 bool dismantle;
1690
1691 enum {
1692 RTNL_LINK_INITIALIZED,
1693 RTNL_LINK_INITIALIZING,
1694 } rtnl_link_state:16;
1695
1696 void (*destructor)(struct net_device *dev);
1697
1698 #ifdef CONFIG_NETPOLL
1699 struct netpoll_info __rcu *npinfo;
1700 #endif
1701
1702 #ifdef CONFIG_NET_NS
1703 struct net *nd_net;
1704 #endif
1705
1706 /* mid-layer private */
1707 union {
1708 void *ml_priv;
1709 struct pcpu_lstats __percpu *lstats;
1710 struct pcpu_sw_netstats __percpu *tstats;
1711 struct pcpu_dstats __percpu *dstats;
1712 struct pcpu_vstats __percpu *vstats;
1713 };
1714
1715 struct garp_port __rcu *garp_port;
1716 struct mrp_port __rcu *mrp_port;
1717
1718 struct device dev;
1719 const struct attribute_group *sysfs_groups[4];
1720 const struct attribute_group *sysfs_rx_queue_group;
1721
1722 const struct rtnl_link_ops *rtnl_link_ops;
1723
1724 /* for setting kernel sock attribute on TCP connection setup */
1725 #define GSO_MAX_SIZE 65536
1726 unsigned int gso_max_size;
1727 #define GSO_MAX_SEGS 65535
1728 u16 gso_max_segs;
1729 u16 gso_min_segs;
1730 #ifdef CONFIG_DCB
1731 const struct dcbnl_rtnl_ops *dcbnl_ops;
1732 #endif
1733 u8 num_tc;
1734 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1735 u8 prio_tc_map[TC_BITMASK + 1];
1736
1737 #if IS_ENABLED(CONFIG_FCOE)
1738 unsigned int fcoe_ddp_xid;
1739 #endif
1740 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1741 struct netprio_map __rcu *priomap;
1742 #endif
1743 struct phy_device *phydev;
1744 struct lock_class_key *qdisc_tx_busylock;
1745 int group;
1746 struct pm_qos_request pm_qos_req;
1747 };
1748 #define to_net_dev(d) container_of(d, struct net_device, dev)
1749
1750 #define NETDEV_ALIGN 32
1751
1752 static inline
1753 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1754 {
1755 return dev->prio_tc_map[prio & TC_BITMASK];
1756 }
1757
1758 static inline
1759 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1760 {
1761 if (tc >= dev->num_tc)
1762 return -EINVAL;
1763
1764 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1765 return 0;
1766 }
1767
1768 static inline
1769 void netdev_reset_tc(struct net_device *dev)
1770 {
1771 dev->num_tc = 0;
1772 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1773 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1774 }
1775
1776 static inline
1777 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1778 {
1779 if (tc >= dev->num_tc)
1780 return -EINVAL;
1781
1782 dev->tc_to_txq[tc].count = count;
1783 dev->tc_to_txq[tc].offset = offset;
1784 return 0;
1785 }
1786
1787 static inline
1788 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1789 {
1790 if (num_tc > TC_MAX_QUEUE)
1791 return -EINVAL;
1792
1793 dev->num_tc = num_tc;
1794 return 0;
1795 }
1796
1797 static inline
1798 int netdev_get_num_tc(struct net_device *dev)
1799 {
1800 return dev->num_tc;
1801 }
1802
1803 static inline
1804 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1805 unsigned int index)
1806 {
1807 return &dev->_tx[index];
1808 }
1809
1810 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
1811 const struct sk_buff *skb)
1812 {
1813 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
1814 }
1815
1816 static inline void netdev_for_each_tx_queue(struct net_device *dev,
1817 void (*f)(struct net_device *,
1818 struct netdev_queue *,
1819 void *),
1820 void *arg)
1821 {
1822 unsigned int i;
1823
1824 for (i = 0; i < dev->num_tx_queues; i++)
1825 f(dev, &dev->_tx[i], arg);
1826 }
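
/* Example (illustrative sketch, not part of the kernel API): a driver can
 * run a helper on every Tx queue, e.g. to clear per-queue state after a
 * hardware reset. The helper name below is hypothetical:
 *
 *	static void example_reset_one(struct net_device *dev,
 *				      struct netdev_queue *txq, void *arg)
 *	{
 *		netdev_tx_reset_queue(txq);	// BQL reset, defined below
 *	}
 *
 *	netdev_for_each_tx_queue(dev, example_reset_one, NULL);
 */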
1827
1828 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1829 struct sk_buff *skb,
1830 void *accel_priv);
1831
1832 /*
1833 * Net namespace inlines
1834 */
1835 static inline
1836 struct net *dev_net(const struct net_device *dev)
1837 {
1838 return read_pnet(&dev->nd_net);
1839 }
1840
1841 static inline
1842 void dev_net_set(struct net_device *dev, struct net *net)
1843 {
1844 #ifdef CONFIG_NET_NS
1845 release_net(dev->nd_net);
1846 dev->nd_net = hold_net(net);
1847 #endif
1848 }
1849
1850 static inline bool netdev_uses_dsa(struct net_device *dev)
1851 {
1852 #if IS_ENABLED(CONFIG_NET_DSA)
1853 if (dev->dsa_ptr != NULL)
1854 return dsa_uses_tagged_protocol(dev->dsa_ptr);
1855 #endif
1856 return false;
1857 }
1858
1859 /**
1860 * netdev_priv - access network device private data
1861 * @dev: network device
1862 *
1863 * Get network device private data
1864 */
1865 static inline void *netdev_priv(const struct net_device *dev)
1866 {
1867 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
1868 }
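
/* Example (illustrative sketch): netdev_priv() returns the driver-private
 * area allocated together with the struct net_device by alloc_netdev()
 * (declared below). "struct example_priv" is a hypothetical name.
 */
struct example_priv {
	u32 msg_enable;
};

static inline struct example_priv *example_priv(struct net_device *dev)
{
	return netdev_priv(dev);
}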
1869
1870 /* Set the sysfs physical device reference for the network logical device
1871 * if set prior to registration will cause a symlink during initialization.
1872 */
1873 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
1874
1875 /* Set the sysfs device type for the network logical device to allow
1876 * fine-grained identification of different network device types. For
1877 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
1878 */
1879 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1880
1881 /* Default NAPI poll() weight
1882 * Device drivers are strongly advised not to use a bigger value.
1883 */
1884 #define NAPI_POLL_WEIGHT 64
1885
1886 /**
1887 * netif_napi_add - initialize a napi context
1888 * @dev: network device
1889 * @napi: napi context
1890 * @poll: polling function
1891 * @weight: default weight
1892 *
1893 * netif_napi_add() must be used to initialize a napi context prior to calling
1894 * *any* of the other napi related functions.
1895 */
1896 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1897 int (*poll)(struct napi_struct *, int), int weight);
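
/* Example (illustrative sketch): a minimal NAPI poll callback. It assumes a
 * hypothetical example_rx_clean() that delivers up to @budget packets (e.g.
 * via napi_gro_receive(), declared below) and returns how many it handled.
 */
static inline int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* work_done = example_rx_clean(napi, budget); */

	if (work_done < budget)
		napi_complete(napi);	/* declared earlier in this header */
	return work_done;
}
/* Setup: netif_napi_add(dev, napi, example_napi_poll, NAPI_POLL_WEIGHT); */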
1898
1899 /**
1900 * netif_napi_del - remove a napi context
1901 * @napi: napi context
1902 *
1903 * netif_napi_del() removes a napi context from the network device napi list
1904 */
1905 void netif_napi_del(struct napi_struct *napi);
1906
1907 struct napi_gro_cb {
1908 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1909 void *frag0;
1910
1911 /* Length of frag0. */
1912 unsigned int frag0_len;
1913
1914 /* This indicates where we are processing relative to skb->data. */
1915 int data_offset;
1916
1917 /* This is non-zero if the packet cannot be merged with the new skb. */
1918 u16 flush;
1919
1920 /* Save the IP ID here and check when we get to the transport layer */
1921 u16 flush_id;
1922
1923 /* Number of segments aggregated. */
1924 u16 count;
1925
1926 /* Start offset for remote checksum offload */
1927 u16 gro_remcsum_start;
1928
1929 /* jiffies when first packet was created/queued */
1930 unsigned long age;
1931
1932 /* Used in ipv6_gro_receive() and foo-over-udp */
1933 u16 proto;
1934
1935 /* This is non-zero if the packet may be of the same flow. */
1936 u8 same_flow:1;
1937
1938 /* Used in udp_gro_receive */
1939 u8 udp_mark:1;
1940
1941 /* GRO checksum is valid */
1942 u8 csum_valid:1;
1943
1944 /* Number of checksums via CHECKSUM_UNNECESSARY */
1945 u8 csum_cnt:3;
1946
1947 /* Free the skb? */
1948 u8 free:2;
1949 #define NAPI_GRO_FREE 1
1950 #define NAPI_GRO_FREE_STOLEN_HEAD 2
1951
1952 /* Used in foo-over-udp, set in udp[46]_gro_receive */
1953 u8 is_ipv6:1;
1954
1955 /* 7 bit hole */
1956
1957 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
1958 __wsum csum;
1959
1960 /* used in skb_gro_receive() slow path */
1961 struct sk_buff *last;
1962 };
1963
1964 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1965
1966 struct packet_type {
1967 __be16 type; /* This is really htons(ether_type). */
1968 struct net_device *dev; /* NULL is wildcarded here */
1969 int (*func) (struct sk_buff *,
1970 struct net_device *,
1971 struct packet_type *,
1972 struct net_device *);
1973 bool (*id_match)(struct packet_type *ptype,
1974 struct sock *sk);
1975 void *af_packet_priv;
1976 struct list_head list;
1977 };
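
/* Example (illustrative sketch): a protocol handler registered with
 * dev_add_pack() (declared below). example_rcv is a hypothetical name and
 * ETH_P_IP comes from <linux/if_ether.h>:
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		// ... process skb, then free or queue it ...
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type example_pt __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = example_rcv,
 *	};
 *	dev_add_pack(&example_pt);
 */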
1978
1979 struct offload_callbacks {
1980 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1981 netdev_features_t features);
1982 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1983 struct sk_buff *skb);
1984 int (*gro_complete)(struct sk_buff *skb, int nhoff);
1985 };
1986
1987 struct packet_offload {
1988 __be16 type; /* This is really htons(ether_type). */
1989 struct offload_callbacks callbacks;
1990 struct list_head list;
1991 };
1992
1993 struct udp_offload;
1994
1995 struct udp_offload_callbacks {
1996 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1997 struct sk_buff *skb,
1998 struct udp_offload *uoff);
1999 int (*gro_complete)(struct sk_buff *skb,
2000 int nhoff,
2001 struct udp_offload *uoff);
2002 };
2003
2004 struct udp_offload {
2005 __be16 port;
2006 u8 ipproto;
2007 struct udp_offload_callbacks callbacks;
2008 };
2009
2010 /* Often-modified stats are per-CPU; others are shared (netdev->stats). */
2011 struct pcpu_sw_netstats {
2012 u64 rx_packets;
2013 u64 rx_bytes;
2014 u64 tx_packets;
2015 u64 tx_bytes;
2016 struct u64_stats_sync syncp;
2017 };
2018
2019 #define netdev_alloc_pcpu_stats(type) \
2020 ({ \
2021 typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
2022 if (pcpu_stats) { \
2023 int i; \
2024 for_each_possible_cpu(i) { \
2025 typeof(type) *stat; \
2026 stat = per_cpu_ptr(pcpu_stats, i); \
2027 u64_stats_init(&stat->syncp); \
2028 } \
2029 } \
2030 pcpu_stats; \
2031 })
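
/* Example (illustrative sketch): allocating per-CPU stats for a tunnel-style
 * device and bumping them on receive. The update helpers come from
 * <linux/u64_stats_sync.h>; the function names here are hypothetical.
 */
static inline int example_alloc_stats(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static inline void example_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}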
2032
2033 #include <linux/notifier.h>
2034
2035 /* netdevice notifier chain. Please remember to update the rtnetlink
2036 * notification exclusion list in rtnetlink_event() when adding new
2037 * types.
2038 */
2039 #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
2040 #define NETDEV_DOWN 0x0002
2041 #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
2042 detected a hardware crash and restarted
2043 - we can use this e.g. to kick TCP sessions
2044 once done */
2045 #define NETDEV_CHANGE 0x0004 /* Notify device state change */
2046 #define NETDEV_REGISTER 0x0005
2047 #define NETDEV_UNREGISTER 0x0006
2048 #define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
2049 #define NETDEV_CHANGEADDR 0x0008
2050 #define NETDEV_GOING_DOWN 0x0009
2051 #define NETDEV_CHANGENAME 0x000A
2052 #define NETDEV_FEAT_CHANGE 0x000B
2053 #define NETDEV_BONDING_FAILOVER 0x000C
2054 #define NETDEV_PRE_UP 0x000D
2055 #define NETDEV_PRE_TYPE_CHANGE 0x000E
2056 #define NETDEV_POST_TYPE_CHANGE 0x000F
2057 #define NETDEV_POST_INIT 0x0010
2058 #define NETDEV_UNREGISTER_FINAL 0x0011
2059 #define NETDEV_RELEASE 0x0012
2060 #define NETDEV_NOTIFY_PEERS 0x0013
2061 #define NETDEV_JOIN 0x0014
2062 #define NETDEV_CHANGEUPPER 0x0015
2063 #define NETDEV_RESEND_IGMP 0x0016
2064 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
2065 #define NETDEV_CHANGEINFODATA 0x0018
2066 #define NETDEV_BONDING_INFO 0x0019
2067
2068 int register_netdevice_notifier(struct notifier_block *nb);
2069 int unregister_netdevice_notifier(struct notifier_block *nb);
2070
2071 struct netdev_notifier_info {
2072 struct net_device *dev;
2073 };
2074
2075 struct netdev_notifier_change_info {
2076 struct netdev_notifier_info info; /* must be first */
2077 unsigned int flags_changed;
2078 };
2079
2080 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2081 struct net_device *dev)
2082 {
2083 info->dev = dev;
2084 }
2085
2086 static inline struct net_device *
2087 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2088 {
2089 return info->dev;
2090 }
2091
2092 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
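
/* Example (illustrative sketch): a netdevice notifier. @ptr is the
 * struct netdev_notifier_info passed by the core; the handler and block
 * names are hypothetical.
 */
static inline int example_netdev_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);
	return NOTIFY_DONE;
}
/*
 * static struct notifier_block example_nb = {
 *	.notifier_call = example_netdev_event,
 * };
 * register_netdevice_notifier(&example_nb);
 */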
2093
2094
2095 extern rwlock_t dev_base_lock; /* Device list lock */
2096
2097 #define for_each_netdev(net, d) \
2098 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2099 #define for_each_netdev_reverse(net, d) \
2100 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2101 #define for_each_netdev_rcu(net, d) \
2102 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2103 #define for_each_netdev_safe(net, d, n) \
2104 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2105 #define for_each_netdev_continue(net, d) \
2106 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2107 #define for_each_netdev_continue_rcu(net, d) \
2108 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2109 #define for_each_netdev_in_bond_rcu(bond, slave) \
2110 for_each_netdev_rcu(&init_net, slave) \
2111 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2112 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
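
/* Example (illustrative sketch): walking every device in a namespace under
 * RCU. Writers are serialized by RTNL; readers only need rcu_read_lock().
 */
static inline void example_list_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("found %s\n", dev->name);
	rcu_read_unlock();
}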
2113
2114 static inline struct net_device *next_net_device(struct net_device *dev)
2115 {
2116 struct list_head *lh;
2117 struct net *net;
2118
2119 net = dev_net(dev);
2120 lh = dev->dev_list.next;
2121 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2122 }
2123
2124 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2125 {
2126 struct list_head *lh;
2127 struct net *net;
2128
2129 net = dev_net(dev);
2130 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2131 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2132 }
2133
2134 static inline struct net_device *first_net_device(struct net *net)
2135 {
2136 return list_empty(&net->dev_base_head) ? NULL :
2137 net_device_entry(net->dev_base_head.next);
2138 }
2139
2140 static inline struct net_device *first_net_device_rcu(struct net *net)
2141 {
2142 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2143
2144 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2145 }
2146
2147 int netdev_boot_setup_check(struct net_device *dev);
2148 unsigned long netdev_boot_base(const char *prefix, int unit);
2149 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2150 const char *hwaddr);
2151 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2152 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2153 void dev_add_pack(struct packet_type *pt);
2154 void dev_remove_pack(struct packet_type *pt);
2155 void __dev_remove_pack(struct packet_type *pt);
2156 void dev_add_offload(struct packet_offload *po);
2157 void dev_remove_offload(struct packet_offload *po);
2158
2159 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2160 unsigned short mask);
2161 struct net_device *dev_get_by_name(struct net *net, const char *name);
2162 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2163 struct net_device *__dev_get_by_name(struct net *net, const char *name);
2164 int dev_alloc_name(struct net_device *dev, const char *name);
2165 int dev_open(struct net_device *dev);
2166 int dev_close(struct net_device *dev);
2167 void dev_disable_lro(struct net_device *dev);
2168 int dev_loopback_xmit(struct sk_buff *newskb);
2169 int dev_queue_xmit(struct sk_buff *skb);
2170 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
2171 int register_netdevice(struct net_device *dev);
2172 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2173 void unregister_netdevice_many(struct list_head *head);
2174 static inline void unregister_netdevice(struct net_device *dev)
2175 {
2176 unregister_netdevice_queue(dev, NULL);
2177 }
2178
2179 int netdev_refcnt_read(const struct net_device *dev);
2180 void free_netdev(struct net_device *dev);
2181 void netdev_freemem(struct net_device *dev);
2182 void synchronize_net(void);
2183 int init_dummy_netdev(struct net_device *dev);
2184
2185 struct net_device *dev_get_by_index(struct net *net, int ifindex);
2186 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2187 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2188 int netdev_get_name(struct net *net, char *name, int ifindex);
2189 int dev_restart(struct net_device *dev);
2190 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
2191
2192 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2193 {
2194 return NAPI_GRO_CB(skb)->data_offset;
2195 }
2196
2197 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2198 {
2199 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2200 }
2201
2202 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2203 {
2204 NAPI_GRO_CB(skb)->data_offset += len;
2205 }
2206
2207 static inline void *skb_gro_header_fast(struct sk_buff *skb,
2208 unsigned int offset)
2209 {
2210 return NAPI_GRO_CB(skb)->frag0 + offset;
2211 }
2212
2213 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2214 {
2215 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2216 }
2217
2218 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2219 unsigned int offset)
2220 {
2221 if (!pskb_may_pull(skb, hlen))
2222 return NULL;
2223
2224 NAPI_GRO_CB(skb)->frag0 = NULL;
2225 NAPI_GRO_CB(skb)->frag0_len = 0;
2226 return skb->data + offset;
2227 }
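
/* Example (illustrative sketch): the usual header-access pattern inside a
 * gro_receive handler - try the frag0 fast path first and fall back to the
 * slow path, which pulls the headers into the linear area. @hdr_len would
 * typically be sizeof() the protocol header being parsed.
 */
static inline void *example_gro_header(struct sk_buff *skb,
				       unsigned int hdr_len)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + hdr_len;
	void *hdr = skb_gro_header_fast(skb, off);

	if (skb_gro_header_hard(skb, hlen))
		hdr = skb_gro_header_slow(skb, hlen, off); /* may be NULL */
	return hdr;
}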
2228
2229 static inline void *skb_gro_network_header(struct sk_buff *skb)
2230 {
2231 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2232 skb_network_offset(skb);
2233 }
2234
2235 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2236 const void *start, unsigned int len)
2237 {
2238 if (NAPI_GRO_CB(skb)->csum_valid)
2239 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2240 csum_partial(start, len, 0));
2241 }
2242
2243 /* GRO checksum functions. These are logical equivalents of the normal
2244 * checksum functions (in skbuff.h) except that they operate on the GRO
2245 * offsets and fields in sk_buff.
2246 */
2247
2248 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2249
2250 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2251 {
2252 return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
2253 skb_gro_offset(skb));
2254 }
2255
2256 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2257 bool zero_okay,
2258 __sum16 check)
2259 {
2260 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2261 skb_checksum_start_offset(skb) <
2262 skb_gro_offset(skb)) &&
2263 !skb_at_gro_remcsum_start(skb) &&
2264 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2265 (!zero_okay || check));
2266 }
2267
2268 static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2269 __wsum psum)
2270 {
2271 if (NAPI_GRO_CB(skb)->csum_valid &&
2272 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2273 return 0;
2274
2275 NAPI_GRO_CB(skb)->csum = psum;
2276
2277 return __skb_gro_checksum_complete(skb);
2278 }
2279
2280 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2281 {
2282 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2283 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2284 NAPI_GRO_CB(skb)->csum_cnt--;
2285 } else {
2286 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2287 * verified a new top level checksum or an encapsulated one
2288 * during GRO. This saves work if we fallback to normal path.
2289 */
2290 __skb_incr_checksum_unnecessary(skb);
2291 }
2292 }
2293
2294 #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2295 compute_pseudo) \
2296 ({ \
2297 __sum16 __ret = 0; \
2298 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2299 __ret = __skb_gro_checksum_validate_complete(skb, \
2300 compute_pseudo(skb, proto)); \
2301 if (__ret) \
2302 __skb_mark_checksum_bad(skb); \
2303 else \
2304 skb_gro_incr_csum_unnecessary(skb); \
2305 __ret; \
2306 })
2307
2308 #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2309 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2310
2311 #define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2312 compute_pseudo) \
2313 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2314
2315 #define skb_gro_checksum_simple_validate(skb) \
2316 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2317
2318 static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2319 {
2320 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2321 !NAPI_GRO_CB(skb)->csum_valid);
2322 }
2323
2324 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2325 __sum16 check, __wsum pseudo)
2326 {
2327 NAPI_GRO_CB(skb)->csum = ~pseudo;
2328 NAPI_GRO_CB(skb)->csum_valid = 1;
2329 }
2330
2331 #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
2332 do { \
2333 if (__skb_gro_checksum_convert_check(skb)) \
2334 __skb_gro_checksum_convert(skb, check, \
2335 compute_pseudo(skb, proto)); \
2336 } while (0)
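
/* Example (illustrative sketch): a tunnel gro_receive handler typically
 * validates the outer checksum before merging, e.g. with the simple
 * (no pseudo header) variant above:
 *
 *	if (skb_gro_checksum_simple_validate(skb))
 *		goto out;	// bad checksum: flush instead of merging
 */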
2337
2338 struct gro_remcsum {
2339 int offset;
2340 __wsum delta;
2341 };
2342
2343 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2344 {
2345 grc->offset = 0;
2346 grc->delta = 0;
2347 }
2348
2349 static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2350 int start, int offset,
2351 struct gro_remcsum *grc,
2352 bool nopartial)
2353 {
2354 __wsum delta;
2355
2356 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2357
2358 if (!nopartial) {
2359 NAPI_GRO_CB(skb)->gro_remcsum_start =
2360 ((unsigned char *)ptr + start) - skb->head;
2361 return;
2362 }
2363
2364 delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
2365
2366 /* Adjust skb->csum since we changed the packet */
2367 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
2368
2369 grc->offset = (ptr + offset) - (void *)skb->head;
2370 grc->delta = delta;
2371 }
2372
2373 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2374 struct gro_remcsum *grc)
2375 {
2376 if (!grc->delta)
2377 return;
2378
2379 remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
2380 }
2381
2382 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2383 unsigned short type,
2384 const void *daddr, const void *saddr,
2385 unsigned int len)
2386 {
2387 if (!dev->header_ops || !dev->header_ops->create)
2388 return 0;
2389
2390 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
2391 }
2392
2393 static inline int dev_parse_header(const struct sk_buff *skb,
2394 unsigned char *haddr)
2395 {
2396 const struct net_device *dev = skb->dev;
2397
2398 if (!dev->header_ops || !dev->header_ops->parse)
2399 return 0;
2400 return dev->header_ops->parse(skb, haddr);
2401 }
2402
2403 static inline int dev_rebuild_header(struct sk_buff *skb)
2404 {
2405 const struct net_device *dev = skb->dev;
2406
2407 if (!dev->header_ops || !dev->header_ops->rebuild)
2408 return 0;
2409 return dev->header_ops->rebuild(skb);
2410 }
2411
2412 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
2413 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2414 static inline int unregister_gifconf(unsigned int family)
2415 {
2416 return register_gifconf(family, NULL);
2417 }
2418
2419 #ifdef CONFIG_NET_FLOW_LIMIT
2420 #define FLOW_LIMIT_HISTORY (1 << 7) /* must be a power of 2; must not overflow u8 buckets */
2421 struct sd_flow_limit {
2422 u64 count;
2423 unsigned int num_buckets;
2424 unsigned int history_head;
2425 u16 history[FLOW_LIMIT_HISTORY];
2426 u8 buckets[];
2427 };
2428
2429 extern int netdev_flow_limit_table_len;
2430 #endif /* CONFIG_NET_FLOW_LIMIT */
2431
2432 /*
2433 * Incoming packets are placed on per-cpu queues
2434 */
2435 struct softnet_data {
2436 struct list_head poll_list;
2437 struct sk_buff_head process_queue;
2438
2439 /* stats */
2440 unsigned int processed;
2441 unsigned int time_squeeze;
2442 unsigned int cpu_collision;
2443 unsigned int received_rps;
2444 #ifdef CONFIG_RPS
2445 struct softnet_data *rps_ipi_list;
2446 #endif
2447 #ifdef CONFIG_NET_FLOW_LIMIT
2448 struct sd_flow_limit __rcu *flow_limit;
2449 #endif
2450 struct Qdisc *output_queue;
2451 struct Qdisc **output_queue_tailp;
2452 struct sk_buff *completion_queue;
2453
2454 #ifdef CONFIG_RPS
2455 /* Elements below can be accessed between CPUs for RPS */
2456 struct call_single_data csd ____cacheline_aligned_in_smp;
2457 struct softnet_data *rps_ipi_next;
2458 unsigned int cpu;
2459 unsigned int input_queue_head;
2460 unsigned int input_queue_tail;
2461 #endif
2462 unsigned int dropped;
2463 struct sk_buff_head input_pkt_queue;
2464 struct napi_struct backlog;
2465
2466 };
2467
2468 static inline void input_queue_head_incr(struct softnet_data *sd)
2469 {
2470 #ifdef CONFIG_RPS
2471 sd->input_queue_head++;
2472 #endif
2473 }
2474
2475 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
2476 unsigned int *qtail)
2477 {
2478 #ifdef CONFIG_RPS
2479 *qtail = ++sd->input_queue_tail;
2480 #endif
2481 }
2482
2483 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
2484
2485 void __netif_schedule(struct Qdisc *q);
2486 void netif_schedule_queue(struct netdev_queue *txq);
2487
2488 static inline void netif_tx_schedule_all(struct net_device *dev)
2489 {
2490 unsigned int i;
2491
2492 for (i = 0; i < dev->num_tx_queues; i++)
2493 netif_schedule_queue(netdev_get_tx_queue(dev, i));
2494 }
2495
2496 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
2497 {
2498 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2499 }
2500
2501 /**
2502 * netif_start_queue - allow transmit
2503 * @dev: network device
2504 *
2505 * Allow upper layers to call the device hard_start_xmit routine.
2506 */
2507 static inline void netif_start_queue(struct net_device *dev)
2508 {
2509 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
2510 }
2511
2512 static inline void netif_tx_start_all_queues(struct net_device *dev)
2513 {
2514 unsigned int i;
2515
2516 for (i = 0; i < dev->num_tx_queues; i++) {
2517 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2518 netif_tx_start_queue(txq);
2519 }
2520 }
2521
2522 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2523
2524 /**
2525 * netif_wake_queue - restart transmit
2526 * @dev: network device
2527 *
2528 * Allow upper layers to call the device hard_start_xmit routine.
2529 * Used for flow control when transmit resources are available.
2530 */
2531 static inline void netif_wake_queue(struct net_device *dev)
2532 {
2533 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
2534 }
2535
2536 static inline void netif_tx_wake_all_queues(struct net_device *dev)
2537 {
2538 unsigned int i;
2539
2540 for (i = 0; i < dev->num_tx_queues; i++) {
2541 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2542 netif_tx_wake_queue(txq);
2543 }
2544 }
2545
2546 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2547 {
2548 if (WARN_ON(!dev_queue)) {
2549 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
2550 return;
2551 }
2552 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2553 }
2554
2555 /**
2556 * netif_stop_queue - stop the transmit queue
2557 * @dev: network device
2558 *
2559 * Stop upper layers calling the device hard_start_xmit routine.
2560 * Used for flow control when transmit resources are unavailable.
2561 */
2562 static inline void netif_stop_queue(struct net_device *dev)
2563 {
2564 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2565 }
2566
2567 static inline void netif_tx_stop_all_queues(struct net_device *dev)
2568 {
2569 unsigned int i;
2570
2571 for (i = 0; i < dev->num_tx_queues; i++) {
2572 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2573 netif_tx_stop_queue(txq);
2574 }
2575 }
2576
2577 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2578 {
2579 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2580 }
2581
2582 /**
2583 * netif_queue_stopped - test if transmit queue is flowblocked
2584 * @dev: network device
2585 *
2586 * Test if transmit queue on device is currently unable to send.
2587 */
2588 static inline bool netif_queue_stopped(const struct net_device *dev)
2589 {
2590 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
2591 }
2592
2593 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
2594 {
2595 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2596 }
2597
2598 static inline bool
2599 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
2600 {
2601 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2602 }
2603
2604 static inline bool
2605 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2606 {
2607 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
2608 }
2609
2610 /**
2611 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
2612 * @dev_queue: pointer to transmit queue
2613 *
2614 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
2615 * to give an appropriate hint to the CPU.
2616 */
2617 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
2618 {
2619 #ifdef CONFIG_BQL
2620 prefetchw(&dev_queue->dql.num_queued);
2621 #endif
2622 }
2623
2624 /**
2625 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
2626 * @dev_queue: pointer to transmit queue
2627 *
2628 * BQL enabled drivers might use this helper in their TX completion path,
2629 * to give an appropriate hint to the CPU.
2630 */
2631 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
2632 {
2633 #ifdef CONFIG_BQL
2634 prefetchw(&dev_queue->dql.limit);
2635 #endif
2636 }
2637
2638 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2639 unsigned int bytes)
2640 {
2641 #ifdef CONFIG_BQL
2642 dql_queued(&dev_queue->dql, bytes);
2643
2644 if (likely(dql_avail(&dev_queue->dql) >= 0))
2645 return;
2646
2647 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2648
2649 /*
2650 * The XOFF flag must be set before checking the dql_avail below,
2651 * because in netdev_tx_completed_queue we update the dql_completed
2652 * before checking the XOFF flag.
2653 */
2654 smp_mb();
2655
2656 /* check again in case another CPU has just made room avail */
2657 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2658 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2659 #endif
2660 }
2661
2662 /**
2663 * netdev_sent_queue - report the number of bytes queued to hardware
2664 * @dev: network device
2665 * @bytes: number of bytes queued to the hardware device queue
2666 *
2667 * Report the number of bytes queued for sending/completion to the network
2668 * device hardware queue. @bytes should be a good approximation; its total
2669 * must exactly match the @bytes total passed to netdev_completed_queue().
2670 */
2671 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2672 {
2673 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2674 }
2675
2676 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
2677 unsigned int pkts, unsigned int bytes)
2678 {
2679 #ifdef CONFIG_BQL
2680 if (unlikely(!bytes))
2681 return;
2682
2683 dql_completed(&dev_queue->dql, bytes);
2684
2685 /*
2686 * Without the memory barrier there is a small possibility that
2687 * netdev_tx_sent_queue will miss the update and cause the queue to
2688 * be stopped forever
2689 */
2690 smp_mb();
2691
2692 if (dql_avail(&dev_queue->dql) < 0)
2693 return;
2694
2695 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
2696 netif_schedule_queue(dev_queue);
2697 #endif
2698 }
2699
2700 /**
2701 * netdev_completed_queue - report bytes and packets completed by device
2702 * @dev: network device
2703 * @pkts: actual number of packets sent over the medium
2704 * @bytes: actual number of bytes sent over the medium
2705 *
2706 * Report the number of bytes and packets transmitted by the network device
2707 * hardware queue over the physical medium, @bytes must exactly match the
2708 * @bytes amount passed to netdev_sent_queue()
2709 */
2710 static inline void netdev_completed_queue(struct net_device *dev,
2711 unsigned int pkts, unsigned int bytes)
2712 {
2713 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
2714 }
2715
2716 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
2717 {
2718 #ifdef CONFIG_BQL
2719 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
2720 dql_reset(&q->dql);
2721 #endif
2722 }
2723
2724 /**
2725 * netdev_reset_queue - reset the packets and bytes count of a network device
2726 * @dev_queue: network device
2727 *
2728 * Reset the bytes and packet count of a network device and clear the
2729 * software flow control OFF bit for this network device
2730 */
2731 static inline void netdev_reset_queue(struct net_device *dev_queue)
2732 {
2733 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
2734 }
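
/* Example (illustrative sketch): how the BQL hooks pair up in a
 * single-queue driver. The byte totals passed to netdev_sent_queue() and
 * netdev_completed_queue() must match exactly; the function names below
 * are hypothetical.
 */
static inline void example_bql_xmit(struct net_device *dev,
				    struct sk_buff *skb)
{
	netdev_sent_queue(dev, skb->len);	/* from ndo_start_xmit() */
}

static inline void example_bql_tx_done(struct net_device *dev,
				       unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);	/* from Tx IRQ/poll */
}
/* And netdev_reset_queue(dev) when the ring is flushed, e.g. on ndo_stop(). */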
2735
2736 /**
2737 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
2738 * @dev: network device
2739 * @queue_index: given tx queue index
2740 *
2741 * Returns 0 if given tx queue index >= number of device tx queues,
2742 * otherwise returns the originally passed tx queue index.
2743 */
2744 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
2745 {
2746 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2747 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2748 dev->name, queue_index,
2749 dev->real_num_tx_queues);
2750 return 0;
2751 }
2752
2753 return queue_index;
2754 }
2755
2756 /**
2757 * netif_running - test if up
2758 * @dev: network device
2759 *
2760 * Test if the device has been brought up.
2761 */
2762 static inline bool netif_running(const struct net_device *dev)
2763 {
2764 return test_bit(__LINK_STATE_START, &dev->state);
2765 }
2766
2767 /*
2768 * Routines to manage the subqueues on a device. We only need start,
2769 * stop, and a check if it's stopped. All other device management is
2770 * done at the overall netdevice level.
2771 * We also provide a test for whether the device is multiqueue.
2772 */
2773
2774 /**
2775 * netif_start_subqueue - allow sending packets on subqueue
2776 * @dev: network device
2777 * @queue_index: sub queue index
2778 *
2779 * Start individual transmit queue of a device with multiple transmit queues.
2780 */
2781 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2782 {
2783 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2784
2785 netif_tx_start_queue(txq);
2786 }
2787
2788 /**
2789 * netif_stop_subqueue - stop sending packets on subqueue
2790 * @dev: network device
2791 * @queue_index: sub queue index
2792 *
2793 * Stop individual transmit queue of a device with multiple transmit queues.
2794 */
2795 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
2796 {
2797 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2798 netif_tx_stop_queue(txq);
2799 }
2800
2801 /**
2802 * netif_subqueue_stopped - test status of subqueue
2803 * @dev: network device
2804 * @queue_index: sub queue index
2805 *
2806 * Check individual transmit queue of a device with multiple transmit queues.
2807 */
2808 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
2809 u16 queue_index)
2810 {
2811 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2812
2813 return netif_tx_queue_stopped(txq);
2814 }
2815
2816 static inline bool netif_subqueue_stopped(const struct net_device *dev,
2817 struct sk_buff *skb)
2818 {
2819 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2820 }
2821
2822 void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
2823
2824 #ifdef CONFIG_XPS
2825 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2826 u16 index);
2827 #else
2828 static inline int netif_set_xps_queue(struct net_device *dev,
2829 const struct cpumask *mask,
2830 u16 index)
2831 {
2832 return 0;
2833 }
2834 #endif
2835
2836 /*
2837 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
2838 * as a distribution range limit for the returned value.
2839 */
2840 static inline u16 skb_tx_hash(const struct net_device *dev,
2841 struct sk_buff *skb)
2842 {
2843 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
2844 }
2845
2846 /**
2847 * netif_is_multiqueue - test if device has multiple transmit queues
2848 * @dev: network device
2849 *
2850 * Check if device has multiple transmit queues
2851 */
2852 static inline bool netif_is_multiqueue(const struct net_device *dev)
2853 {
2854 return dev->num_tx_queues > 1;
2855 }
2856
2857 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
2858
2859 #ifdef CONFIG_SYSFS
2860 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
2861 #else
2862 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2863 unsigned int rxq)
2864 {
2865 return 0;
2866 }
2867 #endif
2868
2869 #ifdef CONFIG_SYSFS
2870 static inline unsigned int get_netdev_rx_queue_index(
2871 struct netdev_rx_queue *queue)
2872 {
2873 struct net_device *dev = queue->dev;
2874 int index = queue - dev->_rx;
2875
2876 BUG_ON(index >= dev->num_rx_queues);
2877 return index;
2878 }
2879 #endif
2880
2881 #define DEFAULT_MAX_NUM_RSS_QUEUES (8)
2882 int netif_get_num_default_rss_queues(void);
2883
2884 enum skb_free_reason {
2885 SKB_REASON_CONSUMED,
2886 SKB_REASON_DROPPED,
2887 };
2888
2889 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
2890 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
2891
2892 /*
2893 * It is not allowed to call kfree_skb() or consume_skb() from hardware
2894 * interrupt context or with hardware interrupts being disabled.
2895 * (in_irq() || irqs_disabled())
2896 *
2897 * We provide four helpers that can be used in the following contexts:
2898 *
2899 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
2900 * replacing kfree_skb(skb)
2901 *
2902 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
2903 * Typically used in place of consume_skb(skb) in TX completion path
2904 *
2905 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
2906 * replacing kfree_skb(skb)
2907 *
2908 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
2909 * and consumed a packet. Used in place of consume_skb(skb)
2910 */
2911 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
2912 {
2913 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
2914 }
2915
2916 static inline void dev_consume_skb_irq(struct sk_buff *skb)
2917 {
2918 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
2919 }
2920
2921 static inline void dev_kfree_skb_any(struct sk_buff *skb)
2922 {
2923 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
2924 }
2925
2926 static inline void dev_consume_skb_any(struct sk_buff *skb)
2927 {
2928 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
2929 }
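
/* Example (illustrative sketch): in a Tx completion handler the distinction
 * matters for drop monitoring - transmitted skbs are consumed, skbs dropped
 * on error are freed:
 *
 *	if (tx_ok)
 *		dev_consume_skb_any(skb);
 *	else
 *		dev_kfree_skb_any(skb);
 */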
2930
2931 int netif_rx(struct sk_buff *skb);
2932 int netif_rx_ni(struct sk_buff *skb);
2933 int netif_receive_skb(struct sk_buff *skb);
2934 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
2935 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
2936 struct sk_buff *napi_get_frags(struct napi_struct *napi);
2937 gro_result_t napi_gro_frags(struct napi_struct *napi);
2938 struct packet_offload *gro_find_receive_by_type(__be16 type);
2939 struct packet_offload *gro_find_complete_by_type(__be16 type);
2940
2941 static inline void napi_free_frags(struct napi_struct *napi)
2942 {
2943 kfree_skb(napi->skb);
2944 napi->skb = NULL;
2945 }
2946
2947 int netdev_rx_handler_register(struct net_device *dev,
2948 rx_handler_func_t *rx_handler,
2949 void *rx_handler_data);
2950 void netdev_rx_handler_unregister(struct net_device *dev);
2951
2952 bool dev_valid_name(const char *name);
2953 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2954 int dev_ethtool(struct net *net, struct ifreq *);
2955 unsigned int dev_get_flags(const struct net_device *);
2956 int __dev_change_flags(struct net_device *, unsigned int flags);
2957 int dev_change_flags(struct net_device *, unsigned int);
2958 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
2959 unsigned int gchanges);
2960 int dev_change_name(struct net_device *, const char *);
2961 int dev_set_alias(struct net_device *, const char *, size_t);
2962 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
2963 int dev_set_mtu(struct net_device *, int);
2964 void dev_set_group(struct net_device *, int);
2965 int dev_set_mac_address(struct net_device *, struct sockaddr *);
2966 int dev_change_carrier(struct net_device *, bool new_carrier);
2967 int dev_get_phys_port_id(struct net_device *dev,
2968 struct netdev_phys_item_id *ppid);
2969 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
2970 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2971 struct netdev_queue *txq, int *ret);
2972 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2973 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2974 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
2975
2976 extern int netdev_budget;
2977
2978 /* Called by rtnetlink.c:rtnl_unlock() */
2979 void netdev_run_todo(void);
2980
2981 /**
2982 * dev_put - release reference to device
2983 * @dev: network device
2984 *
2985 * Release reference to device to allow it to be freed.
2986 */
2987 static inline void dev_put(struct net_device *dev)
2988 {
2989 this_cpu_dec(*dev->pcpu_refcnt);
2990 }
2991
2992 /**
2993 * dev_hold - get reference to device
2994 * @dev: network device
2995 *
2996 * Hold reference to device to keep it from being freed.
2997 */
2998 static inline void dev_hold(struct net_device *dev)
2999 {
3000 this_cpu_inc(*dev->pcpu_refcnt);
3001 }
3002
3003 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
3004 * and _off may be called from IRQ context, but it is the caller
3005 * who is responsible for serializing these calls.
3006 *
3007 * The name carrier is inappropriate; these functions should really be
3008 * called netif_lowerlayer_*() because they represent the state of any
3009 * kind of lower layer not just hardware media.
3010 */
3011
3012 void linkwatch_init_dev(struct net_device *dev);
3013 void linkwatch_fire_event(struct net_device *dev);
3014 void linkwatch_forget_dev(struct net_device *dev);
3015
3016 /**
3017 * netif_carrier_ok - test if carrier present
3018 * @dev: network device
3019 *
3020 * Check if carrier is present on device
3021 */
3022 static inline bool netif_carrier_ok(const struct net_device *dev)
3023 {
3024 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
3025 }
3026
3027 unsigned long dev_trans_start(struct net_device *dev);
3028
3029 void __netdev_watchdog_up(struct net_device *dev);
3030
3031 void netif_carrier_on(struct net_device *dev);
3032
3033 void netif_carrier_off(struct net_device *dev);
3034
3035 /**
3036 * netif_dormant_on - mark device as dormant.
3037 * @dev: network device
3038 *
3039 * Mark device as dormant (as per RFC2863).
3040 *
3041 * The dormant state indicates that the relevant interface is not
3042 * actually in a condition to pass packets (i.e., it is not 'up') but is
3043 * in a "pending" state, waiting for some external event. For "on-
3044 * demand" interfaces, this new state identifies the situation where the
3045 * interface is waiting for events to place it in the up state.
3046 *
3047 */
3048 static inline void netif_dormant_on(struct net_device *dev)
3049 {
3050 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
3051 linkwatch_fire_event(dev);
3052 }
3053
3054 /**
3055 * netif_dormant_off - set device as not dormant.
3056 * @dev: network device
3057 *
3058 * Device is not in dormant state.
3059 */
3060 static inline void netif_dormant_off(struct net_device *dev)
3061 {
3062 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
3063 linkwatch_fire_event(dev);
3064 }
3065
3066 /**
3067 * netif_dormant - test if device is dormant
3068 * @dev: network device
3069 *
3070 * Check if the device is dormant.
3071 */
3072 static inline bool netif_dormant(const struct net_device *dev)
3073 {
3074 return test_bit(__LINK_STATE_DORMANT, &dev->state);
3075 }
3076
3077
3078 /**
3079 * netif_oper_up - test if device is operational
3080 * @dev: network device
3081 *
3082 * Check if carrier is operational
3083 */
3084 static inline bool netif_oper_up(const struct net_device *dev)
3085 {
3086 return (dev->operstate == IF_OPER_UP ||
3087 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
3088 }
3089
3090 /**
3091 * netif_device_present - is device available or removed
3092 * @dev: network device
3093 *
3094 * Check if device has not been removed from system.
3095 */
3096 static inline bool netif_device_present(struct net_device *dev)
3097 {
3098 return test_bit(__LINK_STATE_PRESENT, &dev->state);
3099 }
3100
3101 void netif_device_detach(struct net_device *dev);
3102
3103 void netif_device_attach(struct net_device *dev);
3104
3105 /*
3106 * Network interface message level settings
3107 */
3108
3109 enum {
3110 NETIF_MSG_DRV = 0x0001,
3111 NETIF_MSG_PROBE = 0x0002,
3112 NETIF_MSG_LINK = 0x0004,
3113 NETIF_MSG_TIMER = 0x0008,
3114 NETIF_MSG_IFDOWN = 0x0010,
3115 NETIF_MSG_IFUP = 0x0020,
3116 NETIF_MSG_RX_ERR = 0x0040,
3117 NETIF_MSG_TX_ERR = 0x0080,
3118 NETIF_MSG_TX_QUEUED = 0x0100,
3119 NETIF_MSG_INTR = 0x0200,
3120 NETIF_MSG_TX_DONE = 0x0400,
3121 NETIF_MSG_RX_STATUS = 0x0800,
3122 NETIF_MSG_PKTDATA = 0x1000,
3123 NETIF_MSG_HW = 0x2000,
3124 NETIF_MSG_WOL = 0x4000,
3125 };
3126
3127 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
3128 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
3129 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
3130 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
3131 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
3132 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
3133 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
3134 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
3135 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
3136 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
3137 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
3138 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
3139 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
3140 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
3141 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
3142
3143 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
3144 {
3145 /* use default */
3146 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
3147 return default_msg_enable_bits;
3148 if (debug_value == 0) /* no output */
3149 return 0;
3150 /* set low N bits */
3151 return (1 << debug_value) - 1;
3152 }
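
/* Example (illustrative sketch): typical driver usage of netif_msg_init()
 * with a module parameter; "debug" and the priv layout are hypothetical:
 *
 *	static int debug = -1;		// -1 selects the driver defaults
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	if (netif_msg_link(priv))
 *		pr_info("%s: link is up\n", dev->name);
 */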
3153
3154 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
3155 {
3156 spin_lock(&txq->_xmit_lock);
3157 txq->xmit_lock_owner = cpu;
3158 }
3159
3160 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
3161 {
3162 spin_lock_bh(&txq->_xmit_lock);
3163 txq->xmit_lock_owner = smp_processor_id();
3164 }
3165
3166 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
3167 {
3168 bool ok = spin_trylock(&txq->_xmit_lock);
3169 if (likely(ok))
3170 txq->xmit_lock_owner = smp_processor_id();
3171 return ok;
3172 }
3173
3174 static inline void __netif_tx_unlock(struct netdev_queue *txq)
3175 {
3176 txq->xmit_lock_owner = -1;
3177 spin_unlock(&txq->_xmit_lock);
3178 }
3179
3180 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
3181 {
3182 txq->xmit_lock_owner = -1;
3183 spin_unlock_bh(&txq->_xmit_lock);
3184 }
3185
3186 static inline void txq_trans_update(struct netdev_queue *txq)
3187 {
3188 if (txq->xmit_lock_owner != -1)
3189 txq->trans_start = jiffies;
3190 }
3191
3192 /**
3193 * netif_tx_lock - grab network device transmit lock
3194 * @dev: network device
3195 *
3196 * Get network device transmit lock
3197 */
3198 static inline void netif_tx_lock(struct net_device *dev)
3199 {
3200 unsigned int i;
3201 int cpu;
3202
3203 spin_lock(&dev->tx_global_lock);
3204 cpu = smp_processor_id();
3205 for (i = 0; i < dev->num_tx_queues; i++) {
3206 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3207
3208 /* We are the only thread of execution doing a
3209 * freeze, but we have to grab the _xmit_lock in
3210 * order to synchronize with threads which are in
3211 * the ->hard_start_xmit() handler and already
3212 * checked the frozen bit.
3213 */
3214 __netif_tx_lock(txq, cpu);
3215 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
3216 __netif_tx_unlock(txq);
3217 }
3218 }
3219
3220 static inline void netif_tx_lock_bh(struct net_device *dev)
3221 {
3222 local_bh_disable();
3223 netif_tx_lock(dev);
3224 }
3225
3226 static inline void netif_tx_unlock(struct net_device *dev)
3227 {
3228 unsigned int i;
3229
3230 for (i = 0; i < dev->num_tx_queues; i++) {
3231 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3232
3233 /* No need to grab the _xmit_lock here. If the
3234 * queue is not stopped for another reason, we
3235 * force a schedule.
3236 */
3237 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
3238 netif_schedule_queue(txq);
3239 }
3240 spin_unlock(&dev->tx_global_lock);
3241 }
3242
3243 static inline void netif_tx_unlock_bh(struct net_device *dev)
3244 {
3245 netif_tx_unlock(dev);
3246 local_bh_enable();
3247 }
3248
3249 #define HARD_TX_LOCK(dev, txq, cpu) { \
3250 if ((dev->features & NETIF_F_LLTX) == 0) { \
3251 __netif_tx_lock(txq, cpu); \
3252 } \
3253 }
3254
3255 #define HARD_TX_TRYLOCK(dev, txq) \
3256 (((dev->features & NETIF_F_LLTX) == 0) ? \
3257 __netif_tx_trylock(txq) : \
3258 true )
3259
3260 #define HARD_TX_UNLOCK(dev, txq) { \
3261 if ((dev->features & NETIF_F_LLTX) == 0) { \
3262 __netif_tx_unlock(txq); \
3263 } \
3264 }
3265
3266 static inline void netif_tx_disable(struct net_device *dev)
3267 {
3268 unsigned int i;
3269 int cpu;
3270
3271 local_bh_disable();
3272 cpu = smp_processor_id();
3273 for (i = 0; i < dev->num_tx_queues; i++) {
3274 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3275
3276 __netif_tx_lock(txq, cpu);
3277 netif_tx_stop_queue(txq);
3278 __netif_tx_unlock(txq);
3279 }
3280 local_bh_enable();
3281 }
3282
3283 static inline void netif_addr_lock(struct net_device *dev)
3284 {
3285 spin_lock(&dev->addr_list_lock);
3286 }
3287
3288 static inline void netif_addr_lock_nested(struct net_device *dev)
3289 {
3290 int subclass = SINGLE_DEPTH_NESTING;
3291
3292 if (dev->netdev_ops->ndo_get_lock_subclass)
3293 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
3294
3295 spin_lock_nested(&dev->addr_list_lock, subclass);
3296 }
3297
3298 static inline void netif_addr_lock_bh(struct net_device *dev)
3299 {
3300 spin_lock_bh(&dev->addr_list_lock);
3301 }
3302
3303 static inline void netif_addr_unlock(struct net_device *dev)
3304 {
3305 spin_unlock(&dev->addr_list_lock);
3306 }
3307
3308 static inline void netif_addr_unlock_bh(struct net_device *dev)
3309 {
3310 spin_unlock_bh(&dev->addr_list_lock);
3311 }
3312
3313 /*
3314 * dev_addrs walker. Should be used only for read access. Call with
3315 * rcu_read_lock held.
3316 */
3317 #define for_each_dev_addr(dev, ha) \
3318 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
3319
3320 /* These functions live elsewhere (drivers/net/net_init.c), but are related */
3321
3322 void ether_setup(struct net_device *dev);
3323
3324 /* Support for loadable net-drivers */
3325 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
3326 unsigned char name_assign_type,
3327 void (*setup)(struct net_device *),
3328 unsigned int txqs, unsigned int rxqs);
3329 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
3330 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
3331
3332 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
3333 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
3334 count)
3335
3336 int register_netdev(struct net_device *dev);
3337 void unregister_netdev(struct net_device *dev);
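
/* Example (illustrative sketch): the usual lifecycle of a loadable driver;
 * "struct example_priv" is hypothetical and ether_setup() (declared above)
 * is the common setup helper for Ethernet-like devices:
 *
 *	dev = alloc_netdev(sizeof(struct example_priv), "ex%d",
 *			   NET_NAME_UNKNOWN, ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */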
3338
3339 /* General hardware address lists handling functions */
3340 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3341 struct netdev_hw_addr_list *from_list, int addr_len);
3342 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3343 struct netdev_hw_addr_list *from_list, int addr_len);
3344 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
3345 struct net_device *dev,
3346 int (*sync)(struct net_device *, const unsigned char *),
3347 int (*unsync)(struct net_device *,
3348 const unsigned char *));
3349 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
3350 struct net_device *dev,
3351 int (*unsync)(struct net_device *,
3352 const unsigned char *));
3353 void __hw_addr_init(struct netdev_hw_addr_list *list);
3354
3355 /* Functions used for device addresses handling */
3356 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
3357 unsigned char addr_type);
3358 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
3359 unsigned char addr_type);
3360 void dev_addr_flush(struct net_device *dev);
3361 int dev_addr_init(struct net_device *dev);
3362
3363 /* Functions used for unicast addresses handling */
3364 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
3365 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
3366 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
3367 int dev_uc_sync(struct net_device *to, struct net_device *from);
3368 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
3369 void dev_uc_unsync(struct net_device *to, struct net_device *from);
3370 void dev_uc_flush(struct net_device *dev);
3371 void dev_uc_init(struct net_device *dev);
3372
3373 /**
3374 * __dev_uc_sync - Synchronize device's unicast list
3375 * @dev: device to sync
3376 * @sync: function to call if address should be added
3377 * @unsync: function to call if address should be removed
3378 *
3379 * Add newly added addresses to the interface, and release
3380 * addresses that have been deleted.
3381 **/
3382 static inline int __dev_uc_sync(struct net_device *dev,
3383 int (*sync)(struct net_device *,
3384 const unsigned char *),
3385 int (*unsync)(struct net_device *,
3386 const unsigned char *))
3387 {
3388 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
3389 }
3390
3391 /**
3392 * __dev_uc_unsync - Remove synchronized addresses from device
3393 * @dev: device to sync
3394 * @unsync: function to call if address should be removed
3395 *
3396 * Remove all addresses that were added to the device by dev_uc_sync().
3397 **/
3398 static inline void __dev_uc_unsync(struct net_device *dev,
3399 int (*unsync)(struct net_device *,
3400 const unsigned char *))
3401 {
3402 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
3403 }
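
/*
 * Illustrative sketch: a driver typically calls __dev_uc_sync() from
 * its ndo_set_rx_mode() with two callbacks that program its address
 * filter; foo_hw_filter_add()/foo_hw_filter_del() are hypothetical.
 * __dev_mc_sync() below is used the same way for multicast addresses.
 *
 *	static int foo_uc_sync(struct net_device *dev,
 *			       const unsigned char *addr)
 *	{
 *		return foo_hw_filter_add(netdev_priv(dev), addr);
 *	}
 *
 *	static int foo_uc_unsync(struct net_device *dev,
 *				 const unsigned char *addr)
 *	{
 *		return foo_hw_filter_del(netdev_priv(dev), addr);
 *	}
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, foo_uc_sync, foo_uc_unsync);
 *	}
 */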
3404
3405 /* Functions used for multicast addresses handling */
3406 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
3407 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
3408 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
3409 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
3410 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
3411 int dev_mc_sync(struct net_device *to, struct net_device *from);
3412 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
3413 void dev_mc_unsync(struct net_device *to, struct net_device *from);
3414 void dev_mc_flush(struct net_device *dev);
3415 void dev_mc_init(struct net_device *dev);
3416
3417 /**
3418 * __dev_mc_sync - Synchronize device's multicast list
3419 * @dev: device to sync
3420 * @sync: function to call if address should be added
3421 * @unsync: function to call if address should be removed
3422 *
3423 * Add newly added addresses to the interface, and release
3424 * addresses that have been deleted.
3425 **/
3426 static inline int __dev_mc_sync(struct net_device *dev,
3427 int (*sync)(struct net_device *,
3428 const unsigned char *),
3429 int (*unsync)(struct net_device *,
3430 const unsigned char *))
3431 {
3432 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
3433 }
3434
3435 /**
3436 * __dev_mc_unsync - Remove synchronized addresses from device
3437 * @dev: device to sync
3438 * @unsync: function to call if address should be removed
3439 *
3440 * Remove all addresses that were added to the device by dev_mc_sync().
3441 **/
3442 static inline void __dev_mc_unsync(struct net_device *dev,
3443 int (*unsync)(struct net_device *,
3444 const unsigned char *))
3445 {
3446 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
3447 }
3448
3449 /* Functions used for secondary unicast and multicast support */
3450 void dev_set_rx_mode(struct net_device *dev);
3451 void __dev_set_rx_mode(struct net_device *dev);
3452 int dev_set_promiscuity(struct net_device *dev, int inc);
3453 int dev_set_allmulti(struct net_device *dev, int inc);
3454 void netdev_state_change(struct net_device *dev);
3455 void netdev_notify_peers(struct net_device *dev);
3456 void netdev_features_change(struct net_device *dev);
3457 /* Load a device via the kmod */
3458 void dev_load(struct net *net, const char *name);
3459 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
3460 struct rtnl_link_stats64 *storage);
3461 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
3462 const struct net_device_stats *netdev_stats);
3463
3464 extern int netdev_max_backlog;
3465 extern int netdev_tstamp_prequeue;
3466 extern int weight_p;
3467 extern int bpf_jit_enable;
3468
3469 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
3470 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
3471 struct list_head **iter);
3472 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
3473 struct list_head **iter);
3474
3475 /* iterate through upper list, must be called under RCU read lock */
3476 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
3477 for (iter = &(dev)->adj_list.upper, \
3478 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
3479 updev; \
3480 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
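
/*
 * Illustrative sketch: these iterators must run under the RCU read
 * lock, e.g.:
 *
 *	struct net_device *updev;
 *	struct list_head *iter;
 *
 *	rcu_read_lock();
 *	netdev_for_each_upper_dev_rcu(dev, updev, iter)
 *		pr_info("upper dev: %s\n", updev->name);
 *	rcu_read_unlock();
 */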
3481
3482 /* iterate through upper list, must be called under RCU read lock */
3483 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
3484 for (iter = &(dev)->all_adj_list.upper, \
3485 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
3486 updev; \
3487 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
3488
3489 void *netdev_lower_get_next_private(struct net_device *dev,
3490 struct list_head **iter);
3491 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3492 struct list_head **iter);
3493
3494 #define netdev_for_each_lower_private(dev, priv, iter) \
3495 for (iter = (dev)->adj_list.lower.next, \
3496 priv = netdev_lower_get_next_private(dev, &(iter)); \
3497 priv; \
3498 priv = netdev_lower_get_next_private(dev, &(iter)))
3499
3500 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
3501 for (iter = &(dev)->adj_list.lower, \
3502 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
3503 priv; \
3504 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
3505
3506 void *netdev_lower_get_next(struct net_device *dev,
3507 struct list_head **iter);
3508 #define netdev_for_each_lower_dev(dev, ldev, iter) \
3509 for (iter = &(dev)->adj_list.lower, \
3510 ldev = netdev_lower_get_next(dev, &(iter)); \
3511 ldev; \
3512 ldev = netdev_lower_get_next(dev, &(iter)))
3513
3514 void *netdev_adjacent_get_private(struct list_head *adj_list);
3515 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3516 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
3517 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
3518 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
3519 int netdev_master_upper_dev_link(struct net_device *dev,
3520 struct net_device *upper_dev);
3521 int netdev_master_upper_dev_link_private(struct net_device *dev,
3522 struct net_device *upper_dev,
3523 void *private);
3524 void netdev_upper_dev_unlink(struct net_device *dev,
3525 struct net_device *upper_dev);
3526 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3527 void *netdev_lower_dev_get_private(struct net_device *dev,
3528 struct net_device *lower_dev);
3529
3530 /* RSS keys are 40 or 52 bytes long */
3531 #define NETDEV_RSS_KEY_LEN 52
3532 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
3533 void netdev_rss_key_fill(void *buffer, size_t len);
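
/*
 * Illustrative sketch: an ethtool ->get_rxfh() implementation might
 * seed the key it reports from the system-wide RSS key; the 40-byte
 * buffer is just the common Toeplitz key size, used here as an
 * assumption.
 *
 *	u8 hkey[40];
 *
 *	netdev_rss_key_fill(hkey, sizeof(hkey));
 */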
3534
3535 int dev_get_nest_level(struct net_device *dev,
3536 bool (*type_check)(struct net_device *dev));
3537 int skb_checksum_help(struct sk_buff *skb);
3538 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3539 netdev_features_t features, bool tx_path);
3540 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3541 netdev_features_t features);
3542
3543 struct netdev_bonding_info {
3544 ifslave slave;
3545 ifbond master;
3546 };
3547
3548 struct netdev_notifier_bonding_info {
3549 struct netdev_notifier_info info; /* must be first */
3550 struct netdev_bonding_info bonding_info;
3551 };
3552
3553 void netdev_bonding_info_change(struct net_device *dev,
3554 struct netdev_bonding_info *bonding_info);
3555
3556 static inline
3557 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
3558 {
3559 return __skb_gso_segment(skb, features, true);
3560 }
3561 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
3562
3563 static inline bool can_checksum_protocol(netdev_features_t features,
3564 __be16 protocol)
3565 {
3566 return ((features & NETIF_F_GEN_CSUM) ||
3567 ((features & NETIF_F_V4_CSUM) &&
3568 protocol == htons(ETH_P_IP)) ||
3569 ((features & NETIF_F_V6_CSUM) &&
3570 protocol == htons(ETH_P_IPV6)) ||
3571 ((features & NETIF_F_FCOE_CRC) &&
3572 protocol == htons(ETH_P_FCOE)));
3573 }
3574
3575 #ifdef CONFIG_BUG
3576 void netdev_rx_csum_fault(struct net_device *dev);
3577 #else
3578 static inline void netdev_rx_csum_fault(struct net_device *dev)
3579 {
3580 }
3581 #endif
3582 /* rx skb timestamps */
3583 void net_enable_timestamp(void);
3584 void net_disable_timestamp(void);
3585
3586 #ifdef CONFIG_PROC_FS
3587 int __init dev_proc_init(void);
3588 #else
3589 #define dev_proc_init() 0
3590 #endif
3591
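/* The 'more' hint is propagated to the driver via skb->xmit_more: when
 * set, the stack has further packets queued for this queue, so the
 * driver may batch doorbell/tail-pointer updates instead of notifying
 * the hardware for every single skb.
 */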
3592 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
3593 struct sk_buff *skb, struct net_device *dev,
3594 bool more)
3595 {
3596 skb->xmit_more = more ? 1 : 0;
3597 return ops->ndo_start_xmit(skb, dev);
3598 }
3599
3600 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
3601 struct netdev_queue *txq, bool more)
3602 {
3603 const struct net_device_ops *ops = dev->netdev_ops;
3604 int rc;
3605
3606 rc = __netdev_start_xmit(ops, skb, dev, more);
3607 if (rc == NETDEV_TX_OK)
3608 txq_trans_update(txq);
3609
3610 return rc;
3611 }
3612
3613 int netdev_class_create_file_ns(struct class_attribute *class_attr,
3614 const void *ns);
3615 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
3616 const void *ns);
3617
3618 static inline int netdev_class_create_file(struct class_attribute *class_attr)
3619 {
3620 return netdev_class_create_file_ns(class_attr, NULL);
3621 }
3622
3623 static inline void netdev_class_remove_file(struct class_attribute *class_attr)
3624 {
3625 netdev_class_remove_file_ns(class_attr, NULL);
3626 }
3627
3628 extern struct kobj_ns_type_operations net_ns_type_operations;
3629
3630 const char *netdev_drivername(const struct net_device *dev);
3631
3632 void linkwatch_run_queue(void);
3633
3634 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
3635 netdev_features_t f2)
3636 {
3637 if (f1 & NETIF_F_GEN_CSUM)
3638 f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3639 if (f2 & NETIF_F_GEN_CSUM)
3640 f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3641 f1 &= f2;
3642 if (f1 & NETIF_F_GEN_CSUM)
3643 f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3644
3645 return f1;
3646 }
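
/*
 * Illustrative sketch: stacked devices (bonding, team, VLAN) typically
 * use this to compute the feature set they can safely advertise across
 * all lower devices; 'slave' is an assumed lower device here.
 *
 *	netdev_features_t features = NETIF_F_ALL_CSUM | NETIF_F_SG;
 *
 *	features = netdev_intersect_features(features, slave->features);
 */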
3647
3648 static inline netdev_features_t netdev_get_wanted_features(
3649 struct net_device *dev)
3650 {
3651 return (dev->features & ~dev->hw_features) | dev->wanted_features;
3652 }
3653 netdev_features_t netdev_increment_features(netdev_features_t all,
3654 netdev_features_t one, netdev_features_t mask);
3655
3656 /* Allow TSO to be used on stacked devices:
3657 * performing the GSO segmentation before the last device
3658 * is a performance improvement.
3659 */
3660 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
3661 netdev_features_t mask)
3662 {
3663 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
3664 }
3665
3666 int __netdev_update_features(struct net_device *dev);
3667 void netdev_update_features(struct net_device *dev);
3668 void netdev_change_features(struct net_device *dev);
3669
3670 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
3671 struct net_device *dev);
3672
3673 netdev_features_t netif_skb_features(struct sk_buff *skb);
3674
3675 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3676 {
3677 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
3678
3679 /* check flags correspondence */
3680 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
3681 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
3682 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
3683 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
3684 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
3685 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
3686 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
3687 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
3688 BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
3689 BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
3690 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
3691 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
3692 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
3693
3694 return (features & feature) == feature;
3695 }
3696
3697 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
3698 {
3699 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
3700 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
3701 }
3702
3703 static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
3704 netdev_features_t features)
3705 {
3706 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
3707 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
3708 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
3709 }
3710
3711 static inline void netif_set_gso_max_size(struct net_device *dev,
3712 unsigned int size)
3713 {
3714 dev->gso_max_size = size;
3715 }
3716
3717 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
3718 int pulled_hlen, u16 mac_offset,
3719 int mac_len)
3720 {
3721 skb->protocol = protocol;
3722 skb->encapsulation = 1;
3723 skb_push(skb, pulled_hlen);
3724 skb_reset_transport_header(skb);
3725 skb->mac_header = mac_offset;
3726 skb->network_header = skb->mac_header + mac_len;
3727 skb->mac_len = mac_len;
3728 }
3729
3730 static inline bool netif_is_macvlan(struct net_device *dev)
3731 {
3732 return dev->priv_flags & IFF_MACVLAN;
3733 }
3734
3735 static inline bool netif_is_macvlan_port(struct net_device *dev)
3736 {
3737 return dev->priv_flags & IFF_MACVLAN_PORT;
3738 }
3739
3740 static inline bool netif_is_ipvlan(struct net_device *dev)
3741 {
3742 return dev->priv_flags & IFF_IPVLAN_SLAVE;
3743 }
3744
3745 static inline bool netif_is_ipvlan_port(struct net_device *dev)
3746 {
3747 return dev->priv_flags & IFF_IPVLAN_MASTER;
3748 }
3749
3750 static inline bool netif_is_bond_master(struct net_device *dev)
3751 {
3752 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
3753 }
3754
3755 static inline bool netif_is_bond_slave(struct net_device *dev)
3756 {
3757 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
3758 }
3759
3760 static inline bool netif_supports_nofcs(struct net_device *dev)
3761 {
3762 return dev->priv_flags & IFF_SUPP_NOFCS;
3763 }
3764
3765 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
3766 static inline void netif_keep_dst(struct net_device *dev)
3767 {
3768 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
3769 }
3770
3771 extern struct pernet_operations __net_initdata loopback_net_ops;
3772
3773 /* Logging, debugging and troubleshooting/diagnostic helpers. */
3774
3775 /* netdev_printk helpers, similar to dev_printk */
3776
3777 static inline const char *netdev_name(const struct net_device *dev)
3778 {
3779 if (!dev->name[0] || strchr(dev->name, '%'))
3780 return "(unnamed net_device)";
3781 return dev->name;
3782 }
3783
3784 static inline const char *netdev_reg_state(const struct net_device *dev)
3785 {
3786 switch (dev->reg_state) {
3787 case NETREG_UNINITIALIZED: return " (uninitialized)";
3788 case NETREG_REGISTERED: return "";
3789 case NETREG_UNREGISTERING: return " (unregistering)";
3790 case NETREG_UNREGISTERED: return " (unregistered)";
3791 case NETREG_RELEASED: return " (released)";
3792 case NETREG_DUMMY: return " (dummy)";
3793 }
3794
3795 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
3796 return " (unknown)";
3797 }
3798
3799 __printf(3, 4)
3800 void netdev_printk(const char *level, const struct net_device *dev,
3801 const char *format, ...);
3802 __printf(2, 3)
3803 void netdev_emerg(const struct net_device *dev, const char *format, ...);
3804 __printf(2, 3)
3805 void netdev_alert(const struct net_device *dev, const char *format, ...);
3806 __printf(2, 3)
3807 void netdev_crit(const struct net_device *dev, const char *format, ...);
3808 __printf(2, 3)
3809 void netdev_err(const struct net_device *dev, const char *format, ...);
3810 __printf(2, 3)
3811 void netdev_warn(const struct net_device *dev, const char *format, ...);
3812 __printf(2, 3)
3813 void netdev_notice(const struct net_device *dev, const char *format, ...);
3814 __printf(2, 3)
3815 void netdev_info(const struct net_device *dev, const char *format, ...);
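
/*
 * Illustrative sketch: these helpers prefix the message with the
 * device name (and registration state when unusual), e.g.:
 *
 *	netdev_err(dev, "TX watchdog timeout on queue %u\n", i);
 */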
3816
3817 #define MODULE_ALIAS_NETDEV(device) \
3818 MODULE_ALIAS("netdev-" device)
3819
3820 #if defined(CONFIG_DYNAMIC_DEBUG)
3821 #define netdev_dbg(__dev, format, args...) \
3822 do { \
3823 dynamic_netdev_dbg(__dev, format, ##args); \
3824 } while (0)
3825 #elif defined(DEBUG)
3826 #define netdev_dbg(__dev, format, args...) \
3827 netdev_printk(KERN_DEBUG, __dev, format, ##args)
3828 #else
3829 #define netdev_dbg(__dev, format, args...) \
3830 ({ \
3831 if (0) \
3832 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
3833 })
3834 #endif
3835
3836 #if defined(VERBOSE_DEBUG)
3837 #define netdev_vdbg netdev_dbg
3838 #else
3839
3840 #define netdev_vdbg(dev, format, args...) \
3841 ({ \
3842 if (0) \
3843 netdev_printk(KERN_DEBUG, dev, format, ##args); \
3844 0; \
3845 })
3846 #endif
3847
3848 /*
3849 * netdev_WARN() acts like netdev_printk(), but with the key difference
3850 * of using a WARN/WARN_ON to get the message out, including the
3851 * file/line information and a backtrace.
3852 */
3853 #define netdev_WARN(dev, format, args...) \
3854 WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
3855 netdev_reg_state(dev), ##args)
3856
3857 /* netif printk helpers, similar to netdev_printk */
3858
3859 #define netif_printk(priv, type, level, dev, fmt, args...) \
3860 do { \
3861 if (netif_msg_##type(priv)) \
3862 netdev_printk(level, (dev), fmt, ##args); \
3863 } while (0)
3864
3865 #define netif_level(level, priv, type, dev, fmt, args...) \
3866 do { \
3867 if (netif_msg_##type(priv)) \
3868 netdev_##level(dev, fmt, ##args); \
3869 } while (0)
3870
3871 #define netif_emerg(priv, type, dev, fmt, args...) \
3872 netif_level(emerg, priv, type, dev, fmt, ##args)
3873 #define netif_alert(priv, type, dev, fmt, args...) \
3874 netif_level(alert, priv, type, dev, fmt, ##args)
3875 #define netif_crit(priv, type, dev, fmt, args...) \
3876 netif_level(crit, priv, type, dev, fmt, ##args)
3877 #define netif_err(priv, type, dev, fmt, args...) \
3878 netif_level(err, priv, type, dev, fmt, ##args)
3879 #define netif_warn(priv, type, dev, fmt, args...) \
3880 netif_level(warn, priv, type, dev, fmt, ##args)
3881 #define netif_notice(priv, type, dev, fmt, args...) \
3882 netif_level(notice, priv, type, dev, fmt, ##args)
3883 #define netif_info(priv, type, dev, fmt, args...) \
3884 netif_level(info, priv, type, dev, fmt, ##args)
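
/*
 * Illustrative sketch: the netif_* variants additionally gate on the
 * driver's msg_enable bitmap via netif_msg_<type>(); foo_priv is an
 * assumed driver-private struct with an msg_enable field.
 *
 *	struct foo_priv *priv = netdev_priv(dev);
 *
 *	netif_info(priv, link, dev, "link up, %u Mb/s\n", speed);
 */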
3885
3886 #if defined(CONFIG_DYNAMIC_DEBUG)
3887 #define netif_dbg(priv, type, netdev, format, args...) \
3888 do { \
3889 if (netif_msg_##type(priv)) \
3890 dynamic_netdev_dbg(netdev, format, ##args); \
3891 } while (0)
3892 #elif defined(DEBUG)
3893 #define netif_dbg(priv, type, dev, format, args...) \
3894 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
3895 #else
3896 #define netif_dbg(priv, type, dev, format, args...) \
3897 ({ \
3898 if (0) \
3899 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
3900 0; \
3901 })
3902 #endif
3903
3904 #if defined(VERBOSE_DEBUG)
3905 #define netif_vdbg netif_dbg
3906 #else
3907 #define netif_vdbg(priv, type, dev, format, args...) \
3908 ({ \
3909 if (0) \
3910 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
3911 0; \
3912 })
3913 #endif
3914
3915 /*
3916 * The list of packet types we will receive (as opposed to discard)
3917 * and the routines to invoke.
3918 *
3919 * Why 16? Because with 16 the only overlap we get on a hash of the
3920 * low nibble of the protocol value is RARP/SNAP/X.25.
3921 *
3922 * NOTE: That is no longer true with the addition of VLAN tags. Not
3923 * sure which should go first, but I bet it won't make much
3924 * difference if we are running VLANs. The good news is that
3925 * this protocol won't be in the list unless compiled in, so
3926 * the average user (without VLANs) will not be adversely affected.
3927 * --BLG
3928 *
3929 * 0800 IP
3930 * 8100 802.1Q VLAN
3931 * 0001 802.3
3932 * 0002 AX.25
3933 * 0004 802.2
3934 * 8035 RARP
3935 * 0005 SNAP
3936 * 0805 X.25
3937 * 0806 ARP
3938 * 8137 IPX
3939 * 0009 Localtalk
3940 * 86DD IPv6
3941 */
3942 #define PTYPE_HASH_SIZE (16)
3943 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
3944
3945 #endif /* _LINUX_NETDEVICE_H */