2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Definitions for the Interfaces handler.
8 * Version: @(#)dev.h 1.0.10 08/12/93
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
14 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
15 * Bjorn Ekwall. <bj0rn@blox.se>
16 * Pekka Riikonen <priikone@poseidon.pspt.fi>
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
23 * Moved to /usr/include/linux for NET3
25 #ifndef _LINUX_NETDEVICE_H
26 #define _LINUX_NETDEVICE_H
29 #include <linux/if_ether.h>
30 #include <linux/if_packet.h>
31 #include <linux/if_link.h>
34 #include <linux/pm_qos.h>
35 #include <linux/timer.h>
36 #include <linux/bug.h>
37 #include <linux/delay.h>
38 #include <linux/atomic.h>
39 #include <asm/cache.h>
40 #include <asm/byteorder.h>
42 #include <linux/percpu.h>
43 #include <linux/rculist.h>
44 #include <linux/dmaengine.h>
45 #include <linux/workqueue.h>
46 #include <linux/dynamic_queue_limits.h>
48 #include <linux/ethtool.h>
49 #include <net/net_namespace.h>
52 #include <net/dcbnl.h>
54 #include <net/netprio_cgroup.h>
56 #include <linux/netdev_features.h>
57 #include <linux/neighbour.h>
64 /* source back-compat hooks */
65 #define SET_ETHTOOL_OPS(netdev,ops) \
66 ( (netdev)->ethtool_ops = (ops) )
68 /* hardware address assignment types */
69 #define NET_ADDR_PERM 0 /* address is permanent (default) */
70 #define NET_ADDR_RANDOM 1 /* address is generated randomly */
71 #define NET_ADDR_STOLEN 2 /* address is stolen from other device */
73 /* Backlog congestion levels */
74 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
75 #define NET_RX_DROP 1 /* packet dropped */
78 * Transmit return codes: transmit return codes originate from three different
81 * - qdisc return codes
82 * - driver transmit return codes
85 * Drivers are allowed to return any one of those in their hard_start_xmit()
86 * function. Real network devices commonly used with qdiscs should only return
87 * the driver transmit return codes though - when qdiscs are used, the actual
88 * transmission happens asynchronously, so the value is not propagated to
89 * higher layers. Virtual network devices transmit synchronously, in this case
90 * the driver transmit return codes are consumed by dev_queue_xmit(), all
91 * others are propagated to higher layers.
94 /* qdisc ->enqueue() return codes. */
95 #define NET_XMIT_SUCCESS 0x00
96 #define NET_XMIT_DROP 0x01 /* skb dropped */
97 #define NET_XMIT_CN 0x02 /* congestion notification */
98 #define NET_XMIT_POLICED 0x03 /* skb is shot by police */
99 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
101 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
102 * indicates that the device will soon be dropping packets, or already drops
103 * some packets of the same priority; prompting us to send less aggressively. */
104 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
105 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
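/* Example: how a caller of the qdisc layer might fold an enqueue return code
 * into an errno with the helpers above (illustrative sketch only;
 * my_send_one() is a hypothetical wrapper, not part of this header).
 * NET_XMIT_CN is reported to the sender as success, every other code is
 * passed through unchanged:
 *
 *	static int my_send_one(struct sk_buff *skb)
 *	{
 *		int rc = dev_queue_xmit(skb);
 *
 *		return net_xmit_eval(rc);
 *	}
 */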
/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;
/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
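/* Example: how a queue-running path might act on the code returned by a
 * driver's ndo_start_xmit() (illustrative sketch only; my_requeue_skb() is a
 * hypothetical helper). When dev_xmit_complete() is false the skb still
 * belongs to the caller and must be requeued or freed by it:
 *
 *	netdev_tx_t rc = dev->netdev_ops->ndo_start_xmit(skb, dev);
 *
 *	if (!dev_xmit_complete(rc))
 *		my_requeue_skb(dev, skb);
 */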
138 #define MAX_ADDR_LEN 32 /* Largest hardware address length */
140 /* Initial net device group. All devices belong to group 0 by default. */
141 #define INIT_NETDEV_GROUP 0
145 * Compute the worst case header length according to the protocols
#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif IS_ENABLED(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 */
struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
199 #endif /* __KERNEL__ */
202 /* Media selection options. */
215 #include <linux/cache.h>
216 #include <linux/skbuff.h>
#include <linux/static_key.h>
extern struct static_key rps_needed;
struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};
247 #define netdev_hw_addr_list_count(l) ((l)->count)
248 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
249 #define netdev_hw_addr_list_for_each(ha, l) \
250 list_for_each_entry(ha, &(l)->list, list)
252 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
253 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
254 #define netdev_for_each_uc_addr(ha, dev) \
255 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
257 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
258 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
259 #define netdev_for_each_mc_addr(ha, dev) \
260 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
267 /* cached hardware header; allow for machine alignment needs. */
268 #define HH_DATA_MOD 16
269 #define HH_DATA_OFF(__len) \
270 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
271 #define HH_DATA_ALIGN(__len) \
272 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
284 #define LL_RESERVED_SPACE(dev) \
285 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
286 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
287 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
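/* Example: reserving link-layer headroom when building an outgoing skb for a
 * device (illustrative sketch only; "payload_len" is a placeholder for the
 * caller's own length). LL_RESERVED_SPACE() already accounts for
 * needed_headroom, tailroom comes from dev->needed_tailroom:
 *
 *	unsigned int hlen = LL_RESERVED_SPACE(dev);
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(hlen + payload_len + dev->needed_tailroom, GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, hlen);
 */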
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};
/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
};
enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

typedef enum gro_result gro_result_t;
/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers that the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
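/* Example: a minimal rx_handler, as a bridge- or bonding-like layer might
 * install one (illustrative sketch only; my_rx_handler() and my_upper_dev()
 * are hypothetical). It redirects every frame to an upper device and asks
 * __netif_receive_skb() to run another round on that device:
 *
 *	static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		skb->dev = my_upper_dev(skb->dev);
 *		return RX_HANDLER_ANOTHER;
 *	}
 *
 * Installed with netdev_rx_handler_register(dev, my_rx_handler, NULL) under
 * RTNL, and removed again with netdev_rx_handler_unregister(dev).
 */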
extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}
/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);
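/* Example: the canonical shape of a driver poll routine using the helpers
 * above (illustrative sketch only; my_poll(), my_clean_rx(),
 * my_enable_rx_irqs() and struct my_priv are hypothetical driver names):
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int done = my_clean_rx(priv, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			my_enable_rx_irqs(priv);
 *		}
 *		return done;
 *	}
 */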
/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif
enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
					(1 << __QUEUE_STATE_FROZEN))
531 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
532 * netif_tx_* functions below are used to manipulate this flag. The
533 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
534 * queue independently. The netif_xmit_*stopped functions below are called
535 * to check if the queue has been stopped by the driver or stack (either
536 * of the XOFF bits are set in the state). Drivers should not need to call
537 * netif_xmit*stopped functions, they should only be using netif_tx_*.
540 struct netdev_queue
{
544 struct net_device
*dev
;
546 struct Qdisc
*qdisc_sleeping
;
550 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
556 spinlock_t _xmit_lock ____cacheline_aligned_in_smp
;
559 * please use this field instead of dev->trans_start
561 unsigned long trans_start
;
564 * Number of TX timeouts for this queue
565 * (/sys/class/net/DEV/Q/trans_timeout)
567 unsigned long trans_timeout
;
574 } ____cacheline_aligned_in_smp
;
576 static inline int netdev_queue_numa_node_read(const struct netdev_queue
*q
)
578 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
585 static inline void netdev_queue_numa_node_write(struct netdev_queue
*q
, int node
)
587 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
594 * This structure holds an RPS map which can be of variable length. The
595 * map is an array of CPUs.
602 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
605 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
606 * tail pointer for that CPU's input queue at the time of last enqueue, and
607 * a hardware filter index.
609 struct rps_dev_flow
{
612 unsigned int last_qtail
;
614 #define RPS_NO_FILTER 0xffff
617 * The rps_dev_flow_table structure contains a table of flow mappings.
619 struct rps_dev_flow_table
{
622 struct work_struct free_work
;
623 struct rps_dev_flow flows
[0];
625 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
626 ((_num) * sizeof(struct rps_dev_flow)))
629 * The rps_sock_flow_table contains mappings of flows to the last CPU
630 * on which they were processed by the application (set in recvmsg).
632 struct rps_sock_flow_table
{
636 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
637 ((_num) * sizeof(u16)))
639 #define RPS_NO_CPU 0xffff
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}
662 extern struct rps_sock_flow_table __rcu
*rps_sock_flow_table
;
664 #ifdef CONFIG_RFS_ACCEL
665 extern bool rps_may_expire_flow(struct net_device
*dev
, u16 rxq_index
,
666 u32 flow_id
, u16 filter_id
);
669 /* This structure contains an instance of an RX queue. */
670 struct netdev_rx_queue
{
671 struct rps_map __rcu
*rps_map
;
672 struct rps_dev_flow_table __rcu
*rps_flow_table
;
674 struct net_device
*dev
;
675 } ____cacheline_aligned_in_smp
;
676 #endif /* CONFIG_RPS */
680 * This structure holds an XPS map which can be of variable length. The
681 * map is an array of queues.
685 unsigned int alloc_len
;
689 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
690 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
694 * This structure holds all XPS maps for device. Maps are indexed by CPU.
696 struct xps_dev_maps
{
698 struct xps_map __rcu
*cpu_map
[0];
700 #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
701 (nr_cpu_ids * sizeof(struct xps_map *)))
702 #endif /* CONFIG_XPS */
704 #define TC_MAX_QUEUE 16
705 #define TC_BITMASK 15
706 /* HW offloaded queuing disciplines txq count and offset maps */
707 struct netdev_tc_txq
{
712 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
714 * This structure is to hold information about the device
715 * configured to run FCoE protocol stack.
717 struct netdev_fcoe_hbainfo
{
718 char manufacturer
[64];
719 char serial_number
[64];
720 char hardware_version
[64];
721 char driver_version
[64];
722 char optionrom_version
[64];
723 char firmware_version
[64];
725 char model_description
[256];
730 * This structure defines the management hooks for network devices.
731 * The following hooks can be defined; unless noted otherwise, they are
732 * optional and can be filled with a null pointer.
 * int (*ndo_init)(struct net_device *dev);
 * This function is called once when network device is registered.
 * The network device can use this for any late stage initialization
 * or semantic validation. It can fail with an error code which will
 * be propagated back to register_netdev.
 * void (*ndo_uninit)(struct net_device *dev);
 * This function is called when device is unregistered or when registration
 * fails. It is not called if init fails.
 * int (*ndo_open)(struct net_device *dev);
 * This function is called when network device transitions to the up state.
 * int (*ndo_stop)(struct net_device *dev);
 * This function is called when network device transitions to the down state.
752 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
753 * struct net_device *dev);
 * Called when a packet needs to be transmitted.
 * Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 * Required; cannot be NULL.
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 * Called to decide which queue to use when the device supports multiple
 * transmit queues.
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 * This function is called to allow device receiver to make
 * changes to configuration when multicast or promiscuous mode is enabled.
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 * This function is called when the device changes its address list filtering.
 * If the driver handles unicast address filtering, it should set
 * IFF_UNICAST_FLT in its priv_flags.
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 * This function is called when the Media Access Control address
 * needs to be changed. If this interface is not defined, the
 * MAC address cannot be changed.
777 * int (*ndo_validate_addr)(struct net_device *dev);
778 * Test if Media Access Control address is valid for the device.
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 * Called when a user requests an ioctl which can't be handled by
 * the generic interface code. If not defined, ioctls return a
 * not-supported error code.
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 * Used to set network device bus interface parameters. This interface
 * is retained for legacy reasons; new devices should use the bus
 * interface (PCI) for low level management.
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 * Called when a user wants to change the Maximum Transfer Unit
 * of a device. If not defined, any request to change MTU will
 * return an error.
 * void (*ndo_tx_timeout)(struct net_device *dev);
 * Callback used when the transmitter has not made any progress
 * for dev->watchdog ticks.
799 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
800 * struct rtnl_link_stats64 *storage);
801 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
802 * Called when a user wants to get the network device usage
803 * statistics. Drivers must do one of the following:
804 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
805 * rtnl_link_stats64 structure passed by the caller.
806 * 2. Define @ndo_get_stats to update a net_device_stats structure
807 * (which should normally be dev->stats) and return a pointer to
808 * it. The structure may be changed asynchronously only if each
809 * field is written atomically.
810 * 3. Update dev->stats asynchronously and atomically, and define
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 * this function is called when a VLAN id is registered.
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 * this function is called when a VLAN id is unregistered.
821 * void (*ndo_poll_controller)(struct net_device *dev);
823 * SR-IOV management functions.
824 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
825 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
826 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
827 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
828 * int (*ndo_get_vf_config)(struct net_device *dev,
829 * int vf, struct ifla_vf_info *ivf);
830 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
831 * struct nlattr *port[]);
832 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
833 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
834 * Called to setup 'tc' number of traffic classes in the net device. This
835 * is always called from the stack with the rtnl lock held and netif tx
836 * queues stopped. This allows the netdevice to perform queue management
839 * Fiber Channel over Ethernet (FCoE) offload functions.
840 * int (*ndo_fcoe_enable)(struct net_device *dev);
841 * Called when the FCoE protocol stack wants to start using LLD for FCoE
842 * so the underlying device can perform whatever needed configuration or
843 * initialization to support acceleration of FCoE traffic.
845 * int (*ndo_fcoe_disable)(struct net_device *dev);
846 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
847 * so the underlying device can perform whatever needed clean-ups to
848 * stop supporting acceleration of FCoE traffic.
850 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
851 * struct scatterlist *sgl, unsigned int sgc);
852 * Called when the FCoE Initiator wants to initialize an I/O that
853 * is a possible candidate for Direct Data Placement (DDP). The LLD can
854 * perform necessary setup and returns 1 to indicate the device is set up
855 * successfully to perform DDP on this I/O, otherwise this returns 0.
857 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
858 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
859 * indicated by the FC exchange id 'xid', so the underlying device can
860 * clean up and reuse resources for later DDP requests.
862 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
863 * struct scatterlist *sgl, unsigned int sgc);
864 * Called when the FCoE Target wants to initialize an I/O that
865 * is a possible candidate for Direct Data Placement (DDP). The LLD can
866 * perform necessary setup and returns 1 to indicate the device is set up
867 * successfully to perform DDP on this I/O, otherwise this returns 0.
869 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
870 * struct netdev_fcoe_hbainfo *hbainfo);
871 * Called when the FCoE Protocol stack wants information on the underlying
872 * device. This information is utilized by the FCoE protocol stack to
873 * register attributes with Fiber Channel management service as per the
874 * FC-GS Fabric Device Management Information(FDMI) specification.
876 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
877 * Called when the underlying device wants to override default World Wide
878 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
879 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
880 * protocol stack to use.
883 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
884 * u16 rxq_index, u32 flow_id);
885 * Set hardware filter for RFS. rxq_index is the target queue index;
886 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
887 * Return the filter ID on success, or a negative error code.
889 * Slave management functions (for bridge, bonding, etc). User should
890 * call netdev_set_master() to set dev->master properly.
891 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
892 * Called to make another netdev an underling.
894 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
895 * Called to release previously enslaved netdev.
897 * Feature/offload setting functions.
898 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
899 * netdev_features_t features);
900 * Adjusts the requested feature flags according to device-specific
901 * constraints, and returns the resulting flags. Must not modify
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 * Called to update device configuration to new features. Passed
 * feature set might be less than what was returned by ndo_fix_features().
 * Must return >0 or -errno if it changed dev->features itself.
909 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct net_device *dev,
910 * unsigned char *addr, u16 flags)
911 * Adds an FDB entry to dev for addr.
912 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev,
913 * unsigned char *addr)
 * Deletes the FDB entry from dev corresponding to addr.
915 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
916 * struct net_device *dev, int idx)
917 * Used to add FDB entries to dump requests. Implementers should add
918 * entries to skb and update idx with the number of entries.
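/* Example: a minimal ops table as a simple Ethernet driver might fill it in
 * (illustrative sketch only; the my_* functions are hypothetical, the eth_*
 * helpers come from <linux/etherdevice.h>). The table is assigned to
 * dev->netdev_ops before register_netdev():
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_open		= my_open,
 *		.ndo_stop		= my_stop,
 *		.ndo_start_xmit		= my_start_xmit,
 *		.ndo_set_rx_mode	= my_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_change_mtu		= eth_change_mtu,
 *	};
 */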
920 struct net_device_ops
{
921 int (*ndo_init
)(struct net_device
*dev
);
922 void (*ndo_uninit
)(struct net_device
*dev
);
923 int (*ndo_open
)(struct net_device
*dev
);
924 int (*ndo_stop
)(struct net_device
*dev
);
925 netdev_tx_t (*ndo_start_xmit
) (struct sk_buff
*skb
,
926 struct net_device
*dev
);
927 u16 (*ndo_select_queue
)(struct net_device
*dev
,
928 struct sk_buff
*skb
);
929 void (*ndo_change_rx_flags
)(struct net_device
*dev
,
931 void (*ndo_set_rx_mode
)(struct net_device
*dev
);
932 int (*ndo_set_mac_address
)(struct net_device
*dev
,
934 int (*ndo_validate_addr
)(struct net_device
*dev
);
935 int (*ndo_do_ioctl
)(struct net_device
*dev
,
936 struct ifreq
*ifr
, int cmd
);
937 int (*ndo_set_config
)(struct net_device
*dev
,
939 int (*ndo_change_mtu
)(struct net_device
*dev
,
941 int (*ndo_neigh_setup
)(struct net_device
*dev
,
942 struct neigh_parms
*);
943 void (*ndo_tx_timeout
) (struct net_device
*dev
);
945 struct rtnl_link_stats64
* (*ndo_get_stats64
)(struct net_device
*dev
,
946 struct rtnl_link_stats64
*storage
);
947 struct net_device_stats
* (*ndo_get_stats
)(struct net_device
*dev
);
949 int (*ndo_vlan_rx_add_vid
)(struct net_device
*dev
,
951 int (*ndo_vlan_rx_kill_vid
)(struct net_device
*dev
,
953 #ifdef CONFIG_NET_POLL_CONTROLLER
954 void (*ndo_poll_controller
)(struct net_device
*dev
);
955 int (*ndo_netpoll_setup
)(struct net_device
*dev
,
956 struct netpoll_info
*info
);
957 void (*ndo_netpoll_cleanup
)(struct net_device
*dev
);
959 int (*ndo_set_vf_mac
)(struct net_device
*dev
,
961 int (*ndo_set_vf_vlan
)(struct net_device
*dev
,
962 int queue
, u16 vlan
, u8 qos
);
963 int (*ndo_set_vf_tx_rate
)(struct net_device
*dev
,
965 int (*ndo_set_vf_spoofchk
)(struct net_device
*dev
,
966 int vf
, bool setting
);
967 int (*ndo_get_vf_config
)(struct net_device
*dev
,
969 struct ifla_vf_info
*ivf
);
970 int (*ndo_set_vf_port
)(struct net_device
*dev
,
972 struct nlattr
*port
[]);
973 int (*ndo_get_vf_port
)(struct net_device
*dev
,
974 int vf
, struct sk_buff
*skb
);
975 int (*ndo_setup_tc
)(struct net_device
*dev
, u8 tc
);
976 #if IS_ENABLED(CONFIG_FCOE)
977 int (*ndo_fcoe_enable
)(struct net_device
*dev
);
978 int (*ndo_fcoe_disable
)(struct net_device
*dev
);
979 int (*ndo_fcoe_ddp_setup
)(struct net_device
*dev
,
981 struct scatterlist
*sgl
,
983 int (*ndo_fcoe_ddp_done
)(struct net_device
*dev
,
985 int (*ndo_fcoe_ddp_target
)(struct net_device
*dev
,
987 struct scatterlist
*sgl
,
989 int (*ndo_fcoe_get_hbainfo
)(struct net_device
*dev
,
990 struct netdev_fcoe_hbainfo
*hbainfo
);
993 #if IS_ENABLED(CONFIG_LIBFCOE)
994 #define NETDEV_FCOE_WWNN 0
995 #define NETDEV_FCOE_WWPN 1
996 int (*ndo_fcoe_get_wwn
)(struct net_device
*dev
,
1000 #ifdef CONFIG_RFS_ACCEL
1001 int (*ndo_rx_flow_steer
)(struct net_device
*dev
,
1002 const struct sk_buff
*skb
,
1006 int (*ndo_add_slave
)(struct net_device
*dev
,
1007 struct net_device
*slave_dev
);
1008 int (*ndo_del_slave
)(struct net_device
*dev
,
1009 struct net_device
*slave_dev
);
1010 netdev_features_t (*ndo_fix_features
)(struct net_device
*dev
,
1011 netdev_features_t features
);
1012 int (*ndo_set_features
)(struct net_device
*dev
,
1013 netdev_features_t features
);
1014 int (*ndo_neigh_construct
)(struct neighbour
*n
);
1015 void (*ndo_neigh_destroy
)(struct neighbour
*n
);
1017 int (*ndo_fdb_add
)(struct ndmsg
*ndm
,
1018 struct net_device
*dev
,
1019 unsigned char *addr
,
1021 int (*ndo_fdb_del
)(struct ndmsg
*ndm
,
1022 struct net_device
*dev
,
1023 unsigned char *addr
);
1024 int (*ndo_fdb_dump
)(struct sk_buff
*skb
,
1025 struct netlink_callback
*cb
,
1026 struct net_device
*dev
,
1031 * The DEVICE structure.
1032 * Actually, this whole structure is a big mistake. It mixes I/O
1033 * data with strictly "high-level" data, and it has to know about
1034 * almost every data structure used in the INET module.
1036 * FIXME: cleanup struct net_device such that network protocol info
1043 * This is the first field of the "visible" part of this structure
1044 * (i.e. as seen by users in the "Space.c" file). It is the name
1047 char name
[IFNAMSIZ
];
1049 struct pm_qos_request pm_qos_req
;
1051 /* device name hash chain */
1052 struct hlist_node name_hlist
;
1057 * I/O specific fields
1058 * FIXME: Merge these and struct ifmap into one
1060 unsigned long mem_end
; /* shared mem end */
1061 unsigned long mem_start
; /* shared mem start */
1062 unsigned long base_addr
; /* device I/O address */
1063 unsigned int irq
; /* device IRQ number */
1066 * Some hardware also needs these fields, but they are not
1067 * part of the usual set specified in Space.c.
1070 unsigned long state
;
1072 struct list_head dev_list
;
1073 struct list_head napi_list
;
1074 struct list_head unreg_list
;
1076 /* currently active device features */
1077 netdev_features_t features
;
1078 /* user-changeable features */
1079 netdev_features_t hw_features
;
1080 /* user-requested features */
1081 netdev_features_t wanted_features
;
1082 /* mask of features inheritable by VLAN devices */
1083 netdev_features_t vlan_features
;
1085 /* Interface index. Unique device identifier */
1089 struct net_device_stats stats
;
1090 atomic_long_t rx_dropped
; /* dropped packets by core network
1091 * Do not use this in drivers.
1094 #ifdef CONFIG_WIRELESS_EXT
1095 /* List of functions to handle Wireless Extensions (instead of ioctl).
1096 * See <net/iw_handler.h> for details. Jean II */
1097 const struct iw_handler_def
* wireless_handlers
;
1098 /* Instance data managed by the core of Wireless Extensions. */
1099 struct iw_public_data
* wireless_data
;
1101 /* Management operations */
1102 const struct net_device_ops
*netdev_ops
;
1103 const struct ethtool_ops
*ethtool_ops
;
1105 /* Hardware header description */
1106 const struct header_ops
*header_ops
;
1108 unsigned int flags
; /* interface flags (a la BSD) */
1109 unsigned int priv_flags
; /* Like 'flags' but invisible to userspace.
1110 * See if.h for definitions. */
1111 unsigned short gflags
;
1112 unsigned short padded
; /* How much padding added by alloc_netdev() */
1114 unsigned char operstate
; /* RFC2863 operstate */
1115 unsigned char link_mode
; /* mapping policy to operstate */
1117 unsigned char if_port
; /* Selectable AUI, TP,..*/
1118 unsigned char dma
; /* DMA channel */
1120 unsigned int mtu
; /* interface MTU value */
1121 unsigned short type
; /* interface hardware type */
1122 unsigned short hard_header_len
; /* hardware hdr length */
1124 /* extra head- and tailroom the hardware may need, but not in all cases
1125 * can this be guaranteed, especially tailroom. Some cases also use
1126 * LL_MAX_HEADER instead to allocate the skb.
1128 unsigned short needed_headroom
;
1129 unsigned short needed_tailroom
;
1131 /* Interface address info. */
1132 unsigned char perm_addr
[MAX_ADDR_LEN
]; /* permanent hw address */
1133 unsigned char addr_assign_type
; /* hw address assignment type */
1134 unsigned char addr_len
; /* hardware address length */
1135 unsigned char neigh_priv_len
;
1136 unsigned short dev_id
; /* for shared network cards */
1138 spinlock_t addr_list_lock
;
1139 struct netdev_hw_addr_list uc
; /* Unicast mac addresses */
1140 struct netdev_hw_addr_list mc
; /* Multicast mac addresses */
1142 unsigned int promiscuity
;
1143 unsigned int allmulti
;
1146 /* Protocol specific pointers */
1148 #if IS_ENABLED(CONFIG_VLAN_8021Q)
1149 struct vlan_info __rcu
*vlan_info
; /* VLAN info */
1151 #if IS_ENABLED(CONFIG_NET_DSA)
1152 struct dsa_switch_tree
*dsa_ptr
; /* dsa specific data */
1154 void *atalk_ptr
; /* AppleTalk link */
1155 struct in_device __rcu
*ip_ptr
; /* IPv4 specific data */
1156 struct dn_dev __rcu
*dn_ptr
; /* DECnet specific data */
1157 struct inet6_dev __rcu
*ip6_ptr
; /* IPv6 specific data */
1158 void *ax25_ptr
; /* AX.25 specific data */
1159 struct wireless_dev
*ieee80211_ptr
; /* IEEE 802.11 specific data,
1160 assign before registering */
1163 * Cache lines mostly used on receive path (including eth_type_trans())
1165 unsigned long last_rx
; /* Time of last Rx
1166 * This should not be set in
1167 * drivers, unless really needed,
1168 * because network stack (bonding)
1169 * use it if/when necessary, to
1170 * avoid dirtying this cache line.
1173 struct net_device
*master
; /* Pointer to master device of a group,
1174 * which this device is member of.
1177 /* Interface address info used in eth_type_trans() */
1178 unsigned char *dev_addr
; /* hw address, (before bcast
1179 because most packets are
1182 struct netdev_hw_addr_list dev_addrs
; /* list of device
1185 unsigned char broadcast
[MAX_ADDR_LEN
]; /* hw bcast add */
1188 struct kset
*queues_kset
;
1192 struct netdev_rx_queue
*_rx
;
1194 /* Number of RX queues allocated at register_netdev() time */
1195 unsigned int num_rx_queues
;
1197 /* Number of RX queues currently active in device */
1198 unsigned int real_num_rx_queues
;
1200 #ifdef CONFIG_RFS_ACCEL
1201 /* CPU reverse-mapping for RX completion interrupts, indexed
1202 * by RX queue number. Assigned by driver. This must only be
1203 * set if the ndo_rx_flow_steer operation is defined. */
1204 struct cpu_rmap
*rx_cpu_rmap
;
1208 rx_handler_func_t __rcu
*rx_handler
;
1209 void __rcu
*rx_handler_data
;
1211 struct netdev_queue __rcu
*ingress_queue
;
1214 * Cache lines mostly used on transmit path
1216 struct netdev_queue
*_tx ____cacheline_aligned_in_smp
;
1218 /* Number of TX queues allocated at alloc_netdev_mq() time */
1219 unsigned int num_tx_queues
;
1221 /* Number of TX queues currently active in device */
1222 unsigned int real_num_tx_queues
;
1224 /* root qdisc from userspace point of view */
1225 struct Qdisc
*qdisc
;
1227 unsigned long tx_queue_len
; /* Max frames per queue allowed */
1228 spinlock_t tx_global_lock
;
1231 struct xps_dev_maps __rcu
*xps_maps
;
1234 /* These may be needed for future network-power-down code. */
1237 * trans_start here is expensive for high speed devices on SMP,
1238 * please use netdev_queue->trans_start instead.
1240 unsigned long trans_start
; /* Time (in jiffies) of last Tx */
1242 int watchdog_timeo
; /* used by dev_watchdog() */
1243 struct timer_list watchdog_timer
;
1245 /* Number of references to this device */
1246 int __percpu
*pcpu_refcnt
;
1248 /* delayed register/unregister */
1249 struct list_head todo_list
;
1250 /* device index hash chain */
1251 struct hlist_node index_hlist
;
1253 struct list_head link_watch_list
;
1255 /* register/unregister state machine */
1256 enum { NETREG_UNINITIALIZED
=0,
1257 NETREG_REGISTERED
, /* completed register_netdevice */
1258 NETREG_UNREGISTERING
, /* called unregister_netdevice */
1259 NETREG_UNREGISTERED
, /* completed unregister todo */
1260 NETREG_RELEASED
, /* called free_netdev */
1261 NETREG_DUMMY
, /* dummy device for NAPI poll */
	bool			dismantle;	/* device is going to be freed */
1267 RTNL_LINK_INITIALIZED
,
1268 RTNL_LINK_INITIALIZING
,
1269 } rtnl_link_state
:16;
1271 /* Called from unregister, can be used to call free_netdev */
1272 void (*destructor
)(struct net_device
*dev
);
1274 #ifdef CONFIG_NETPOLL
1275 struct netpoll_info
*npinfo
;
1278 #ifdef CONFIG_NET_NS
1279 /* Network namespace this network device is inside */
1283 /* mid-layer private */
1286 struct pcpu_lstats __percpu
*lstats
; /* loopback stats */
1287 struct pcpu_tstats __percpu
*tstats
; /* tunnel stats */
1288 struct pcpu_dstats __percpu
*dstats
; /* dummy stats */
1291 struct garp_port __rcu
*garp_port
;
1293 /* class/net/name entry */
1295 /* space for optional device, statistics, and wireless sysfs groups */
1296 const struct attribute_group
*sysfs_groups
[4];
1298 /* rtnetlink link ops */
1299 const struct rtnl_link_ops
*rtnl_link_ops
;
1301 /* for setting kernel sock attribute on TCP connection setup */
1302 #define GSO_MAX_SIZE 65536
1303 unsigned int gso_max_size
;
1306 /* Data Center Bridging netlink ops */
1307 const struct dcbnl_rtnl_ops
*dcbnl_ops
;
1310 struct netdev_tc_txq tc_to_txq
[TC_MAX_QUEUE
];
1311 u8 prio_tc_map
[TC_BITMASK
+ 1];
1313 #if IS_ENABLED(CONFIG_FCOE)
1314 /* max exchange id for FCoE LRO by ddp */
1315 unsigned int fcoe_ddp_xid
;
1317 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1318 struct netprio_map __rcu
*priomap
;
1320 /* phy device may attach itself for hardware timestamping */
1321 struct phy_device
*phydev
;
1323 /* group the device belongs to */
1326 #define to_net_dev(d) container_of(d, struct net_device, dev)
1328 #define NETDEV_ALIGN 32
1331 int netdev_get_prio_tc_map(const struct net_device
*dev
, u32 prio
)
1333 return dev
->prio_tc_map
[prio
& TC_BITMASK
];
1337 int netdev_set_prio_tc_map(struct net_device
*dev
, u8 prio
, u8 tc
)
1339 if (tc
>= dev
->num_tc
)
1342 dev
->prio_tc_map
[prio
& TC_BITMASK
] = tc
& TC_BITMASK
;
1347 void netdev_reset_tc(struct net_device
*dev
)
1350 memset(dev
->tc_to_txq
, 0, sizeof(dev
->tc_to_txq
));
1351 memset(dev
->prio_tc_map
, 0, sizeof(dev
->prio_tc_map
));
1355 int netdev_set_tc_queue(struct net_device
*dev
, u8 tc
, u16 count
, u16 offset
)
1357 if (tc
>= dev
->num_tc
)
1360 dev
->tc_to_txq
[tc
].count
= count
;
1361 dev
->tc_to_txq
[tc
].offset
= offset
;
1366 int netdev_set_num_tc(struct net_device
*dev
, u8 num_tc
)
1368 if (num_tc
> TC_MAX_QUEUE
)
1371 dev
->num_tc
= num_tc
;
1376 int netdev_get_num_tc(struct net_device
*dev
)
1382 struct netdev_queue
*netdev_get_tx_queue(const struct net_device
*dev
,
1385 return &dev
->_tx
[index
];
1388 static inline void netdev_for_each_tx_queue(struct net_device
*dev
,
1389 void (*f
)(struct net_device
*,
1390 struct netdev_queue
*,
1396 for (i
= 0; i
< dev
->num_tx_queues
; i
++)
1397 f(dev
, &dev
->_tx
[i
], arg
);
1401 * Net namespace inlines
1404 struct net
*dev_net(const struct net_device
*dev
)
1406 return read_pnet(&dev
->nd_net
);
1410 void dev_net_set(struct net_device
*dev
, struct net
*net
)
1412 #ifdef CONFIG_NET_NS
1413 release_net(dev
->nd_net
);
1414 dev
->nd_net
= hold_net(net
);
1418 static inline bool netdev_uses_dsa_tags(struct net_device
*dev
)
1420 #ifdef CONFIG_NET_DSA_TAG_DSA
1421 if (dev
->dsa_ptr
!= NULL
)
1422 return dsa_uses_dsa_tags(dev
->dsa_ptr
);
1428 static inline bool netdev_uses_trailer_tags(struct net_device
*dev
)
1430 #ifdef CONFIG_NET_DSA_TAG_TRAILER
1431 if (dev
->dsa_ptr
!= NULL
)
1432 return dsa_uses_trailer_tags(dev
->dsa_ptr
);
1439 * netdev_priv - access network device private data
1440 * @dev: network device
1442 * Get network device private data
1444 static inline void *netdev_priv(const struct net_device
*dev
)
1446 return (char *)dev
+ ALIGN(sizeof(struct net_device
), NETDEV_ALIGN
);
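/* Example: the usual pairing of alloc_etherdev() and netdev_priv()
 * (illustrative sketch only; struct my_priv is a hypothetical driver type):
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
 *	struct my_priv *priv;
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *	priv->netdev = dev;
 */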
/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
1461 * netif_napi_add - initialize a napi context
1462 * @dev: network device
1463 * @napi: napi context
1464 * @poll: polling function
1465 * @weight: default weight
1467 * netif_napi_add() must be used to initialize a napi context prior to calling
1468 * *any* of the other napi related functions.
1470 void netif_napi_add(struct net_device
*dev
, struct napi_struct
*napi
,
1471 int (*poll
)(struct napi_struct
*, int), int weight
);
1474 * netif_napi_del - remove a napi context
1475 * @napi: napi context
1477 * netif_napi_del() removes a napi context from the network device napi list
1479 void netif_napi_del(struct napi_struct
*napi
);
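/* Example: wiring a napi context into a device at probe time (illustrative
 * sketch only; my_poll() and struct my_priv are hypothetical; 64 is the
 * customary default weight):
 *
 *	struct my_priv *priv = netdev_priv(dev);
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, 64);
 *
 * and later, once the device has been unregistered:
 *
 *	netif_napi_del(&priv->napi);
 */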
1481 struct napi_gro_cb
{
1482 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1485 /* Length of frag0. */
1486 unsigned int frag0_len
;
1488 /* This indicates where we are processing relative to skb->data. */
1491 /* This is non-zero if the packet may be of the same flow. */
1494 /* This is non-zero if the packet cannot be merged with the new skb. */
1497 /* Number of segments aggregated. */
1502 #define NAPI_GRO_FREE 1
1503 #define NAPI_GRO_FREE_STOLEN_HEAD 2
1506 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1508 struct packet_type
{
1509 __be16 type
; /* This is really htons(ether_type). */
1510 struct net_device
*dev
; /* NULL is wildcarded here */
1511 int (*func
) (struct sk_buff
*,
1512 struct net_device
*,
1513 struct packet_type
*,
1514 struct net_device
*);
1515 struct sk_buff
*(*gso_segment
)(struct sk_buff
*skb
,
1516 netdev_features_t features
);
1517 int (*gso_send_check
)(struct sk_buff
*skb
);
1518 struct sk_buff
**(*gro_receive
)(struct sk_buff
**head
,
1519 struct sk_buff
*skb
);
1520 int (*gro_complete
)(struct sk_buff
*skb
);
1521 void *af_packet_priv
;
1522 struct list_head list
;
1525 #include <linux/notifier.h>
1527 /* netdevice notifier chain. Please remember to update the rtnetlink
1528 * notification exclusion list in rtnetlink_event() when adding new
1531 #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
1532 #define NETDEV_DOWN 0x0002
1533 #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
1534 detected a hardware crash and restarted
1535 - we can use this eg to kick tcp sessions
1537 #define NETDEV_CHANGE 0x0004 /* Notify device state change */
1538 #define NETDEV_REGISTER 0x0005
1539 #define NETDEV_UNREGISTER 0x0006
1540 #define NETDEV_CHANGEMTU 0x0007
1541 #define NETDEV_CHANGEADDR 0x0008
1542 #define NETDEV_GOING_DOWN 0x0009
1543 #define NETDEV_CHANGENAME 0x000A
1544 #define NETDEV_FEAT_CHANGE 0x000B
1545 #define NETDEV_BONDING_FAILOVER 0x000C
1546 #define NETDEV_PRE_UP 0x000D
1547 #define NETDEV_PRE_TYPE_CHANGE 0x000E
1548 #define NETDEV_POST_TYPE_CHANGE 0x000F
1549 #define NETDEV_POST_INIT 0x0010
1550 #define NETDEV_UNREGISTER_BATCH 0x0011
1551 #define NETDEV_RELEASE 0x0012
1552 #define NETDEV_NOTIFY_PEERS 0x0013
1553 #define NETDEV_JOIN 0x0014
1555 extern int register_netdevice_notifier(struct notifier_block
*nb
);
1556 extern int unregister_netdevice_notifier(struct notifier_block
*nb
);
1557 extern int call_netdevice_notifiers(unsigned long val
, struct net_device
*dev
);
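/* Example: a minimal netdevice notifier (illustrative sketch only;
 * my_netdev_event() and my_nb are hypothetical). In this kernel the ptr
 * argument is the struct net_device itself:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 * registered with register_netdevice_notifier(&my_nb) and removed with
 * unregister_netdevice_notifier(&my_nb).
 */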
1560 extern rwlock_t dev_base_lock
; /* Device list lock */
1563 #define for_each_netdev(net, d) \
1564 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
1565 #define for_each_netdev_reverse(net, d) \
1566 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
1567 #define for_each_netdev_rcu(net, d) \
1568 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
1569 #define for_each_netdev_safe(net, d, n) \
1570 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1571 #define for_each_netdev_continue(net, d) \
1572 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
1573 #define for_each_netdev_continue_rcu(net, d) \
1574 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
1575 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
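/* Example: walking the device list of a namespace under RCU with the
 * iterators above (illustrative sketch only):
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev)
 *		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
 *	rcu_read_unlock();
 */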
1577 static inline struct net_device
*next_net_device(struct net_device
*dev
)
1579 struct list_head
*lh
;
1583 lh
= dev
->dev_list
.next
;
1584 return lh
== &net
->dev_base_head
? NULL
: net_device_entry(lh
);
1587 static inline struct net_device
*next_net_device_rcu(struct net_device
*dev
)
1589 struct list_head
*lh
;
1593 lh
= rcu_dereference(list_next_rcu(&dev
->dev_list
));
1594 return lh
== &net
->dev_base_head
? NULL
: net_device_entry(lh
);
1597 static inline struct net_device
*first_net_device(struct net
*net
)
1599 return list_empty(&net
->dev_base_head
) ? NULL
:
1600 net_device_entry(net
->dev_base_head
.next
);
1603 static inline struct net_device
*first_net_device_rcu(struct net
*net
)
1605 struct list_head
*lh
= rcu_dereference(list_next_rcu(&net
->dev_base_head
));
1607 return lh
== &net
->dev_base_head
? NULL
: net_device_entry(lh
);
1610 extern int netdev_boot_setup_check(struct net_device
*dev
);
1611 extern unsigned long netdev_boot_base(const char *prefix
, int unit
);
1612 extern struct net_device
*dev_getbyhwaddr_rcu(struct net
*net
, unsigned short type
,
1613 const char *hwaddr
);
1614 extern struct net_device
*dev_getfirstbyhwtype(struct net
*net
, unsigned short type
);
1615 extern struct net_device
*__dev_getfirstbyhwtype(struct net
*net
, unsigned short type
);
1616 extern void dev_add_pack(struct packet_type
*pt
);
1617 extern void dev_remove_pack(struct packet_type
*pt
);
1618 extern void __dev_remove_pack(struct packet_type
*pt
);
1620 extern struct net_device
*dev_get_by_flags_rcu(struct net
*net
, unsigned short flags
,
1621 unsigned short mask
);
1622 extern struct net_device
*dev_get_by_name(struct net
*net
, const char *name
);
1623 extern struct net_device
*dev_get_by_name_rcu(struct net
*net
, const char *name
);
1624 extern struct net_device
*__dev_get_by_name(struct net
*net
, const char *name
);
1625 extern int dev_alloc_name(struct net_device
*dev
, const char *name
);
1626 extern int dev_open(struct net_device
*dev
);
1627 extern int dev_close(struct net_device
*dev
);
1628 extern void dev_disable_lro(struct net_device
*dev
);
1629 extern int dev_queue_xmit(struct sk_buff
*skb
);
1630 extern int register_netdevice(struct net_device
*dev
);
1631 extern void unregister_netdevice_queue(struct net_device
*dev
,
1632 struct list_head
*head
);
1633 extern void unregister_netdevice_many(struct list_head
*head
);
1634 static inline void unregister_netdevice(struct net_device
*dev
)
1636 unregister_netdevice_queue(dev
, NULL
);
1639 extern int netdev_refcnt_read(const struct net_device
*dev
);
1640 extern void free_netdev(struct net_device
*dev
);
1641 extern void synchronize_net(void);
1642 extern int init_dummy_netdev(struct net_device
*dev
);
1643 extern void netdev_resync_ops(struct net_device
*dev
);
1645 extern struct net_device
*dev_get_by_index(struct net
*net
, int ifindex
);
1646 extern struct net_device
*__dev_get_by_index(struct net
*net
, int ifindex
);
1647 extern struct net_device
*dev_get_by_index_rcu(struct net
*net
, int ifindex
);
1648 extern int dev_restart(struct net_device
*dev
);
1649 #ifdef CONFIG_NETPOLL_TRAP
1650 extern int netpoll_trap(void);
1652 extern int skb_gro_receive(struct sk_buff
**head
,
1653 struct sk_buff
*skb
);
1654 extern void skb_gro_reset_offset(struct sk_buff
*skb
);
1656 static inline unsigned int skb_gro_offset(const struct sk_buff
*skb
)
1658 return NAPI_GRO_CB(skb
)->data_offset
;
1661 static inline unsigned int skb_gro_len(const struct sk_buff
*skb
)
1663 return skb
->len
- NAPI_GRO_CB(skb
)->data_offset
;
1666 static inline void skb_gro_pull(struct sk_buff
*skb
, unsigned int len
)
1668 NAPI_GRO_CB(skb
)->data_offset
+= len
;
1671 static inline void *skb_gro_header_fast(struct sk_buff
*skb
,
1672 unsigned int offset
)
1674 return NAPI_GRO_CB(skb
)->frag0
+ offset
;
1677 static inline int skb_gro_header_hard(struct sk_buff
*skb
, unsigned int hlen
)
1679 return NAPI_GRO_CB(skb
)->frag0_len
< hlen
;
1682 static inline void *skb_gro_header_slow(struct sk_buff
*skb
, unsigned int hlen
,
1683 unsigned int offset
)
1685 if (!pskb_may_pull(skb
, hlen
))
1688 NAPI_GRO_CB(skb
)->frag0
= NULL
;
1689 NAPI_GRO_CB(skb
)->frag0_len
= 0;
1690 return skb
->data
+ offset
;
1693 static inline void *skb_gro_mac_header(struct sk_buff
*skb
)
1695 return NAPI_GRO_CB(skb
)->frag0
?: skb_mac_header(skb
);
1698 static inline void *skb_gro_network_header(struct sk_buff
*skb
)
1700 return (NAPI_GRO_CB(skb
)->frag0
?: skb
->data
) +
1701 skb_network_offset(skb
);
1704 static inline int dev_hard_header(struct sk_buff
*skb
, struct net_device
*dev
,
1705 unsigned short type
,
1706 const void *daddr
, const void *saddr
,
1709 if (!dev
->header_ops
|| !dev
->header_ops
->create
)
1712 return dev
->header_ops
->create(skb
, dev
, type
, daddr
, saddr
, len
);
1715 static inline int dev_parse_header(const struct sk_buff
*skb
,
1716 unsigned char *haddr
)
1718 const struct net_device
*dev
= skb
->dev
;
1720 if (!dev
->header_ops
|| !dev
->header_ops
->parse
)
1722 return dev
->header_ops
->parse(skb
, haddr
);
1725 typedef int gifconf_func_t(struct net_device
* dev
, char __user
* bufptr
, int len
);
1726 extern int register_gifconf(unsigned int family
, gifconf_func_t
* gifconf
);
1727 static inline int unregister_gifconf(unsigned int family
)
1729 return register_gifconf(family
, NULL
);
1733 * Incoming packets are placed on per-cpu queues
1735 struct softnet_data
{
1736 struct Qdisc
*output_queue
;
1737 struct Qdisc
**output_queue_tailp
;
1738 struct list_head poll_list
;
1739 struct sk_buff
*completion_queue
;
1740 struct sk_buff_head process_queue
;
1743 unsigned int processed
;
1744 unsigned int time_squeeze
;
1745 unsigned int cpu_collision
;
1746 unsigned int received_rps
;
1749 struct softnet_data
*rps_ipi_list
;
1751 /* Elements below can be accessed between CPUs for RPS */
1752 struct call_single_data csd ____cacheline_aligned_in_smp
;
1753 struct softnet_data
*rps_ipi_next
;
1755 unsigned int input_queue_head
;
1756 unsigned int input_queue_tail
;
1758 unsigned int dropped
;
1759 struct sk_buff_head input_pkt_queue
;
1760 struct napi_struct backlog
;
1763 static inline void input_queue_head_incr(struct softnet_data
*sd
)
1766 sd
->input_queue_head
++;
1770 static inline void input_queue_tail_incr_save(struct softnet_data
*sd
,
1771 unsigned int *qtail
)
1774 *qtail
= ++sd
->input_queue_tail
;
1778 DECLARE_PER_CPU_ALIGNED(struct softnet_data
, softnet_data
);
1780 extern void __netif_schedule(struct Qdisc
*q
);
1782 static inline void netif_schedule_queue(struct netdev_queue
*txq
)
1784 if (!(txq
->state
& QUEUE_STATE_ANY_XOFF
))
1785 __netif_schedule(txq
->qdisc
);
1788 static inline void netif_tx_schedule_all(struct net_device
*dev
)
1792 for (i
= 0; i
< dev
->num_tx_queues
; i
++)
1793 netif_schedule_queue(netdev_get_tx_queue(dev
, i
));
1796 static inline void netif_tx_start_queue(struct netdev_queue
*dev_queue
)
1798 clear_bit(__QUEUE_STATE_DRV_XOFF
, &dev_queue
->state
);
1802 * netif_start_queue - allow transmit
1803 * @dev: network device
1805 * Allow upper layers to call the device hard_start_xmit routine.
1807 static inline void netif_start_queue(struct net_device
*dev
)
1809 netif_tx_start_queue(netdev_get_tx_queue(dev
, 0));
1812 static inline void netif_tx_start_all_queues(struct net_device
*dev
)
1816 for (i
= 0; i
< dev
->num_tx_queues
; i
++) {
1817 struct netdev_queue
*txq
= netdev_get_tx_queue(dev
, i
);
1818 netif_tx_start_queue(txq
);
1822 static inline void netif_tx_wake_queue(struct netdev_queue
*dev_queue
)
1824 #ifdef CONFIG_NETPOLL_TRAP
1825 if (netpoll_trap()) {
1826 netif_tx_start_queue(dev_queue
);
1830 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF
, &dev_queue
->state
))
1831 __netif_schedule(dev_queue
->qdisc
);
1835 * netif_wake_queue - restart transmit
1836 * @dev: network device
1838 * Allow upper layers to call the device hard_start_xmit routine.
1839 * Used for flow control when transmit resources are available.
1841 static inline void netif_wake_queue(struct net_device
*dev
)
1843 netif_tx_wake_queue(netdev_get_tx_queue(dev
, 0));
1846 static inline void netif_tx_wake_all_queues(struct net_device
*dev
)
1850 for (i
= 0; i
< dev
->num_tx_queues
; i
++) {
1851 struct netdev_queue
*txq
= netdev_get_tx_queue(dev
, i
);
1852 netif_tx_wake_queue(txq
);
1856 static inline void netif_tx_stop_queue(struct netdev_queue
*dev_queue
)
1858 if (WARN_ON(!dev_queue
)) {
1859 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
1862 set_bit(__QUEUE_STATE_DRV_XOFF
, &dev_queue
->state
);
1866 * netif_stop_queue - stop transmitted packets
1867 * @dev: network device
1869 * Stop upper layers calling the device hard_start_xmit routine.
1870 * Used for flow control when transmit resources are unavailable.
1872 static inline void netif_stop_queue(struct net_device
*dev
)
1874 netif_tx_stop_queue(netdev_get_tx_queue(dev
, 0));
static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
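/*
 * Illustrative sketch (not part of this header): how the byte queue limit
 * helpers above are normally paired.  Whatever is reported on the transmit
 * path with netdev_tx_sent_queue() must later be reported back on the
 * TX-completion path with netdev_tx_completed_queue(); the example_* names
 * and the done_* counters are assumptions.
 */
#if 0
static void example_xmit_account(struct netdev_queue *txq, struct sk_buff *skb)
{
	/* called after the skb has been handed to the hardware ring */
	netdev_tx_sent_queue(txq, skb->len);
}

static void example_tx_clean(struct netdev_queue *txq,
			     unsigned int done_pkts, unsigned int done_bytes)
{
	/* called after completed descriptors have been reclaimed */
	netdev_tx_completed_queue(txq, done_pkts, done_bytes);
}
#endif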
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */
/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}
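/*
 * Illustrative sketch (not part of this header): per-queue flow control in a
 * multiqueue driver.  Only the subqueue the skb was mapped to is stopped, and
 * it is woken from that ring's completion handler.  example_ring_full() and
 * the other example_* names are assumptions.
 */
#if 0
static netdev_tx_t example_mq_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 q = skb_get_queue_mapping(skb);

	/* ... post skb to hardware ring q ... */
	if (example_ring_full(dev, q))
		netif_stop_subqueue(dev, q);
	return NETDEV_TX_OK;
}

static void example_mq_tx_complete(struct net_device *dev, u16 q)
{
	/* ... reclaim ring q ... */
	if (__netif_subqueue_stopped(dev, q))
		netif_wake_subqueue(dev, q);
}
#endif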
/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

extern int netif_set_real_num_tx_queues(struct net_device *dev,
					unsigned int txq);

#ifdef CONFIG_RPS
extern int netif_set_real_num_rx_queues(struct net_device *dev,
					unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
						unsigned int rxq)
{
	return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
#ifdef CONFIG_RPS
	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
#else
	return 0;
#endif
}
/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
extern int		netif_receive_skb(struct sk_buff *skb);
extern gro_result_t	dev_gro_receive(struct napi_struct *napi,
					struct sk_buff *skb);
extern gro_result_t	napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t	napi_gro_receive(struct napi_struct *napi,
					 struct sk_buff *skb);
extern void		napi_gro_flush(struct napi_struct *napi);
extern struct sk_buff	*napi_get_frags(struct napi_struct *napi);
extern gro_result_t	napi_frags_finish(struct napi_struct *napi,
					  struct sk_buff *skb,
					  gro_result_t ret);
extern gro_result_t	napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
extern int netdev_rx_handler_register(struct net_device *dev,
				      rx_handler_func_t *rx_handler,
				      void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);
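/*
 * Illustrative sketch (not part of this header): claiming a device's receive
 * path with an rx_handler, in the style of bridge/bonding-like upper devices.
 * example_handle_frame() and example_enslave() are assumptions; the handler's
 * return codes are the rx_handler_result_t values defined earlier in this
 * header.
 */
#if 0
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	/* inspect or redirect *pskb here; PASS lets the stack process it */
	return RX_HANDLER_PASS;
}

static int example_enslave(struct net_device *lower, void *port)
{
	/* typically called under rtnl_lock() */
	return netdev_rx_handler_register(lower, example_handle_frame, port);
}
#endif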
extern bool		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned int	dev_get_flags(const struct net_device *);
extern int		__dev_change_flags(struct net_device *, unsigned int flags);
extern int		dev_change_flags(struct net_device *, unsigned int);
extern void		__dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int		dev_change_name(struct net_device *, const char *);
extern int		dev_set_alias(struct net_device *, const char *, size_t);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern void		dev_set_group(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev,
					    struct netdev_queue *txq);
extern int		dev_forward_skb(struct net_device *dev,
					struct sk_buff *skb);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 *	Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 *	Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}
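/*
 * Illustrative sketch (not part of this header): pinning a device across a
 * deferred action.  The reference taken with dev_hold() keeps the net_device
 * from being freed until the matching dev_put() after the asynchronous user
 * has finished; the example_* names are assumptions.
 */
#if 0
static void example_defer_work(struct net_device *dev)
{
	dev_hold(dev);			/* pin dev for the async user */
	example_schedule_async(dev);	/* hypothetical deferred work */
}

static void example_async_done(struct net_device *dev)
{
	/* ... last use of dev in the deferred path ... */
	dev_put(dev);			/* drop the reference taken above */
}
#endif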
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

extern void netif_notify_peers(struct net_device *dev);
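/*
 * Illustrative sketch (not part of this header): reporting lower-layer link
 * transitions from a driver's (hypothetical) link-change handler so that
 * linkwatch and the operational state machinery see them.
 */
#if 0
static void example_link_change(struct net_device *dev, bool link_up)
{
	if (link_up && !netif_carrier_ok(dev))
		netif_carrier_on(dev);
	else if (!link_up && netif_carrier_ok(dev))
		netif_carrier_off(dev);
}
#endif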
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 *	Mark device as dormant (as per RFC2863).
 *
 *	The dormant state indicates that the relevant interface is not
 *	actually in a condition to pass packets (i.e., it is not 'up') but is
 *	in a "pending" state, waiting for some external event.  For "on-
 *	demand" interfaces, this new state identifies the situation where the
 *	interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 *	Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 *	Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 *	Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);
/*
 * Network interface message level settings
 */

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};
#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
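/*
 * Illustrative sketch (not part of this header): seeding msg_enable from a
 * module parameter so the netif_msg_*() tests and the netif_*() printk
 * helpers further down honour it.  The debug parameter, example_priv and the
 * chosen default bits are assumptions.
 */
#if 0
static int debug = -1;			/* -1 means "use the default bits" */

struct example_priv {
	u32 msg_enable;
};

static void example_init_msg_level(struct example_priv *priv)
{
	priv->msg_enable = netif_msg_init(debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);
}
#endif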
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}
/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
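/*
 * Illustrative sketch (not part of this header): a read-only walk of the
 * device address list under rcu_read_lock(), as the comment above requires.
 * example_dump_addrs() is an assumption.
 */
#if 0
static void example_dump_addrs(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	rcu_read_lock();
	for_each_dev_addr(dev, ha)
		pr_info("%s: %pM\n", dev->name, ha->addr);
	rcu_read_unlock();
}
#endif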
/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				       void (*setup)(struct net_device *),
				       unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)

extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
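/*
 * Illustrative sketch (not part of this header): the common allocate/register
 * sequence for an Ethernet-style device using the helpers above.  The
 * example_priv structure and "example%d" name template are assumptions.
 */
#if 0
struct example_priv {
	u32 msg_enable;
};

static int example_create(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct example_priv), "example%d", ether_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);	/* registration failed: release it */
	return err;
}
#endif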
/* General hardware address lists handling functions */
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
				  struct netdev_hw_addr_list *from_list,
				  int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
			  struct netdev_hw_addr_list *from_list,
			  int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
			     struct netdev_hw_addr_list *from_list,
			     int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern void dev_addr_flush(struct net_device *dev);
extern int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
extern int dev_uc_add_excl(struct net_device *dev, unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);

/* Functions used for multicast addresses handling */
extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_add_excl(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);

/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_set_promiscuity(struct net_device *dev, int inc);
extern int		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern int		netdev_bonding_change(struct net_device *dev,
					      unsigned long event);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *storage);
extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
				    const struct net_device_stats *netdev_stats);

extern int		netdev_max_backlog;
extern int		netdev_tstamp_prequeue;
extern int		weight_p;
extern int		bpf_jit_enable;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		netdev_set_bond_master(struct net_device *dev,
					       struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
	netdev_features_t features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern struct kobj_ns_type_operations net_ns_type_operations;

extern const char *netdev_drivername(const struct net_device *dev);

extern void linkwatch_run_queue(void);
static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
					struct net_device *dev);

netdev_features_t netif_skb_features(struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}
static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
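/*
 * Illustrative sketch (not part of this header): a transmit path that uses
 * netif_skb_features() and netif_needs_gso() to decide whether an skb must be
 * software-segmented with skb_gso_segment() before the hardware can take it.
 * The example_* flow is an assumption and error handling is abbreviated.
 */
#if 0
static int example_prepare_xmit(struct sk_buff *skb)
{
	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = skb_gso_segment(skb, features);

		if (IS_ERR(segs))
			return PTR_ERR(segs);
		/* ... queue the resulting segment list instead of skb ... */
	}
	return 0;
}
#endif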
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}
extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}

extern int __netdev_printk(const char *level, const struct net_device *dev,
			struct va_format *vaf);

extern __printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...);
extern __printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);
#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)
#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
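/*
 * Illustrative sketch (not part of this header): the netif_*() helpers above
 * gate their output on the corresponding NETIF_MSG_* bit in priv->msg_enable,
 * so a driver logs conditionally without open-coding the test.  example_priv
 * and its fields are assumptions.
 */
#if 0
struct example_priv {
	u32 msg_enable;
	unsigned int speed;
	unsigned int tx_errors;
};

static void example_report(struct example_priv *priv, struct net_device *dev)
{
	netif_info(priv, link, dev, "link is up, %u Mbps\n", priv->speed);
	if (priv->tx_errors)
		netif_err(priv, tx_err, dev, "%u transmit errors\n", priv->tx_errors);
}
#endif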
#if defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#else
#define netif_dbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
#endif /* __KERNEL__ */

#endif	/* _LINUX_NETDEVICE_H */