/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50
#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void netpoll_async_cleanup(struct work_struct *work);
static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)			\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
			      struct netdev_queue *txq)
{
	int status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		txq = skb_get_tx_queue(dev, skb);

		local_irq_save(flags);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 */
static int poll_one_napi(struct napi_struct *napi, int budget)
{
	int work = 0;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation.
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return budget;

	work = napi->poll(napi, budget);
	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);

	return budget - work;
}
static void poll_napi(struct net_device *dev, int budget)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(napi, budget);
			spin_unlock(&napi->poll_lock);
		}
	}
}
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	int budget = 0;

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev, budget);

	up(&ni->dev_lock);

	zap_completion_queue();
}
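
/*
 * A driver opts in to netpoll by implementing ndo_poll_controller.
 * As a rough sketch (a hypothetical "foo" driver, not part of this
 * file), the callback usually just runs the interrupt handler by
 * hand with the device's interrupt masked:
 *
 *	static void foo_poll_controller(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		disable_irq(priv->irq);
 *		foo_interrupt(priv->irq, dev);	// the driver's ISR
 *		enable_irq(priv->irq);
 *	}
 *
 * netpoll_poll_dev() above then relies on poll_napi() to reap
 * whatever work the handler scheduled.
 */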
void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);
void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;

			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device; maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;

		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);

	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);

	kfree(npinfo);
}
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	/* rtnl_dereference would be preferable here, but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is.
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}
void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);