/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

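/* Since carrier_timeout is writable (0644), it can be tuned at runtime
 * through sysfs in addition to the boot command line.  A sketch of both,
 * assuming the usual module-parameter paths for built-in code:
 *
 *	echo 30 > /sys/module/netpoll/parameters/carrier_timeout
 *
 * or on the kernel command line:
 *
 *	netpoll.carrier_timeout=30
 */
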
#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)			\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
			      struct netdev_queue *txq)
{
	int status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (vlan_tx_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_put_tag(skb, skb->vlan_proto,
				     vlan_tx_tag_get(skb));
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
		skb->vlan_tci = 0;
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		txq = skb_get_tx_queue(dev, skb);

		local_irq_save(flags);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 */
static int poll_one_napi(struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and our's are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);

	return budget - work;
}

static void poll_napi(struct net_device *dev, int budget)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(napi, budget);
			spin_unlock(&napi->poll_lock);
		}
	}
}

static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	int budget = 0;

	/* Don't do any rx activity if the dev_lock mutex is held
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev, budget);

	up(&ni->dev_lock);

	zap_completion_queue();
}

void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);

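/* The disable/enable pair above is what the core uses to fence netpoll
 * off while a device changes state.  Roughly, the callers in
 * net/core/dev.c look like this (illustrative sketch, not the literal
 * __dev_open()/__dev_close_many() bodies):
 *
 *	netpoll_poll_disable(dev);	// blocks until netpoll rx is quiesced
 *	...change device state (up/down)...
 *	netpoll_poll_enable(dev);	// releases ni->dev_lock
 */
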
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

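/* Callers normally reach netpoll_send_skb_on_dev() via the
 * netpoll_send_skb() inline wrapper, which supplies the required
 * IRQs-off context.  For reference, the wrapper in
 * include/linux/netpoll.h looks roughly like this:
 *
 *	static inline void netpoll_send_skb(struct netpoll *np,
 *					    struct sk_buff *skb)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		netpoll_send_skb_on_dev(np, skb, np->dev);
 *		local_irq_restore(flags);
 *	}
 */
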
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

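/* A typical client chunks its payload so each datagram fits in one
 * fully-sized pool skb.  This sketch mirrors what a netconsole-style
 * user might do; example_write_msg() and its chunk size choice are
 * illustrative, not part of this file's API:
 *
 *	static void example_write_msg(struct netpoll *np,
 *				      const char *msg, int len)
 *	{
 *		int frag, left;
 *
 *		for (left = len; left > 0; left -= frag, msg += frag) {
 *			frag = min(left, MAX_UDP_CHUNK);
 *			netpoll_send_udp(np, msg, frag);
 *		}
 *	}
 */
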
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

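/* The option string accepted above follows the netconsole convention:
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * e.g. (illustrative addresses):
 *
 *	6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55
 *
 * Leading fields may be omitted along with their delimiter, which is
 * what the "if (*cur != ...)" early checks above implement; omitted
 * fields keep whatever defaults the caller preloaded into *np.
 */
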
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

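/* Stacked devices (bonding, vlan, bridge, team) hook in through
 * ndo_netpoll_setup(): they typically allocate a struct netpoll for
 * each lower device and call __netpoll_setup() on it under RTNL.  A
 * rough sketch of such a callback, with example_get_lower_dev() and
 * the error handling simplified for illustration:
 *
 *	static int example_netpoll_setup(struct net_device *dev,
 *					 struct netpoll_info *ni)
 *	{
 *		struct netpoll *np = kzalloc(sizeof(*np), GFP_KERNEL);
 *
 *		if (!np)
 *			return -ENOMEM;
 *		return __netpoll_setup(np, example_get_lower_dev(dev));
 *	}
 */
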
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

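/* End-to-end lifecycle sketch for a hypothetical in-kernel user (all
 * example_* names and addresses are illustrative): parse a config
 * string, bring the interface up via netpoll_setup(), send, and tear
 * down with netpoll_cleanup() on module exit.
 *
 *	static struct netpoll example_np = {
 *		.name = "example",
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		char opt[] = "6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55";
 *
 *		if (netpoll_parse_options(&example_np, opt))
 *			return -EINVAL;
 *		if (netpoll_setup(&example_np))
 *			return -ENODEV;
 *		netpoll_send_udp(&example_np, "hello\n", 6);
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		netpoll_cleanup(&example_np);
 *	}
 */
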
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}

void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);