/*
 * net busy poll support
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H

#include <linux/netdevice.h>
#include <net/ip.h>

#ifdef CONFIG_NET_LL_RX_POLL

struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;

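/*
 * Illustrative note: both knobs are exposed as sysctls (net.core.busy_read
 * and net.core.busy_poll) and are in microseconds.  busy_read is the
 * per-socket default copied into sk->sk_ll_usec at socket creation time,
 * while busy_poll is the global budget used by poll()/select(); see
 * busy_loop_end_time() below.
 */
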
/* return values from ndo_busy_poll */
#define LL_FLUSH_FAILED		-1
#define LL_FLUSH_BUSY		-2

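/*
 * Illustrative note, as consumed by sk_busy_loop() below: a positive
 * return from ndo_busy_poll() is the number of packets the driver pulled
 * off its ring, LL_FLUSH_FAILED means busy polling cannot work on this
 * device (a permanent failure), and LL_FLUSH_BUSY typically means the ring
 * is momentarily busy (e.g. owned by the regular NAPI poll) and the loop
 * should simply retry.
 */
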
static inline bool net_busy_loop_on(void)
{
	return sysctl_net_busy_poll;
}

/* a wrapper to make debug_smp_processor_id() happy
 * we can use sched_clock() because we don't care much about precision
 * we only care that the average is bounded
 */
#ifdef CONFIG_DEBUG_PREEMPT
static inline u64 busy_loop_us_clock(void)
{
	u64 rc;

	preempt_disable_notrace();
	rc = sched_clock();
	preempt_enable_no_resched_notrace();

	return rc >> 10;
}
#else /* CONFIG_DEBUG_PREEMPT */
static inline u64 busy_loop_us_clock(void)
{
	return sched_clock() >> 10;
}
#endif /* CONFIG_DEBUG_PREEMPT */
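
/*
 * Note on the ">> 10" above: sched_clock() returns nanoseconds, and a right
 * shift by 10 divides by 1024, a cheap approximation of dividing by 1000.
 * E.g. 1,000,000 ns >> 10 = 976 rather than 1000, a ~2% error, which is
 * acceptable because only the average busy-poll duration has to be bounded.
 */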

static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
{
	return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
}

/* in poll/select we use the global sysctl_net_busy_poll value */
static inline unsigned long busy_loop_end_time(void)
{
	return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return sk->sk_ll_usec && sk->sk_napi_id &&
	       !need_resched() && !signal_pending(current);
}

static inline bool busy_loop_timeout(unsigned long end_time)
{
	unsigned long now = busy_loop_us_clock();

	return time_after(now, end_time);
}

/* when used in sock_poll() nonblock is known at compile time to be true
 * so the loop and end_time will be optimized out
 */
static inline bool sk_busy_loop(struct sock *sk, int nonblock)
{
	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
	const struct net_device_ops *ops;
	struct napi_struct *napi;
	int rc = false;

	/*
	 * rcu read lock for napi hash
	 * bh so we don't race with net_rx_action
	 */
	rcu_read_lock_bh();

	napi = napi_by_id(sk->sk_napi_id);
	if (!napi)
		goto out;

	ops = napi->dev->netdev_ops;
	if (!ops->ndo_busy_poll)
		goto out;

	do {
		rc = ops->ndo_busy_poll(napi);

		if (rc == LL_FLUSH_FAILED)
			break; /* permanent failure */

		if (rc > 0)
			/* local bh are disabled so it is ok to use _BH */
			NET_ADD_STATS_BH(sock_net(sk),
					 LINUX_MIB_LOWLATENCYRXPACKETS, rc);

	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
		 !need_resched() && !busy_loop_timeout(end_time));

	rc = !skb_queue_empty(&sk->sk_receive_queue);
out:
	rcu_read_unlock_bh();
	return rc;
}
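
/*
 * Usage sketch (illustrative, not part of this header): a blocking receive
 * path would typically busy poll before going to sleep, roughly
 *
 *	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
 *		sk_busy_loop(sk, flags & MSG_DONTWAIT);
 *
 * while sock_poll() makes a single non-blocking pass per socket by calling
 * sk_busy_loop(sk, 1) when sk_can_busy_loop() is true.
 */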

/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
	skb->napi_id = napi->napi_id;
}

/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_napi_id = skb->napi_id;
}

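/*
 * Flow sketch (illustrative; "ring" is a hypothetical driver-private
 * structure): the driver's NAPI poll routine marks each skb with its NAPI
 * id before handing it up the stack, e.g.
 *
 *	skb_mark_napi_id(skb, &ring->napi);
 *	napi_gro_receive(&ring->napi, skb);
 *
 * and the protocol receive handler then copies that id onto the socket via
 * sk_mark_napi_id(sk, skb), which is what later lets sk_busy_loop() find
 * the right napi_struct through napi_by_id(sk->sk_napi_id).
 */
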
#else /* CONFIG_NET_LL_RX_POLL */
static inline bool net_busy_loop_on(void)
{
	return false;
}

static inline unsigned long busy_loop_end_time(void)
{
	return 0;
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return false;
}

static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
}

static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
{
}

static inline bool busy_loop_timeout(unsigned long end_time)
{
	return true;
}

static inline bool sk_busy_loop(struct sock *sk, int nonblock)
{
	return false;
}

#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* _LINUX_NET_BUSY_POLL_H */