net: Add GSO support for UDP tunnels with checksum
net/ipv4/tcp_offload.c
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

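/* Segment a TSO superpacket into MSS-sized TCP segments in software.
 * The GSO engine calls this when the device cannot segment the skb
 * itself, e.g. after an encapsulation header has been added.
 */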
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int sum_truesize = 0;
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
        __be32 delta;
        unsigned int oldlen;
        unsigned int mss;
        struct sk_buff *gso_skb = skb;
        __sum16 newcheck;
        bool ooo_okay, copy_destructor;

        if (!pskb_may_pull(skb, sizeof(*th)))
                goto out;

        th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        if (!pskb_may_pull(skb, thlen))
                goto out;

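        /* Save ~len as a 16-bit one's-complement value: adding it to a
         * checksum later subtracts the old total length, so each
         * segment's pseudo-header checksum can be fixed up with csum
         * arithmetic alone instead of being recomputed from scratch.
         */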
        oldlen = (u16)~skb->len;
        __skb_pull(skb, thlen);

        mss = tcp_skb_mss(skb);
        if (unlikely(skb->len <= mss))
                goto out;

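        /* If the device could have segmented this skb itself (modulo
         * the DODGY bit), only sanity-check gso_type and refresh
         * gso_segs; no software segmentation is needed.
         */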
        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;

                if (unlikely(type &
                             ~(SKB_GSO_TCPV4 |
                               SKB_GSO_DODGY |
                               SKB_GSO_TCP_ECN |
                               SKB_GSO_TCPV6 |
                               SKB_GSO_GRE |
                               SKB_GSO_IPIP |
                               SKB_GSO_SIT |
                               SKB_GSO_MPLS |
                               SKB_GSO_UDP_TUNNEL |
                               SKB_GSO_UDP_TUNNEL_CSUM |
                               0) ||
                             !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
                        goto out;

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
        skb->ooo_okay = 0;

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;

        /* Only first segment might have ooo_okay set */
        segs->ooo_okay = ooo_okay;

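        /* delta is "-old_total_len + (thlen + mss)" in one's-complement
         * form: folding it into th->check rewrites the pseudo-header
         * checksum from the superpacket's length to the length of one
         * full-MSS segment.
         */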
        delta = htonl(oldlen + (thlen + mss));

        skb = segs;
        th = tcp_hdr(skb);
        seq = ntohl(th->seq);

        newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
                                               (__force u32)delta));

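        /* Walk the segment list: every segment but the last gets the
         * precomputed checksum and FIN/PSH cleared, every segment but
         * the first gets a sequence number advanced by mss and CWR
         * cleared (ECN signals only the first segment).
         */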
        do {
                th->fin = th->psh = 0;
                th->check = newcheck;

                if (skb->ip_summed != CHECKSUM_PARTIAL)
                        th->check = gso_make_checksum(skb, ~th->check);

                seq += mss;
                if (copy_destructor) {
                        skb->destructor = gso_skb->destructor;
                        skb->sk = gso_skb->sk;
                        sum_truesize += skb->truesize;
                }
                skb = skb->next;
                th = tcp_hdr(skb);

                th->seq = htonl(seq);
                th->cwr = 0;
        } while (skb->next);

        /* Following permits TCP Small Queues to work well with GSO:
         * The callback to TCP stack will be called at the time last frag
         * is freed at TX completion, and not right now when gso_skb
         * is freed by GSO engine
         */
        if (copy_destructor) {
                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
                sum_truesize += skb->truesize;
                atomic_add(sum_truesize - gso_skb->truesize,
                           &skb->sk->sk_wmem_alloc);
        }

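        /* The last segment may be shorter than mss, so recompute its
         * checksum delta from the actual payload length it carries.
         */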
        delta = htonl(oldlen + (skb_tail_pointer(skb) -
                                skb_transport_header(skb)) +
                      skb->data_len);
        th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                                (__force u32)delta));
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                th->check = gso_make_checksum(skb, ~th->check);
out:
        return segs;
}

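/* Try to coalesce an incoming TCP segment onto a held GRO flow.
 * Returns a non-NULL list position when the matched flow should be
 * flushed up the stack (header mismatch, out of order, short segment).
 */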
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        struct sk_buff **pp = NULL;
        struct sk_buff *p;
        struct tcphdr *th;
        struct tcphdr *th2;
        unsigned int len;
        unsigned int thlen;
        __be32 flags;
        unsigned int mss = 1;
        unsigned int hlen;
        unsigned int off;
        int flush = 1;
        int i;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*th);
        th = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        hlen = off + thlen;
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        skb_gro_pull(skb, thlen);

        len = skb_gro_len(skb);
        flags = tcp_flag_word(th);

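        /* source and dest are adjacent 16-bit fields in the TCP header,
         * so a single 32-bit XOR compares both ports of a candidate
         * flow at once.
         */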
        for (; (p = *head); head = &p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                th2 = tcp_hdr(p);

                if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                goto found;
        }

        goto out_check_final;

found:
        /* Include the IP ID check below from the innermost IP hdr */
        flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
        flush |= (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
        flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
        for (i = sizeof(*th); i < thlen; i += 4)
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);

        mss = tcp_skb_mss(p);

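        /* Only full-sized, strictly in-sequence segments may be merged:
         * anything shorter than mss, or whose seq does not line up with
         * the end of the held packet, forces a flush.
         */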
        flush |= (len - 1) >= mss;
        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

        if (flush || skb_gro_receive(head, skb)) {
                mss = 1;
                goto out_check_final;
        }

        p = *head;
        th2 = tcp_hdr(p);
        tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
        flush = len < mss;
        flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
                                        TCP_FLAG_RST | TCP_FLAG_SYN |
                                        TCP_FLAG_FIN));

        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
                pp = head;

out:
        NAPI_GRO_CB(skb)->flush |= (flush != 0);

        return pp;
}

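/* Finalize a coalesced GRO skb so it can be handed to the stack as a
 * GSO packet: point the checksum fields at the TCP header and record
 * the segment count gathered during receive.
 */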
int tcp_gro_complete(struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);

        skb->csum_start = (unsigned char *)th - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

        if (th->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

        return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

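/* Prime an outgoing GSO packet for deferred checksumming: zero
 * th->check and seed it with the IPv4 pseudo-header sum so that the
 * device (or the software fallback) only has to fold in the payload.
 */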
static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
        const struct iphdr *iph;
        struct tcphdr *th;

        if (!pskb_may_pull(skb, sizeof(*th)))
                return -EINVAL;

        iph = ip_hdr(skb);
        th = tcp_hdr(skb);

        th->check = 0;
        skb->ip_summed = CHECKSUM_PARTIAL;
        __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
        return 0;
}

static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        /* Use the IP hdr immediately preceding this transport header */
        const struct iphdr *iph = skb_gro_network_header(skb);
        __wsum wsum;

        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (NAPI_GRO_CB(skb)->flush)
                goto skip_csum;

        wsum = NAPI_GRO_CB(skb)->csum;

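        /* Validate th->check against the IPv4 pseudo-header. In the
         * CHECKSUM_NONE case the payload sum must be computed here
         * first; control then falls through to the CHECKSUM_COMPLETE
         * validation.
         */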
        switch (skb->ip_summed) {
        case CHECKSUM_NONE:
                wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
                                    0);

                /* fall through */

        case CHECKSUM_COMPLETE:
                if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
                                  wsum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        break;
                }

                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

skip_csum:
        return tcp_gro_receive(head, skb);
}

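/* IPv4 flavour of gro_complete: restore th->check to the inverted
 * pseudo-header sum for the merged length and mark the skb as TCPv4
 * GSO before handing off to the generic tcp_gro_complete().
 */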
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

        return tcp_gro_complete(skb);
}

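/* Offload callbacks registered for IPPROTO_TCP; the GSO/GRO engine
 * dispatches to these through the inet offload table.
 */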
static const struct net_offload tcpv4_offload = {
        .callbacks = {
                .gso_send_check = tcp_v4_gso_send_check,
                .gso_segment    = tcp_gso_segment,
                .gro_receive    = tcp4_gro_receive,
                .gro_complete   = tcp4_gro_complete,
        },
};

int __init tcpv4_offload_init(void)
{
        return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}