/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>

#include "ip6_offload.h"
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	/* Walk the extension-header chain, pulling each header whose
	 * registered offload is flagged as a GSO extension header, and
	 * return the protocol number that follows the chain.
	 */
	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}
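
/* A minimal sketch of why the fixed 8-byte pull above is enough, assuming
 * the standard extension-header layout (RFC 2460): each option header
 * handled here starts with a next-header octet and a length octet counted
 * in 8-octet units beyond the first 8 octets, so reading 8 bytes always
 * exposes the real length before the full header is pulled.  ipv6_optlen()
 * boils down to:
 *
 *	len = (opth->hdrlen + 1) << 3;
 */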
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int unfrag_ip6hlen;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;

	/* Refuse to segment packets carrying GSO flags we do not handle. */
	if (unlikely(skb_shinfo(skb)->gso_type &
		     ~(SKB_GSO_TCPV4 |
		       SKB_GSO_UDP |
		       SKB_GSO_DODGY |
		       SKB_GSO_TCP_ECN |
		       SKB_GSO_GRE |
		       SKB_GSO_GRE_CSUM |
		       SKB_GSO_IPIP |
		       SKB_GSO_SIT |
		       SKB_GSO_UDP_TUNNEL |
		       SKB_GSO_UDP_TUNNEL_CSUM |
		       SKB_GSO_TUNNEL_REMCSUM |
		       SKB_GSO_TCPV6 |
		       0)))
		goto out;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;
	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
		udpfrag = proto == IPPROTO_UDP && encap;
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR(segs))
		goto out;
	/* Fix up the IPv6 header of every resulting segment. */
	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
		skb->network_header = (u8 *)ipv6h - skb->head;

		if (udpfrag) {
			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
			fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}
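
/* A minimal sketch of the frag_off arithmetic above, assuming the RFC 2460
 * fragment header layout and non-final segment payloads that are a
 * multiple of 8 bytes: the fragment offset occupies the upper 13 bits of
 * frag_off and is counted in 8-octet units, while the low 3 bits carry
 * flags (IP6_MF = more fragments).  The running byte offset therefore has
 * its low 3 bits clear and can be stored directly:
 *
 *	fptr->frag_off = htons(offset);			byte offset, low bits zero
 *	if (skb->next)
 *		fptr->frag_off |= htons(IP6_MF);	more fragments follow
 */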
/* Return the total length of all the extension hdrs, following the same
 * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;

			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}
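
/* A worked example for ipv6_exthdrs_len(), assuming a TCP packet carrying
 * a Hop-by-Hop header (8 bytes) followed by a Destination Options header
 * (16 bytes), with the extension-header offloads registered by
 * ipv6_exthdrs_offload_init() below in place:
 *
 *	const struct net_offload *ops;
 *	int extlen = ipv6_exthdrs_len(ipv6_hdr(skb), &ops);
 *
 * extlen ends up as 8 + 16 = 24 and ops points at the TCP offload: the
 * walk stops at the first next-header value whose offload is missing or
 * not flagged INET6_PROTO_GSO_EXTHDR, mirroring ipv6_gso_pull_exthdrs().
 */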
static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		/* The next header has no GRO handler of its own (e.g. it is
		 * an extension header); skip past the extension headers and
		 * retry with the protocol that follows them.
		 */
		__pskb_pull(skb, skb_gro_offset(skb));
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);
	for (p = *head; p; p = p->next) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    memcmp(&iph->nexthdr, &iph2->nexthdr,
			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;

		/* Clear flush_id, there's really no concept of ID in IPv6. */
		NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
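
/* A minimal sketch of the first_word masks used above, assuming the
 * RFC 2460 layout of the first 32 bits of the IPv6 header,
 * <Version:4><Traffic_Class:8><Flow_Label:20>:
 *
 *	first_word & htonl(0xF00FFFFF)	Version or Flow Label differ:
 *					not the same flow, do not merge
 *	first_word & htonl(0x0FF00000)	only Traffic Class differs:
 *					still the same flow, but flush
 */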
static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	if (skb->encapsulation)
		skb_set_inner_network_header(skb, nhoff);

	/* payload_len must now cover the merged super-packet */
	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	rcu_read_lock();

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}
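
/* A minimal sketch of the payload_len fixup above: after GRO has merged
 * several packets, skb->len covers the whole super-packet, so the IPv6
 * payload length is recomputed from the network-header offset.  For a
 * plain Ethernet frame (nhoff == ETH_HLEN) this is simply:
 *
 *	iph->payload_len = htons(skb->len - ETH_HLEN - sizeof(*iph));
 */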
static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_SIT;
	return ipv6_gro_complete(skb, nhoff);
}
static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = sit_gro_complete,
	},
};
static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (udp_offload_init() < 0)
		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);

	return 0;
}

fs_initcall(ipv6_offload_init);