/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
13 #include <linux/skbuff.h>
14 #include <linux/init.h>
15 #include <net/protocol.h>
/* gre_gso_segment() - GSO callback that segments a GRE-encapsulated skb.
 *
 * Strips the outer GRE/tunnel header, hands the inner packet to
 * skb_mac_gso_segment(), then walks the resulting segment list restoring
 * the outer headers (and the GRE checksum, when requested) on each segment.
 *
 * NOTE(review): this file is an extraction-mangled fragment — the embedded
 * original line numbers jump (29->41, 78->83, 85->87, 102->107, 108->111),
 * so error-goto paths, the `do {` loop opener, and several declarations
 * (e.g. `bool need_csum`, `bool ufo`, `__be32 *pcsum`) are missing from
 * view. Recover the canonical body from upstream net/ipv4/gre_offload.c
 * before attempting to build.
 */
18 static struct sk_buff
*gre_gso_segment(struct sk_buff
*skb
,
19 netdev_features_t features
)
/* Tunnel header length: distance from outer transport header to the
 * inner MAC header. */
21 int tnl_hlen
= skb_inner_mac_header(skb
) - skb_transport_header(skb
);
21 int tnl_hlen is computed above; default result is an error pointer. */
22 struct sk_buff
*segs
= ERR_PTR(-EINVAL
);
/* Snapshot outer-header state so it can be restored per segment (and on
 * the error-unwind path). */
23 u16 mac_offset
= skb
->mac_header
;
24 __be16 protocol
= skb
->protocol
;
25 u16 mac_len
= skb
->mac_len
;
26 int gre_offset
, outer_hlen
;
/* Sanity checks: gso_type flags (condition truncated here — NOTE(review):
 * original lines missing), skb must be marked encapsulated, the tunnel
 * header must at least cover a GRE base header, and it must be pullable
 * into the linear area. */
29 if (unlikely(skb_shinfo(skb
)->gso_type
&
41 if (!skb
->encapsulation
)
44 if (unlikely(tnl_hlen
< sizeof(struct gre_base_hdr
)))
47 if (unlikely(!pskb_may_pull(skb
, tnl_hlen
)))
50 /* setup inner skb. */
/* Re-point the skb at the inner packet: drop the encapsulation flag,
 * pull past the tunnel header, and rebuild mac/network/protocol fields
 * from the inner_* offsets. */
51 skb
->encapsulation
= 0;
52 __skb_pull(skb
, tnl_hlen
);
53 skb_reset_mac_header(skb
);
54 skb_set_network_header(skb
, skb_inner_network_offset(skb
));
55 skb
->mac_len
= skb_inner_network_offset(skb
);
56 skb
->protocol
= skb
->inner_protocol
;
/* need_csum: caller asked for a GRE checksum in the outer header
 * (SKB_GSO_GRE_CSUM). NOTE(review): its declaration is outside this view. */
58 need_csum
= !!(skb_shinfo(skb
)->gso_type
& SKB_GSO_GRE_CSUM
);
59 skb
->encap_hdr_csum
= need_csum
;
/* ufo: inner payload is UDP fragmentation offload.
 * NOTE(review): declaration not visible in this fragment. */
61 ufo
= !!(skb_shinfo(skb
)->gso_type
& SKB_GSO_UDP
);
/* Restrict features to what the device supports for encapsulated frames. */
63 features
&= skb
->dev
->hw_enc_features
;
65 /* The only checksum offload we care about from here on out is the
66 * outer one so strip the existing checksum feature flags based
67 * on the fact that we will be computing our checksum in software.
/* NOTE(review): the guard around these two statements (original lines
 * 68-73) is missing from this fragment. */
70 features
&= ~NETIF_F_CSUM_MASK
;
72 features
|= NETIF_F_HW_CSUM
;
75 /* segment inner packet. */
76 segs
= skb_mac_gso_segment(skb
, features
);
77 if (IS_ERR_OR_NULL(segs
)) {
/* Segmentation failed: restore the outer-header state we saved above.
 * NOTE(review): trailing argument(s) and the goto/return of this error
 * path (original lines ~79-81) are missing. */
78 skb_gso_error_unwind(skb
, protocol
, tnl_hlen
, mac_offset
,
/* Per-segment fixup: outer_hlen is the full tunnel header length,
 * gre_offset the offset of the GRE header within it. */
83 outer_hlen
= skb_tnl_header_len(skb
);
84 gre_offset
= outer_hlen
- tnl_hlen
;
/* NOTE(review): the `skb = segs; do {` loop opener (original ~line 85)
 * is missing; the `} while` closer survives at the bottom. */
87 struct gre_base_hdr
*greh
;
90 /* Set up inner headers if we are offloading inner checksum */
91 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
92 skb_reset_inner_headers(skb
);
93 skb
->encapsulation
= 1;
/* Restore the saved outer mac_len/protocol and push the outer header
 * back in front of the segment. */
96 skb
->mac_len
= mac_len
;
97 skb
->protocol
= protocol
;
99 __skb_push(skb
, outer_hlen
);
100 skb_reset_mac_header(skb
);
101 skb_set_network_header(skb
, mac_len
);
102 skb_set_transport_header(skb
, gre_offset
);
/* Recompute the GRE checksum field (first 32-bit word after the base
 * header). NOTE(review): the `if (!need_csum) continue;` guard and the
 * pcsum zeroing (original ~103-110) are missing from this view. */
107 greh
= (struct gre_base_hdr
*)skb_transport_header(skb
);
108 pcsum
= (__be32
*)(greh
+ 1);
111 *(__sum16
*)pcsum
= gso_make_checksum(skb
, 0);
112 } while ((skb
= skb
->next
));
/* gre_gro_receive() - GRO receive callback for GRE.
 *
 * Validates the GRE base header (version 0, only KEY/CSUM flags),
 * verifies the optional checksum, matches candidate packets on the same
 * flags/protocol/key, then pulls the GRE header and hands the inner
 * packet to the protocol's gro_receive callback.
 *
 * NOTE(review): fragment is incomplete — the second parameter
 * (struct sk_buff *skb), several declarations (p, off, type, flush),
 * the out/out_unlock labels and rcu_read_lock/unlock (original lines
 * ~118-228 with gaps) are missing. Consult upstream
 * net/ipv4/gre_offload.c for the canonical body.
 */
117 static struct sk_buff
**gre_gro_receive(struct sk_buff
**head
,
120 struct sk_buff
**pp
= NULL
;
122 const struct gre_base_hdr
*greh
;
123 unsigned int hlen
, grehlen
;
126 struct packet_offload
*ptype
;
/* Only one level of encapsulation may be GRO'd per packet; bail if an
 * outer tunnel already claimed this skb, then claim it ourselves. */
129 if (NAPI_GRO_CB(skb
)->encap_mark
)
132 NAPI_GRO_CB(skb
)->encap_mark
= 1;
/* Make sure at least the GRE base header is directly accessible; fall
 * back to the slow path if the fast header lookup falls short. */
134 off
= skb_gro_offset(skb
);
135 hlen
= off
+ sizeof(*greh
);
136 greh
= skb_gro_header_fast(skb
, off
);
137 if (skb_gro_header_hard(skb
, hlen
)) {
138 greh
= skb_gro_header_slow(skb
, hlen
, off
);
143 /* Only support version 0 and K (key), C (csum) flags. Note that
144 * although the support for the S (seq#) flag can be added easily
145 * for GRO, this is problematic for GSO hence can not be enabled
146 * here because a GRO pkt may end up in the forwarding path, thus
147 * requiring GSO support to break it up correctly.
/* NOTE(review): GRE_VERSION check (original line 148) is missing here. */
149 if ((greh
->flags
& ~(GRE_KEY
|GRE_CSUM
)) != 0)
/* Look up the GRO handler for the encapsulated protocol; the bail-out
 * when none is registered (original ~156-157) is not visible here. */
152 type
= greh
->protocol
;
155 ptype
= gro_find_receive_by_type(type
);
/* Compute the full GRE header length: base section plus one section
 * each for the optional key and checksum fields. */
159 grehlen
= GRE_HEADER_SECTION
;
161 if (greh
->flags
& GRE_KEY
)
162 grehlen
+= GRE_HEADER_SECTION
;
164 if (greh
->flags
& GRE_CSUM
)
165 grehlen
+= GRE_HEADER_SECTION
;
/* Re-check header availability now that optional fields are included. */
167 hlen
= off
+ grehlen
;
168 if (skb_gro_header_hard(skb
, hlen
)) {
169 greh
= skb_gro_header_slow(skb
, hlen
, off
);
174 /* Don't bother verifying checksum if we're going to flush anyway. */
175 if ((greh
->flags
& GRE_CSUM
) && !NAPI_GRO_CB(skb
)->flush
) {
176 if (skb_gro_checksum_simple_validate(skb
))
/* GRE has no pseudo-header, hence null_compute_pseudo. */
179 skb_gro_checksum_try_convert(skb
, IPPROTO_GRE
, 0,
180 null_compute_pseudo
);
/* Walk held packets: demote any candidate that is not from the very
 * same tunnel (same flags, protocol and — when present — key).
 * NOTE(review): `p` is declared outside this fragment. */
183 for (p
= *head
; p
; p
= p
->next
) {
184 const struct gre_base_hdr
*greh2
;
186 if (!NAPI_GRO_CB(p
)->same_flow
)
189 /* The following checks are needed to ensure only pkts
190 * from the same tunnel are considered for aggregation.
191 * The criteria for "the same tunnel" includes:
192 * 1) same version (we only support version 0 here)
193 * 2) same protocol (we only support ETH_P_IP for now)
194 * 3) same set of flags
195 * 4) same key if the key field is present.
197 greh2
= (struct gre_base_hdr
*)(p
->data
+ off
);
199 if (greh2
->flags
!= greh
->flags
||
200 greh2
->protocol
!= greh
->protocol
) {
201 NAPI_GRO_CB(p
)->same_flow
= 0;
/* Key comparison: the key is the 32-bit word right after the base
 * header (greh + 1) — valid because CSUM wasn't seen first only in the
 * no-CSUM layout; NOTE(review): confirm against upstream, the
 * intervening lines (~202-205) are missing. */
204 if (greh
->flags
& GRE_KEY
) {
206 if (*(__be32
*)(greh2
+1) != *(__be32
*)(greh
+1)) {
207 NAPI_GRO_CB(p
)->same_flow
= 0;
/* Consume the GRE header and adjust the running GRO checksum before
 * recursing into the inner protocol's gro_receive. */
213 skb_gro_pull(skb
, grehlen
);
215 /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
216 skb_gro_postpull_rcsum(skb
, greh
, grehlen
);
218 pp
= ptype
->callbacks
.gro_receive(head
, skb
);
/* NOTE(review): the out_unlock/out labels and `flush` declaration
 * (original ~219-226) are missing from this fragment. */
224 NAPI_GRO_CB(skb
)->flush
|= flush
;
/* gre_gro_complete() - GRO complete callback for GRE.
 *
 * @skb:   merged packet whose headers need finalizing
 * @nhoff: offset of the GRE header within skb->data
 *
 * Marks the skb as GRE-encapsulated GSO, skips the GRE header
 * (base + optional key/csum sections) and delegates to the inner
 * protocol's gro_complete, then records the inner MAC header offset.
 *
 * NOTE(review): fragment is incomplete — the declarations of `err` and
 * `type`, the rcu_read_lock/unlock, the NULL-ptype bail-out and the
 * `return err;` (original lines ~230-256 with gaps) are not visible.
 */
229 static int gre_gro_complete(struct sk_buff
*skb
, int nhoff
)
231 struct gre_base_hdr
*greh
= (struct gre_base_hdr
*)(skb
->data
+ nhoff
);
232 struct packet_offload
*ptype
;
233 unsigned int grehlen
= sizeof(*greh
);
/* Resulting super-packet must be re-segmentable as GRE on the way out. */
237 skb
->encapsulation
= 1;
238 skb_shinfo(skb
)->gso_type
= SKB_GSO_GRE
;
/* Header length: base section plus one section per optional field,
 * mirroring the computation in gre_gro_receive(). */
240 type
= greh
->protocol
;
241 if (greh
->flags
& GRE_KEY
)
242 grehlen
+= GRE_HEADER_SECTION
;
244 if (greh
->flags
& GRE_CSUM
)
245 grehlen
+= GRE_HEADER_SECTION
;
/* Delegate to the inner protocol's gro_complete past the GRE header.
 * NOTE(review): NULL check on ptype (original ~249) missing here. */
248 ptype
= gro_find_complete_by_type(type
);
250 err
= ptype
->callbacks
.gro_complete(skb
, nhoff
+ grehlen
);
/* Inner MAC header starts right after the GRE header. */
254 skb_set_inner_mac_header(skb
, nhoff
+ grehlen
);
/* Offload callback table registered for IPPROTO_GRE.
 * NOTE(review): initializer is truncated — upstream wraps these three
 * entries in a `.callbacks = { ... },` member and closes with `};`,
 * neither of which is visible in this fragment.
 */
259 static const struct net_offload gre_offload
= {
261 .gso_segment
= gre_gso_segment
,
262 .gro_receive
= gre_gro_receive
,
263 .gro_complete
= gre_gro_complete
,
/* Register the GRE offload handlers with the inet offload layer at boot.
 * NOTE(review): opening/closing braces of the function body are missing
 * from this fragment.
 */
267 static int __init
gre_offload_init(void)
269 return inet_add_offload(&gre_offload
, IPPROTO_GRE
);
/* Run at device-initcall time so net devices can use GRE GSO/GRO. */
271 device_initcall(gre_offload_init
);