gro: Defer clearing of flush bit in tunnel paths
authorAlexander Duyck <aduyck@mirantis.com>
Wed, 9 Mar 2016 17:24:23 +0000 (09:24 -0800)
committerDavid S. Miller <davem@davemloft.net>
Sun, 13 Mar 2016 19:01:00 +0000 (15:01 -0400)
This patch updates the GRO handlers for GRE, VXLAN, GENEVE, and FOU so that
we do not clear the flush bit until after we have called the next level GRO
handler.  Previously the bit was being cleared before parsing through the
list of frames; however, this resulted in several paths where the bit either
needed to be reset but wasn't (as in the case of FOU), or was being set
unnecessarily (as in the case of GENEVE).  By deferring the clearing of the
bit until after the next-level protocol has been parsed, we can avoid any
unnecessary bit twiddling and avoid bugs.

Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/geneve.c
drivers/net/vxlan.c
net/ipv4/fou.c
net/ipv4/gre_offload.c

index 33185b9a435e86a0f665eeef81b43042c0d20d90..192631a345dfe6e9a36b976323e340f0693b6123 100644 (file)
@@ -463,8 +463,6 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
                        goto out;
        }
 
-       flush = 0;
-
        for (p = *head; p; p = p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
@@ -481,14 +479,13 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 
        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
-       if (!ptype) {
-               flush = 1;
+       if (!ptype)
                goto out_unlock;
-       }
 
        skb_gro_pull(skb, gh_len);
        skb_gro_postpull_rcsum(skb, gh, gh_len);
        pp = ptype->callbacks.gro_receive(head, skb);
+       flush = 0;
 
 out_unlock:
        rcu_read_unlock();
index 8eda76f9e474ddaa29e2db68fcae9f1e16f1fd39..800106a7246cb165d46b4e25316cbfbc41996cef 100644 (file)
@@ -591,8 +591,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
 
        skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
 
-       flush = 0;
-
        for (p = *head; p; p = p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
@@ -606,6 +604,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
        }
 
        pp = eth_gro_receive(head, skb);
+       flush = 0;
 
 out:
        skb_gro_remcsum_cleanup(skb, &grc);
index 88dab0c1670c355c21b3c0aecd253e85ec2fd28f..780484243e144006d8ee409b0377ee4eb4db6dd9 100644 (file)
@@ -319,8 +319,6 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 
        skb_gro_pull(skb, hdrlen);
 
-       flush = 0;
-
        for (p = *head; p; p = p->next) {
                const struct guehdr *guehdr2;
 
@@ -352,6 +350,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
                goto out_unlock;
 
        pp = ops->callbacks.gro_receive(head, skb);
+       flush = 0;
 
 out_unlock:
        rcu_read_unlock();
index 47f4c544c91627816fd7a07e6c80f123c6fef2dc..540866dbd27d6663d4647a5eb1a4e8445b1e6a18 100644 (file)
@@ -175,8 +175,6 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
                                             null_compute_pseudo);
        }
 
-       flush = 0;
-
        for (p = *head; p; p = p->next) {
                const struct gre_base_hdr *greh2;
 
@@ -213,6 +211,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
        skb_gro_postpull_rcsum(skb, greh, grehlen);
 
        pp = ptype->callbacks.gro_receive(head, skb);
+       flush = 0;
 
 out_unlock:
        rcu_read_unlock();
This page took 0.034383 seconds and 5 git commands to generate.