diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 773083b7f1e98906f5cdc93f770829f293fea205..5c5964962d0ca2f00157c4e533597a34b9a96979 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -83,23 +83,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 
        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
-               int type = skb_shinfo(skb)->gso_type;
-
-               if (unlikely(type &
-                            ~(SKB_GSO_TCPV4 |
-                              SKB_GSO_DODGY |
-                              SKB_GSO_TCP_ECN |
-                              SKB_GSO_TCPV6 |
-                              SKB_GSO_GRE |
-                              SKB_GSO_GRE_CSUM |
-                              SKB_GSO_IPIP |
-                              SKB_GSO_SIT |
-                              SKB_GSO_UDP_TUNNEL |
-                              SKB_GSO_UDP_TUNNEL_CSUM |
-                              SKB_GSO_TUNNEL_REMCSUM |
-                              0) ||
-                            !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
-                       goto out;
 
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
@@ -107,6 +90,12 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                goto out;
        }
 
+       /* GSO partial only requires splitting the frame into an MSS
+        * multiple and possibly a remainder.  So update the mss now.
+        */
+       if (features & NETIF_F_GSO_PARTIAL)
+               mss = skb->len - (skb->len % mss);
+
        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
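The hunk above is the core of the change: with NETIF_F_GSO_PARTIAL the device splits the frame on MSS boundaries itself, so the stack only has to produce an MSS-multiple chunk plus at most one remainder segment, and the effective segment length becomes the payload rounded down to an MSS multiple. A minimal standalone sketch of that arithmetic (the payload and MSS values below are made-up examples, not taken from the patch):

#include <assert.h>
#include <stdio.h>

/* Mirror of "mss = skb->len - (skb->len % mss);" from the hunk above:
 * round the effective segment length down to an MSS multiple. */
static unsigned int gso_partial_seg_len(unsigned int len, unsigned int mss)
{
	return len - (len % mss);
}

int main(void)
{
	/* Example values: a 64240-byte payload with a 1448-byte MSS
	 * leaves 44 full segments (63712 bytes) plus a 528-byte tail. */
	unsigned int len = 64240, mss = 1448;
	unsigned int seg = gso_partial_seg_len(len, mss);

	printf("seg=%u remainder=%u\n", seg, len - seg);
	assert(seg % mss == 0);
	return 0;
}
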
@@ -131,7 +120,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
        newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
                                               (__force u32)delta));
 
-       do {
+       while (skb->next) {
                th->fin = th->psh = 0;
                th->check = newcheck;
 
@@ -151,7 +140,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 
                th->seq = htonl(seq);
                th->cwr = 0;
-       } while (skb->next);
+       }
 
        /* Following permits TCP Small Queues to work well with GSO :
         * The callback to TCP stack will be called at the time last frag
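The two hunks above convert the header-fixup loop from do/while to while. The old form ran its body at least once, but with GSO partial the first segment can also be the last one; the body clears FIN/PSH on the current header and then follows skb->next to patch the following segment, so it must not run for a lone segment. A toy illustration of the loop shape (the struct and field names here are stand-ins, not kernel types):

#include <stdio.h>
#include <stddef.h>

/* Toy stand-in for the segment list walked in the loop above. */
struct seg {
	struct seg *next;
	int fin;			/* stands in for th->fin */
};

/* Entry-tested form: a lone segment is left untouched and ->next is
 * only followed when another segment actually exists. */
static void clear_fin_on_all_but_last(struct seg *s)
{
	while (s->next) {
		s->fin = 0;		/* non-final segments lose FIN */
		s = s->next;
	}
}

int main(void)
{
	struct seg only = { .next = NULL, .fin = 1 };

	/* With the old do { ... } while (s->next) form the body would
	 * run once here, clearing FIN and then chasing a NULL pointer. */
	clear_fin_on_all_but_last(&only);
	printf("lone segment keeps fin=%d\n", only.fin);
	return 0;
}
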
@@ -237,7 +226,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 found:
        /* Include the IP ID check below from the inner most IP hdr */
-       flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
+       flush = NAPI_GRO_CB(p)->flush;
        flush |= (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
@@ -246,6 +235,17 @@ found:
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);
 
+       /* When we receive our second frame we can make a decision on
+        * whether we continue this flow as an atomic flow with a fixed
+        * ID or switch to an incrementing ID.
+        */
+       if (NAPI_GRO_CB(p)->flush_id != 1 ||
+           NAPI_GRO_CB(p)->count != 1 ||
+           !NAPI_GRO_CB(p)->is_atomic)
+               flush |= NAPI_GRO_CB(p)->flush_id;
+       else
+               NAPI_GRO_CB(p)->is_atomic = false;
+
        mss = skb_shinfo(p)->gso_size;
 
        flush |= (len - 1) >= mss;
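The added block decides, once the second frame of a flow arrives, whether the IP IDs are fixed or incrementing: flush_id carries the delta between the expected and observed ID, and only when the flow has merged exactly one frame (count == 1), is still marked atomic, and sees a delta of exactly 1 does it switch to incrementing-ID mode; in every other case the delta is folded into flush, so any ID jump breaks aggregation. A standalone restatement of that decision with plain ints in place of the NAPI_GRO_CB fields (the struct and function below are hypothetical):

#include <stdio.h>
#include <stdbool.h>

/* Plain-int stand-ins for the NAPI_GRO_CB(p) fields used above. */
struct flow {
	int flush_id;	/* delta between successive IP IDs */
	int count;	/* frames already merged into this flow */
	bool is_atomic;	/* still assumed to use a fixed IP ID */
};

/* Returns the extra bits folded into "flush"; may switch the mode. */
static int decide_id_mode(struct flow *f)
{
	if (f->flush_id != 1 || f->count != 1 || !f->is_atomic)
		return f->flush_id;	/* any ID jump forces a flush */
	f->is_atomic = false;		/* IDs increment: track them */
	return 0;
}

int main(void)
{
	struct flow inc = { .flush_id = 1, .count = 1, .is_atomic = true };
	struct flow fix = { .flush_id = 0, .count = 1, .is_atomic = true };

	printf("incrementing: flush+=%d atomic=%d\n",
	       decide_id_mode(&inc), inc.is_atomic);
	printf("fixed:        flush+=%d atomic=%d\n",
	       decide_id_mode(&fix), fix.is_atomic);
	return 0;
}
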
@@ -314,6 +314,9 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
                                  iph->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 
+       if (NAPI_GRO_CB(skb)->is_atomic)
+               skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+
        return tcp_gro_complete(skb);
 }
 
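Finally, tcp4_gro_complete() persists the outcome of that decision: a flow that stayed atomic is tagged SKB_GSO_TCP_FIXEDID alongside SKB_GSO_TCPV4, so a later resegmentation of the aggregate repeats a single IP ID instead of incrementing it. A sketch of the flag propagation (the two flag names match the kernel, but the bit values and struct below are stand-ins):

#include <stdio.h>
#include <stdbool.h>

/* Stand-in flag bits; the kernel defines the real ones in skbuff.h. */
#define GSO_TCPV4	(1 << 0)
#define GSO_TCP_FIXEDID	(1 << 8)

struct merged_skb {
	unsigned int gso_type;
	bool is_atomic;	/* carried over from the GRO decision above */
};

/* Mirrors the tcp4_gro_complete() hunk: always mark TCPV4, and
 * additionally mark FIXEDID when the flow kept a constant IP ID. */
static void complete(struct merged_skb *skb)
{
	skb->gso_type |= GSO_TCPV4;
	if (skb->is_atomic)
		skb->gso_type |= GSO_TCP_FIXEDID;
}

int main(void)
{
	struct merged_skb s = { .gso_type = 0, .is_atomic = true };

	complete(&s);
	printf("gso_type=%#x (fixed-ID=%d)\n", s.gso_type,
	       !!(s.gso_type & GSO_TCP_FIXEDID));
	return 0;
}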