tcp: fix cwnd limited checking to improve congestion control
[deliverable/linux.git] net/ipv4/tcp_output.c
index 20847de991eaaabf24dd9510f16d449edcd0148a..f9181a1334629125b54e799ec3100467f6a7d597 100644 (file)
@@ -1402,12 +1402,13 @@ static void tcp_cwnd_application_limited(struct sock *sk)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-/* Congestion window validation. (RFC2861) */
-static void tcp_cwnd_validate(struct sock *sk)
+static void tcp_cwnd_validate(struct sock *sk, u32 unsent_segs)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tp->packets_out >= tp->snd_cwnd) {
+       tp->lsnd_pending = tp->packets_out + unsent_segs;
+
+       if (tcp_is_cwnd_limited(sk, 0)) {
                /* Network is feed fully. */
                tp->snd_cwnd_used = 0;
                tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -1880,7 +1881,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       unsigned int tso_segs, sent_pkts;
+       unsigned int tso_segs, sent_pkts, unsent_segs = 0;
        int cwnd_quota;
        int result;
 
@@ -1924,7 +1925,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                                break;
                } else {
                        if (!push_one && tcp_tso_should_defer(sk, skb))
-                               break;
+                               goto compute_unsent_segs;
                }
 
                /* TCP Small Queues :
@@ -1949,8 +1950,14 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                         * there is no smp_mb__after_set_bit() yet
                         */
                        smp_mb__after_clear_bit();
-                       if (atomic_read(&sk->sk_wmem_alloc) > limit)
+                       if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+                               u32 unsent_bytes;
+
+compute_unsent_segs:
+                               unsent_bytes = tp->write_seq - tp->snd_nxt;
+                               unsent_segs = DIV_ROUND_UP(unsent_bytes, mss_now);
                                break;
+                       }
                }
 
                limit = mss_now;
@@ -1990,7 +1997,7 @@ repair:
                /* Send one loss probe per tail loss episode. */
                if (push_one != 2)
                        tcp_schedule_loss_probe(sk);
-               tcp_cwnd_validate(sk);
+               tcp_cwnd_validate(sk, unsent_segs);
                return false;
        }
        return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
This page took 0.025912 seconds and 5 git commands to generate.