/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>
int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;
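/* For reference, a sketch of the defaults behind the initializers above
 * (values from <net/tcp.h> in this kernel generation): TCP_SYN_RETRIES = 6,
 * TCP_SYNACK_RETRIES = 5, TCP_RETR1 = 3 and TCP_RETR2 = 15, so by default
 * an established connection retransmits up to 15 times before
 * tcp_write_timeout() gives up on it.
 */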
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}
/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero-window probe timeout occurs on an orphaned socket.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. the number of orphaned sockets exceeds an administratively configured
 *    limit, or
 * 2. we are under strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}
/* Calculate maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}
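/* A rough sanity check of the ">100 seconds" claim above, assuming the
 * stock TCP_RTO_MIN of 200 msec: with exponential backoff, 8 retries wait
 * a total of about
 *	0.2 * (2^9 - 1) = 102.2 seconds,
 * i.e. the ((2 << boundary) - 1) * rto_base formula that
 * retransmits_timed_out() uses further down.
 */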
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	struct net *net = sock_net(sk);

	/* Black hole detection */
	if (net->ipv4.sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}
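/* Worked example of the shrinking step above (a sketch, assuming
 * sysctl_tcp_base_mss = 512 and a search_low currently worth a 1460-byte
 * MSS): halving gives 730, then min(512, 730) = 512, so each
 * timeout-driven probe failure halves the search range until it bottoms
 * out near the floor set by the 68-byte minimum IPv4 MTU.
 */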
/* This function calculates a "timeout" which is equivalent to the timeout of a
 * TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN, or TCP_TIMEOUT_INIT if
 * the syn_set flag is set.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (unlikely(!start_ts))
		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}
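/* Worked example (a sketch using the stock constants TCP_RTO_MIN = 200 msec
 * and TCP_RTO_MAX = 120 sec, so linear_backoff_thresh = ilog2(600) = 9):
 *	boundary = 3  (tcp_retries1):  ((2 << 3) - 1) * 0.2s            =   3 s
 *	boundary = 15 (tcp_retries2):  ((2 << 9) - 1) * 0.2s + 6 * 120s = 924.6 s
 * i.e. with defaults an established connection times out after roughly
 * 15.5 minutes of failed retransmissions.
 */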
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
			if (tp->syn_fastopen || tp->syn_data)
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
			if (tp->syn_data && icsk->icsk_retransmits == 1)
				NET_INC_STATS_BH(sock_net(sk),
						 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		}
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Some middle-boxes may black-hole Fast Open _after_
			 * the handshake. Therefore we conservatively disable
			 * Fast Open on this path on recurring timeouts with
			 * few or zero bytes acked after Fast Open.
			 */
			if (tp->syn_data_acked &&
			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
				if (icsk->icsk_retransmits == sysctl_tcp_retries1)
					NET_INC_STATS_BH(sock_net(sk),
							 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
			}
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}
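/* Net effect of the above, in sketch form: SYN/SYN-ACK timeouts are bounded
 * by tcp_syn_retries; for established flows, crossing tcp_retries1 only
 * triggers MTU probing and route re-selection, while tcp_retries2 (or
 * TCP_USER_TIMEOUT, when set) is what actually kills the connection.
 */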
void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}
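/* Sketch of the ATO handling above: a missed delayed ACK in interactive
 * (pingpong) mode drops the ACK timeout back to TCP_ATO_MIN (40 msec);
 * in bulk mode the ATO instead doubles, 40 -> 80 -> 160 msec and so on,
 * clamped at the current RTO.
 */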
static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;
	u32 start_ts;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe the window in tcp_retransmit_timer().
	 */
	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
	if (!start_ts)
		skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
		goto abort;

	max_probes = sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}
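/* User-space view of the icsk_user_timeout checks above (a sketch, not
 * part of this file): the TCP_USER_TIMEOUT knob is set per socket, e.g.
 *
 *	unsigned int tmo_ms = 30000;	// give up after 30 s without progress
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &tmo_ms, sizeof(tmo_ms));
 *
 * and, when non-zero, overrides both the zero-window-probe and
 * retransmission count limits.
 */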
/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
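/* Backoff sketch for the timer armed above: with TCP_TIMEOUT_INIT = 1 sec,
 * the child's SYN-ACK is re-sent after 2, 4, 8, ... seconds
 * (TCP_TIMEOUT_INIT << num_timeout, with num_timeout already incremented),
 * capped at TCP_RTO_MAX.
 */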
/*
 *	The TCP retransmit timer.
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);

		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}
	tcp_enter_loss(sk);

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}
	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;
out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}
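/* Sketch of the thin-stream behaviour above: a "thin" flow (fewer than
 * four packets in flight, per tcp_stream_is_thin()) retransmits at a flat
 * RTO for the first TCP_THIN_LINEAR_RETRIES (= 6) attempts, so e.g. a
 * 200 msec RTO stays 200, 200, 200, ... instead of 200, 400, 800 msec.
 */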
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_EARLY_RETRANS:
		tcp_resume_early_retransmit(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}
static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}
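/* Both timer bottom halves above follow the same pattern: if the socket is
 * locked by user context, they set a TSQ deferral bit instead of running,
 * and tcp_release_cb() replays the handler once the lock is released (the
 * extra sock_hold() keeps the socket alive until then).
 */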
void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);
void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
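/* User-space view (a sketch, not part of this file): this path runs when
 * an application toggles keepalives, e.g.
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *
 * after which tcp_keepalive_timer() below takes over the probing.
 */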
static void tcp_keepalive_timer (unsigned long data)
{
	struct sock *sk = (struct sock *) data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer (sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer (sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
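/* Default cadence sketch for the logic above: tcp_keepalive_time = 7200 s
 * of idle, then up to tcp_keepalive_probes = 9 probes spaced
 * tcp_keepalive_intvl = 75 s apart, so an unresponsive peer is declared
 * dead after roughly 7200 + 9 * 75 = 7875 seconds (about 2 h 11 min).
 */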
void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}