/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *	Alan Cox	:	Numerous verify_area() calls
 *	Alan Cox	:	Set the ACK bit on a reset
 *	Alan Cox	:	Stopped it crashing if it closed while
 *				sk->inuse=1 and was trying to connect
 *	Alan Cox	:	All icmp error handling was broken
 *				pointers passed where wrong and the
 *				socket was looked up backwards. Nobody
 *				tested any icmp error code obviously.
 *	Alan Cox	:	tcp_err() now handled properly. It
 *				wakes people on errors. poll
 *				behaves and the icmp error race
 *				has gone by moving it into sock.c
 *	Alan Cox	:	tcp_send_reset() fixed to work for
 *				everything not just packets for
 *	Alan Cox	:	tcp option processing.
 *	Alan Cox	:	Reset tweaked (still not 100%) [Had
 *	Herp Rosmanith	:	More reset fixes
 *	Alan Cox	:	No longer acks invalid rst frames.
 *				Acking any kind of RST is right out.
 *	Alan Cox	:	Sets an ignore me flag on an rst
 *				receive otherwise odd bits of prattle
 *	Alan Cox	:	Fixed another acking RST frame bug.
 *				Should stop LAN workplace lockups.
 *	Alan Cox	:	Some tidyups using the new skb list
 *	Alan Cox	:	sk->keepopen now seems to work
 *	Alan Cox	:	Pulls options out correctly on accepts
 *	Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *	Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *	Alan Cox	:	Tidied tcp_data to avoid a potential
 *	Alan Cox	:	Added some better commenting, as the
 *				tcp is hard to follow
 *	Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly :	ack < copied bug fix.
 *	Johannes Stille	:	Misc tcp fixes (not all in yet).
 *	Alan Cox	:	FIN with no memory -> CRASH
 *	Alan Cox	:	Added socket option proto entries.
 *				Also added awareness of them to accept.
 *	Alan Cox	:	Added TCP options (SOL_TCP)
 *	Alan Cox	:	Switched wakeup calls to callbacks,
 *				so the kernel can layer network
 *	Alan Cox	:	Use ip_tos/ip_ttl settings.
 *	Alan Cox	:	Handle FIN (more) properly (we hope).
 *	Alan Cox	:	RST frames sent on unsynchronised
 *	Alan Cox	:	Put in missing check for SYN bit.
 *	Alan Cox	:	Added tcp_select_window() aka NET2E
 *				window non shrink trick.
 *	Alan Cox	:	Added a couple of small NET2E timer
 *	Charles Hedrick	:	TCP fixes
 *	Toomas Tamm	:	TCP window fixes
 *	Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *	Charles Hedrick	:	Rewrote most of it to actually work
 *	Linus		:	Rewrote tcp_read() and URG handling
 *	Gerhard Koerting:	Fixed some missing timer handling
 *	Matthew Dillon	:	Reworked TCP machine states as per RFC
 *	Gerhard Koerting:	PC/TCP workarounds
 *	Adam Caldwell	:	Assorted timer/timing errors
 *	Matthew Dillon	:	Fixed another RST bug
 *	Alan Cox	:	Move to kernel side addressing changes.
 *	Alan Cox	:	Beginning work on TCP fastpathing
 *	Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *	Alan Cox	:	TCP fast path debugging
 *	Alan Cox	:	Window clamping
 *	Michael Riepe	:	Bug in tcp_check()
 *	Matt Dillon	:	More TCP improvements and RST bug fixes
 *	Matt Dillon	:	Yet more small nasties removed from the
 *				TCP code (Be very nice to this man if
 *				tcp finally works 100%) 8)
 *	Alan Cox	:	BSD accept semantics.
 *	Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver :	ENOTCONN check missing in tcp_sendto().
 *	Michael Pall	:	Handle poll() after URG properly in
 *	Michael Pall	:	Undo the last fix in tcp_read_urg()
 *				(multi URG PUSH broke rlogin).
 *	Michael Pall	:	Fix the multi URG PUSH problem in
 *				tcp_readable(), poll() after URG
 *	Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *	Alan Cox	:	Changed the semantics of sk->socket to
 *				fix a race and a signal problem with
 *				accept() and async I/O.
 *	Alan Cox	:	Relaxed the rules on tcp_sendto().
 *	Yury Shevchuk	:	Really fixed accept() blocking problem.
 *	Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *				clients/servers which listen in on
 *	Alan Cox	:	Cleaned the above up and shrank it to
 *				a sensible code size.
 *	Alan Cox	:	Self connect lockup fix.
 *	Alan Cox	:	No connect to multicast.
 *	Ross Biro	:	Close unaccepted children on master
 *	Alan Cox	:	Reset tracing code.
 *	Alan Cox	:	Spurious resets on shutdown.
 *	Alan Cox	:	Giant 15 minute/60 second timer error
 *	Alan Cox	:	Small whoops in polling before an
 *	Alan Cox	:	Kept the state trace facility since
 *				it's handy for debugging.
 *	Alan Cox	:	More reset handler fixes.
 *	Alan Cox	:	Started rewriting the code based on
 *				the RFC's for other useful protocol
 *				references see: Comer, KA9Q NOS, and
 *				for a reference on the difference
 *				between specifications and how BSD
 *				works see the 4.4lite source.
 *	A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *	Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *	Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *	Alan Cox	:	Reimplemented timers as per the RFC
 *				and using multiple timers for sanity.
 *	Alan Cox	:	Small bug fixes, and a lot of new
 *	Alan Cox	:	Fixed dual reader crash by locking
 *				the buffers (much like datagram.c)
 *	Alan Cox	:	Fixed stuck sockets in probe. A probe
 *				now gets fed up of retrying without
 *				(even a no space) answer.
 *	Alan Cox	:	Extracted closing code better
 *	Alan Cox	:	Fixed the closing state machine to
 *	Alan Cox	:	More 'per spec' fixes.
 *	Jorge Cwik	:	Even faster checksumming.
 *	Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *				only frames. At least one pc tcp stack
 *	Alan Cox	:	Cache last socket.
 *	Alan Cox	:	Per route irtt.
 *	Matt Day	:	poll()->select() match BSD precisely on error
 *	Alan Cox	:	New buffers
 *	Marc Tamsky	:	Various sk->prot->retransmits and
 *				sk->retransmits misupdating fixed.
 *				Fixed tcp_write_timeout: stuck close,
 *				and TCP syn retries gets used now.
 *	Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *				ack if state is TCP_CLOSED.
 *	Alan Cox	:	Look up device on a retransmit - routes may
 *				change. Doesn't yet cope with MSS shrink right
 *	Marc Tamsky	:	Closing in closing fixes.
 *	Mike Shaver	:	RFC1122 verifications.
 *	Alan Cox	:	rcv_saddr errors.
 *	Alan Cox	:	Block double connect().
 *	Alan Cox	:	Small hooks for enSKIP.
 *	Alexey Kuznetsov:	Path MTU discovery.
 *	Alan Cox	:	Support soft errors.
 *	Alan Cox	:	Fix MTU discovery pathological case
 *				when the remote claims no mtu!
 *	Marc Tamsky	:	TCP_CLOSE fix.
 *	Colin (G3TNE)	:	Send a reset on syn ack replies in
 *				window but wrong (fixes NT lpd problems)
 *	Pedro Roque	:	Better TCP window handling, delayed ack.
 *	Joerg Reuter	:	No modification of locked buffers in
 *				tcp_do_retransmit()
 *	Eric Schenk	:	Changed receiver side silly window
 *				avoidance algorithm to BSD style
 *				algorithm. This doubles throughput
 *				against machines running Solaris,
 *				and seems to result in general
 *	Stefan Magdalinski :	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg :	Transparent proxying support.
 *	Mike McLagan	:	Routing by source
 *	Keith Owens	:	Do proper merging with partial SKB's in
 *				tcp_do_sendmsg to avoid burstiness.
 *	Eric Schenk	:	Fix fast close down bug with
 *				shutdown() followed by close().
 *	Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo :	Support SO_LINGER with linger == 1 and
 *				lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi :	Use copy_from_user() instead of
 *				csum_and_copy_from_user() if possible.
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
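
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * how the states above are typically traversed by the side that closes an
 * established connection first.  The descriptor names are made up for this
 * example only.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	connect(fd, ...);		// TCP_SYN_SENT -> TCP_ESTABLISHED
 *	shutdown(fd, SHUT_WR);		// FIN sent -> TCP_FIN_WAIT1
 *					// peer ACKs our FIN -> TCP_FIN_WAIT2
 *					// peer's FIN arrives -> TCP_TIME_WAIT
 *	close(fd);			// socket reaches TCP_CLOSE after the
 *					// TIME_WAIT period expires
 *
 * If the peer closes first, this side instead sits in TCP_CLOSE_WAIT until
 * it close()s, then passes through TCP_LAST_ACK to TCP_CLOSE.
 */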

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);

struct tcp_splice_state {
	struct pipe_inode_info *pipe;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	int period = timeout;

	while (seconds > period && res < 255) {
		if (timeout > rto_max)
	}
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	if (timeout > rto_max)
}
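
/*
 * Worked example (illustration only, not from the original source): assuming
 * an initial timeout of 1 second and rto_max of 120 seconds, the per-attempt
 * timeouts grow 1, 2, 4, 8, 16, ... seconds (each capped at rto_max), so the
 * cumulative waiting time after N retransmits is 1, 3, 7, 15, 31, ... seconds.
 * secs_to_retrans(10, ...) therefore yields 4 (the smallest N whose cumulative
 * period covers 10 seconds), and retrans_to_secs() performs the inverse
 * mapping.  This is how the TCP_DEFER_ACCEPT value in seconds is translated
 * into a retransmit count further below.
 */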

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
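	/*
	 * Illustrative sketch (added for clarity, not in the original file):
	 * a user-space reader distinguishing a half-close from a full close
	 * based on the bits computed below.  The variable names are made up
	 * for the example; the event bits follow poll(2).
	 *
	 *	struct pollfd ev = { .fd = fd, .events = POLLIN | POLLRDHUP };
	 *	poll(&ev, 1, -1);
	 *	if (ev.revents & POLLHUP)
	 *		... both directions are shut down (or socket is closed)
	 *	else if (ev.revents & POLLRDHUP)
	 *		... the peer sent a FIN, but we may still write
	 */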
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&

		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awaken
		 * in SYN_* states. */
		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		} else
			mask |= POLLOUT | POLLWRNORM;

		if (tp->urg_data & TCP_URG_VALID)
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
}
EXPORT_SYMBOL(tcp_poll);

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);

		if (sk->sk_state == TCP_LISTEN)

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {

			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			skb = skb_peek_tail(&sk->sk_receive_queue);
				answ -= tcp_hdr(skb)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;

		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;

		if (sk->sk_state == TCP_LISTEN)

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = tp->write_seq - tp->snd_una;

		if (sk->sk_state == TCP_LISTEN)

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = tp->write_seq - tp->snd_nxt;

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq     = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;

	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	tp->snd_up = tp->write_seq;
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, tcp_write_queue_tail(sk));

		tcp_mark_urg(tp, flags);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;

	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
	rd_desc->count -= ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 *    Will read pages from given socket and fill them into a pipe.
 **/
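/*
 * Usage sketch (illustrative, not part of the original file): this is the
 * kernel side of splice(2) on a TCP socket.  A typical zero-copy relay in
 * user space, with made-up descriptor names, looks roughly like:
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	n = splice(tcp_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *	splice(pfd[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);
 *
 * The first splice() call ends up in tcp_splice_read() below.
 */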
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
	};

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);

		ret = __tcp_splice_read(sk, &tss);

			if (sock_flag(sk, SOCK_DONE))
			ret = sock_error(sk);
			if (sk->sk_shutdown & RCV_SHUTDOWN)
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
			}

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
}
EXPORT_SYMBOL(tcp_splice_read);

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	/* The TCP header must be at least 32-bit aligned. */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
		if (sk_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, sk->sk_prot->max_header);
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb->avail_size = size;
		}

	sk->sk_prot->enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 xmit_size_goal, old_size_goal;

	xmit_size_goal = mss_now;

	if (large_allowed && sk_can_gso(sk)) {
		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -

		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);

		/* We try hard to avoid divides here */
		old_size_goal = tp->xmit_size_goal_segs * mss_now;

		if (likely(old_size_goal <= xmit_size_goal &&
			   old_size_goal + mss_now > xmit_size_goal)) {
			xmit_size_goal = old_size_goal;
		} else {
			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
		}
	}

	return max(xmit_size_goal, mss_now);
}

static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))

		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {

			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
				goto wait_for_memory;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);

		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);

			skb_fill_page_desc(skb, i, page, offset, copy);

		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		if (!(psize -= copy))

		if (skb->len < size_goal || (flags & MSG_OOB))

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);

		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

		tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)

		mss_now = tcp_send_mss(sk, &size_goal, flags);

	if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
		tcp_push(sk, flags, mss_now, tp->nonagle);

	return sk_stream_error(sk, flags, err);
*sk
, struct page
*page
, int offset
,
876 size_t size
, int flags
)
880 if (!(sk
->sk_route_caps
& NETIF_F_SG
) ||
881 !(sk
->sk_route_caps
& NETIF_F_ALL_CSUM
))
882 return sock_no_sendpage(sk
->sk_socket
, page
, offset
, size
,
886 res
= do_tcp_sendpages(sk
, &page
, offset
, size
, flags
);
890 EXPORT_SYMBOL(tcp_sendpage
);

static inline int select_size(const struct sock *sk, bool sg)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk_can_gso(sk)) {
		/* Small frames won't use a full page:
		 * Payload will immediately follow tcp header.
		 */
		tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);

		int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

		if (tmp >= pgbreak &&
		    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
	}
}
*sk
, struct msghdr
*msg
, size_t size
)
918 struct tcp_skb_cb
*cb
;
921 skb
= alloc_skb(size
+ sizeof(*th
), sk
->sk_allocation
);
925 th
= (struct tcphdr
*)skb_put(skb
, sizeof(*th
));
926 skb_reset_transport_header(skb
);
927 memset(th
, 0, sizeof(*th
));
929 if (memcpy_fromiovec(skb_put(skb
, size
), msg
->msg_iov
, size
))
932 cb
= TCP_SKB_CB(skb
);
934 TCP_SKB_CB(skb
)->seq
= tcp_sk(sk
)->rcv_nxt
;
935 TCP_SKB_CB(skb
)->end_seq
= TCP_SKB_CB(skb
)->seq
+ size
;
936 TCP_SKB_CB(skb
)->ack_seq
= tcp_sk(sk
)->snd_una
- 1;
938 tcp_queue_rcv(sk
, skb
, sizeof(*th
));

int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
{
	struct tcp_sock *tp = tcp_sk(sk);
	int iovlen, flags, err, copied;
	int mss_now = 0, size_goal;

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)

	if (unlikely(tp->repair)) {
		if (tp->repair_queue == TCP_RECV_QUEUE) {
			copied = tcp_send_rcvq(sk, msg, size);
		}

		if (tp->repair_queue == TCP_NO_QUEUE)

		/* 'common' sending to sendq */
	}

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))

	sg = !!(sk->sk_route_caps & NETIF_F_SG);

	while (--iovlen >= 0) {
		size_t seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		while (seglen > 0) {
			int max = size_goal;

			skb = tcp_write_queue_tail(sk);
			if (tcp_send_head(sk)) {
				if (skb->ip_summed == CHECKSUM_NONE)
				copy = max - skb->len;
			}

				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk,
							  select_size(sk, sg),
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);

			/* Try to append data to the end of skb. */

			/* Where to copy to? */
			if (skb_availroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				copy = min_t(int, copy, skb_availroom(skb));
				err = skb_add_data_nocache(sk, skb, from, copy);
			} else {
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = sk->sk_sndmsg_page;

				if (page && page_count(page) == 1)
					sk->sk_sndmsg_off = 0;

				off = sk->sk_sndmsg_off;

				if (skb_can_coalesce(skb, i, page, off) &&
					/* We can extend the last page
				} else if (i == MAX_SKB_FRAGS || !sg) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 */
					tcp_mark_push(tp, skb);
				} else if (page) {
					if (off == PAGE_SIZE) {
						sk->sk_sndmsg_page = page = NULL;
					}
				}

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				err = skb_copy_to_page_nocache(sk, from, skb,
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!sk->sk_sndmsg_page) {
						sk->sk_sndmsg_page = page;
						sk->sk_sndmsg_off = 0;
					}

				/* Update the skb. */
				if (merge) {
					skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (sk->sk_sndmsg_page) {
					} else if (off + copy < PAGE_SIZE) {
						sk->sk_sndmsg_page = page;
					}
				}

				sk->sk_sndmsg_off = off + copy;
			}

				TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			if ((seglen -= copy) == 0 && iovlen == 0)

			if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied && likely(!tp->repair))
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)

			mss_now = tcp_send_mss(sk, &size_goal, flags);
		}
	}

	if (copied && likely(!tp->repair))
		tcp_push(sk, flags, mss_now, tp->nonagle);

		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);

	err = sk_stream_error(sk, flags, err);
}
EXPORT_SYMBOL(tcp_sendmsg);

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */
static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))

	if (tp->urg_data & TCP_URG_VALID) {
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (!(flags & MSG_TRUNC))
			err = memcpy_toiovec(msg->msg_iov, &c, 1);

			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
}

static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
{
	struct sk_buff *skb;
	int copied = 0, err = 0;

	/* XXX -- need to support SO_PEEK_OFF */

	skb_queue_walk(&sk->sk_write_queue, skb) {
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
	}

	return err ?: copied;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     */
		    (((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		     !atomic_read(&sk->sk_rmem_alloc)))
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
		}
	}
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

#ifdef CONFIG_NET_DMA
static void tcp_service_net_dma(struct sock *sk, bool wait)
{
	dma_cookie_t done, used;
	dma_cookie_t last_issued;
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->ucopy.dma_chan)

	last_issued = tp->ucopy.dma_cookie;
	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
					      &used) == DMA_SUCCESS) {
			/* Safe to free early-copied skbs now */
			__skb_queue_purge(&sk->sk_async_wait_queue);
		} else {
			struct sk_buff *skb;
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
			}
		}
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
		if (offset < skb->len || tcp_hdr(skb)->fin) {
		}
	}
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 *
 * - It is assumed that the socket was locked by the caller.
 * - The routine does not block.
 * - At present, there is no support for reading OOB data
 *   or for 'peeking' the socket using this routine
 *   (although both would be easy to implement).
 */
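/*
 * Illustrative sketch (not from the original file): a minimal recv_actor of
 * the kind tcp_read_sock() expects, as used by in-kernel callers such as the
 * splice path above.  The callback name and its bookkeeping are made up for
 * the example; only the sk_read_actor_t signature is real.
 *
 *	static int my_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *			    unsigned int offset, size_t len)
 *	{
 *		size_t want = min_t(size_t, len, desc->count);
 *
 *		... consume "want" bytes starting at "offset" within skb ...
 *		desc->count -= want;
 *		return want;		// bytes consumed; 0 stops the loop
 *	}
 *
 *	tcp_read_sock(sk, &desc, my_actor);
 */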
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;

	if (sk->sk_state == TCP_LISTEN)

	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)

			used = recv_actor(desc, skb, offset, len);
			} else if (used <= len) {
			}

			/*
			 * If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq - 1, &offset);
			if (!skb || (offset + 1 != skb->len))
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
		}
		sk_eat_skb(sk, skb, 0);
		tp->copied_seq = seq;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
		tcp_cleanup_rbuf(sk, copied);
}
EXPORT_SYMBOL(tcp_read_sock);

/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */
int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int target;		/* Read at least this many bytes */
	struct task_struct *user_recv = NULL;
	int copied_early = 0;
	struct sk_buff *skb;

	if (sk->sk_state == TCP_LISTEN)

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)

	if (unlikely(tp->repair)) {
		if (!(flags & MSG_PEEK))

		if (tp->repair_queue == TCP_SEND_QUEUE)

		if (tp->repair_queue == TCP_NO_QUEUE)

		/* 'common' recv queue MSG_PEEK-ing */
	}

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	skb = skb_peek_tail(&sk->sk_receive_queue);
		available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
	if ((available < target) &&
	    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
	    !sysctl_tcp_low_latency &&
	    net_dma_find_channel()) {
		preempt_enable_no_resched();
		tp->ucopy.pinned_list =
			dma_pin_iovec_pages(msg->msg_iov, len);
	} else {
		preempt_enable_no_resched();
	}

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
			}
		}

		/* Next get a buffer. */

		skb_queue_walk(&sk->sk_receive_queue, skb) {
			/* Now that we have two receive queues this
			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
				 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,

			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (tcp_hdr(skb)->syn)
			if (offset < skb->len)
			if (tcp_hdr(skb)->fin)
			WARN(!(flags & MSG_PEEK),
			     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
		}

		/* Well, if we have backlog, try to process it now yet. */

		if (copied >= target && !sk->sk_backlog.tail)

			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    signal_pending(current))

			if (sock_flag(sk, SOCK_DONE))

				copied = sock_error(sk);

			if (sk->sk_shutdown & RCV_SHUTDOWN)

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
				}
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
			}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
				!(flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if
			 * is not empty. It is more elegant, but eats cycles,
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
		}

		/* __ Set realtime policy in scheduler __ */

#ifdef CONFIG_NET_DMA
		if (tp->ucopy.dma_chan)
			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tcp_service_net_dma(sk, false);  /* Don't block */
		tp->ucopy.wakeup = 0;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {

				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				}
			}

		if ((flags & MSG_PEEK) &&
		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, task_pid_nr(current));
			peek_seq = tp->copied_seq;
		}

		/* Ok so how much can we use? */
		used = skb->len - offset;

		/* Do we have urgent data here? */
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
					}
			}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = net_dma_find_channel();

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {
					pr_alert("%s: dma_cookie < 0\n",
					/* Exception. Bailout! */
				}

				dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

				if ((offset + used) == skb->len)
			} else
				err = skb_copy_datagram_iovec(skb, offset,
							      msg->msg_iov, used);
					/* Exception. Bailout! */
		}

		tcp_rcv_space_adjust(sk);

		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)

		if (tcp_hdr(skb)->fin)
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
		}

		/* Process the FIN. */
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
		}

		if (!skb_queue_empty(&tp->ucopy.prequeue)) {

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
			}
		}

		tp->ucopy.task = NULL;

#ifdef CONFIG_NET_DMA
	tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
	tp->ucopy.dma_chan = NULL;

	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	err = tcp_recv_urg(sk, msg, len, flags);

	err = tcp_peek_sndq(sk, msg, len);
}
EXPORT_SYMBOL(tcp_recvmsg);

void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);

		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))

		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
}
EXPORT_SYMBOL_GPL(tcp_set_state);

/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 */

static const unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}

/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
 */
void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
	}
}
EXPORT_SYMBOL(tcp_shutdown);

bool tcp_check_oom(struct sock *sk, int shift)
{
	bool too_many_orphans, out_of_socket_memory;

	too_many_orphans = tcp_too_many_orphans(sk, shift);
	out_of_socket_memory = tcp_out_of_memory(sk);

	if (too_many_orphans && net_ratelimit())
		pr_info("too many orphaned sockets\n");
	if (out_of_socket_memory && net_ratelimit())
		pr_info("out of memory -- consider tuning tcp_mem\n");
	return too_many_orphans || out_of_socket_memory;
}

void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*  We need to flush the recv. buffs.  We do this only on the
	 *  descriptor close, not protocol-sourced closes, because the
	 *  reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
		data_was_unread += len;
	}

	/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
	if (sk->sk_state == TCP_CLOSE)
		goto adjudge_to_death;

	/* As outlined in RFC 2525, section 2.17, we send a RST here because
	 * data was lost. To witness the awful effects of the old behavior of
	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
	 * GET in an FTP client, suspend the process, wait for the client to
	 * advertise a zero window, then kill -9 the FTP client, wheee...
	 * Note: timeout is always zero in such a case.
	 */
	if (unlikely(tcp_sk(sk)->repair)) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. "RFC state" is ESTABLISHED,
		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible declinations are that sometimes
		 * we enter time-wait state, when it is not required really
		 * (harmless), do not send active resets, when they are
		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
		 * they look as CLOSING or LAST_ACK for Linux)
		 * Probably, I missed some more holelets.
		 */
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;

	/* It is the last release_sock in its life. It will remove backlog. */

	/* Now socket is owned by kernel and we acquire BH lock
	   to finish close. No need to check for user refs.
	 */
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)

	/*	This is a (useful) BSD violation of the RFC. There is a
	 *	problem with TCP as specified in that the other end could
	 *	keep a socket open forever with no application left this end.
	 *	We use a 3 minute timeout (about the same as BSD) then kill
	 *	our end. If they send after that then tough - BUT: long enough
	 *	that we won't make the old 4*rto = almost no time - whoops
	 *
	 *	Nope, it was not mistake. It is really desired behaviour
	 *	f.e. on http servers, when such sockets are useless, but
	 *	consume significant resources. Let's do it with special
	 *	linger2	option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPABORTONMEMORY);
		}
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */
}
EXPORT_SYMBOL(tcp_close);

/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (unlikely(tp->repair)) {
		sk->sk_err = ECONNABORTED;
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	tcp_write_queue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	if ((tp->write_seq += tp->max_window + 2) == 0)

	icsk->icsk_backoff = 0;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_cnt = 0;
	tp->bytes_acked = 0;
	tp->window_clamp = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	tcp_init_send_head(sk);
	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
}
EXPORT_SYMBOL(tcp_disconnect);

static inline int tcp_can_repair_sock(struct sock *sk)
{
	return capable(CAP_NET_ADMIN) &&
		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
}

/*
 *	Socket option code for TCP.
 */
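/*
 * Illustrative sketch (not part of the original file): how a user-space
 * application reaches do_tcp_setsockopt() below.  The 30-second value is an
 * arbitrary example.
 *
 *	int val = 30000;	// milliseconds, for TCP_USER_TIMEOUT
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &val, sizeof(val));
 *
 *	char ca[] = "reno";	// see the TCP_CONGESTION case below
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, strlen(ca));
 */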
static int do_tcp_setsockopt(struct sock *sk, int level,
			     int optname, char __user *optval, unsigned int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* These are data/string values, all the others are ints */
	case TCP_CONGESTION: {
		char name[TCP_CA_NAME_MAX];

		val = strncpy_from_user(name, optval,
					min_t(long, TCP_CA_NAME_MAX-1, optlen));

		err = tcp_set_congestion_control(sk, name);
	}

	case TCP_COOKIE_TRANSACTIONS: {
		struct tcp_cookie_transactions ctd;
		struct tcp_cookie_values *cvp = NULL;

		if (sizeof(ctd) > optlen)
		if (copy_from_user(&ctd, optval, sizeof(ctd)))

		if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
		    ctd.tcpct_s_data_desired > TCP_MSS_DESIRED)

		if (ctd.tcpct_cookie_desired == 0) {
			/* default to global value */
		} else if ((0x1 & ctd.tcpct_cookie_desired) ||
			   ctd.tcpct_cookie_desired > TCP_COOKIE_MAX ||
			   ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) {
		}

		if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) {
			/* Supersedes all other values */
			if (tp->cookie_values != NULL) {
				kref_put(&tp->cookie_values->kref,
					 tcp_cookie_values_release);
				tp->cookie_values = NULL;
			}
			tp->rx_opt.cookie_in_always = 0; /* false */
			tp->rx_opt.cookie_out_never = 1; /* true */
		}

		/* Allocate ancillary memory before locking.
		 */
		if (ctd.tcpct_used > 0 ||
		    (tp->cookie_values == NULL &&
		     (sysctl_tcp_cookie_size > 0 ||
		      ctd.tcpct_cookie_desired > 0 ||
		      ctd.tcpct_s_data_desired > 0))) {
			cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
			kref_init(&cvp->kref);
		}
		tp->rx_opt.cookie_in_always =
			(TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags);
		tp->rx_opt.cookie_out_never = 0; /* false */

		if (tp->cookie_values != NULL) {
				/* Changed values are recorded by a changed
				 * pointer, ensuring the cookie will differ,
				 * without separately hashing each value later.
				 */
				kref_put(&tp->cookie_values->kref,
					 tcp_cookie_values_release);
				cvp = tp->cookie_values;
		}

			cvp->cookie_desired = ctd.tcpct_cookie_desired;

			if (ctd.tcpct_used > 0) {
				memcpy(cvp->s_data_payload, ctd.tcpct_value,
				cvp->s_data_desired = ctd.tcpct_used;
				cvp->s_data_constant = 1; /* true */
			} else {
				/* No constant payload data. */
				cvp->s_data_desired = ctd.tcpct_s_data_desired;
				cvp->s_data_constant = 0; /* false */
			}

			tp->cookie_values = cvp;
	}

	if (optlen < sizeof(int))

	if (get_user(val, (int __user *)optval))

		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used */
		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
		}
		tp->rx_opt.user_mss = val;

			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on corked socket is remembered, but
			 * it is not activated until cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
			tp->nonagle &= ~TCP_NAGLE_OFF;

	case TCP_THIN_LINEAR_TIMEOUTS:
		if (val < 0 || val > 1)

	case TCP_THIN_DUPACK:
		if (val < 0 || val > 1)
			tp->thin_dupack = val;

		if (!tcp_can_repair_sock(sk))
		else if (val == 1) {
			sk->sk_reuse = SK_FORCE_REUSE;
			tp->repair_queue = TCP_NO_QUEUE;
		} else if (val == 0) {
			sk->sk_reuse = SK_NO_REUSE;
			tcp_send_window_probe(sk);
		}

	case TCP_REPAIR_QUEUE:
		else if (val < TCP_QUEUES_NR)
			tp->repair_queue = val;

		if (sk->sk_state != TCP_CLOSE)
		else if (tp->repair_queue == TCP_SEND_QUEUE)
			tp->write_seq = val;
		else if (tp->repair_queue == TCP_RECV_QUEUE)

		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue.  This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
2442 tp
->nonagle
|= TCP_NAGLE_CORK
;
2444 tp
->nonagle
&= ~TCP_NAGLE_CORK
;
2445 if (tp
->nonagle
&TCP_NAGLE_OFF
)
2446 tp
->nonagle
|= TCP_NAGLE_PUSH
;
2447 tcp_push_pending_frames(sk
);
2452 if (val
< 1 || val
> MAX_TCP_KEEPIDLE
)
2455 tp
->keepalive_time
= val
* HZ
;
2456 if (sock_flag(sk
, SOCK_KEEPOPEN
) &&
2457 !((1 << sk
->sk_state
) &
2458 (TCPF_CLOSE
| TCPF_LISTEN
))) {
2459 u32 elapsed
= keepalive_time_elapsed(tp
);
2460 if (tp
->keepalive_time
> elapsed
)
2461 elapsed
= tp
->keepalive_time
- elapsed
;
2464 inet_csk_reset_keepalive_timer(sk
, elapsed
);
2469 if (val
< 1 || val
> MAX_TCP_KEEPINTVL
)
2472 tp
->keepalive_intvl
= val
* HZ
;
2475 if (val
< 1 || val
> MAX_TCP_KEEPCNT
)
2478 tp
->keepalive_probes
= val
;
2481 if (val
< 1 || val
> MAX_TCP_SYNCNT
)
2484 icsk
->icsk_syn_retries
= val
;
2490 else if (val
> sysctl_tcp_fin_timeout
/ HZ
)
2493 tp
->linger2
= val
* HZ
;
2496 case TCP_DEFER_ACCEPT
:
2497 /* Translate value in seconds to number of retransmits */
2498 icsk
->icsk_accept_queue
.rskq_defer_accept
=
2499 secs_to_retrans(val
, TCP_TIMEOUT_INIT
/ HZ
,
2503 case TCP_WINDOW_CLAMP
:
2505 if (sk
->sk_state
!= TCP_CLOSE
) {
2509 tp
->window_clamp
= 0;
2511 tp
->window_clamp
= val
< SOCK_MIN_RCVBUF
/ 2 ?
2512 SOCK_MIN_RCVBUF
/ 2 : val
;
2517 icsk
->icsk_ack
.pingpong
= 1;
2519 icsk
->icsk_ack
.pingpong
= 0;
2520 if ((1 << sk
->sk_state
) &
2521 (TCPF_ESTABLISHED
| TCPF_CLOSE_WAIT
) &&
2522 inet_csk_ack_scheduled(sk
)) {
2523 icsk
->icsk_ack
.pending
|= ICSK_ACK_PUSHED
;
2524 tcp_cleanup_rbuf(sk
, 1);
2526 icsk
->icsk_ack
.pingpong
= 1;
2531 #ifdef CONFIG_TCP_MD5SIG
2533 /* Read the IP->Key mappings from userspace */
2534 err
= tp
->af_specific
->md5_parse(sk
, optval
, optlen
);
2537 case TCP_USER_TIMEOUT
:
2538 /* Cap the max timeout in ms TCP will retry/retrans
2539 * before giving up and aborting (ETIMEDOUT) a connection.
2541 icsk
->icsk_user_timeout
= msecs_to_jiffies(val
);
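/* Illustrative userspace sketch (not part of this file): tuning the
 * keepalive and user-timeout knobs handled by do_tcp_setsockopt() above.
 * TCP_KEEPIDLE/TCP_KEEPINTVL are in seconds (the kernel converts with
 * * HZ above), TCP_KEEPCNT is a probe count and TCP_USER_TIMEOUT is in
 * milliseconds.  The values are arbitrary examples and the fallback
 * define is an assumption for old libc headers; guarded with #if 0 so
 * it is never built here.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_USER_TIMEOUT
#define TCP_USER_TIMEOUT 18	/* assumed UAPI value for older headers */
#endif

static int enable_keepalive(int fd)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5;
	unsigned int user_timeout = 30000;	/* 30 s, in ms */

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)) < 0)
		return -1;
	/* Abort the connection if data stays unacknowledged this long. */
	return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
			  &user_timeout, sizeof(user_timeout));
}
#endif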
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_setsockopt);
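/* Illustrative userspace sketch (not part of this file): the
 * write-headers-then-sendfile pattern described by the TCP_CORK comment
 * in do_tcp_setsockopt() above.  Error handling is trimmed and the
 * socket/file descriptors are assumed to exist; guarded with #if 0 so
 * it is never built here.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <sys/socket.h>
#include <sys/sendfile.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

static void send_response(int sock, const char *hdr, size_t hdrlen,
			  int filefd, size_t filelen)
{
	int on = 1, off = 0;

	/* Cork: queue the partial header frame instead of sending it. */
	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	write(sock, hdr, hdrlen);
	sendfile(sock, filefd, NULL, filelen);
	/* Uncork: any pending partial frame is pushed out now. */
	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}
#endif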
#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif
/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(const struct sock *sk, struct tcp_info *info)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tcp_is_sack(tp))
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags & TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;
	if (tp->ecn_flags & TCP_ECN_SEEN)
		info->tcpi_options |= TCPI_OPT_ECN_SEEN;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	if (sk->sk_state == TCP_LISTEN) {
		info->tcpi_unacked = sk->sk_ack_backlog;
		info->tcpi_sacked = sk->sk_max_ack_backlog;
	} else {
		info->tcpi_unacked = tp->packets_out;
		info->tcpi_sacked = tp->sacked_out;
	}
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}
EXPORT_SYMBOL_GPL(tcp_get_info);
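/* Illustrative userspace sketch (not part of this file): reading the
 * structure filled in by tcp_get_info() via getsockopt(TCP_INFO).
 * struct tcp_info is taken from the libc's <netinet/tcp.h> copy of the
 * UAPI layout; the kernel copies at most the length the caller passes
 * in, so mismatched header versions still work.  Guarded with #if 0 so
 * it is never built here.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>

static void dump_tcp_info(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("rtt=%uus rttvar=%uus cwnd=%u retrans=%u\n",
		       info.tcpi_rtt, info.tcpi_rttvar,
		       info.tcpi_snd_cwnd, info.tcpi_total_retrans);
}
#endif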
static int do_tcp_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		if (tp->repair)
			val = tp->rx_opt.mss_clamp;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = keepalive_time_when(tp) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = keepalive_intvl_when(tp) / HZ;
		break;
	case TCP_KEEPCNT:
		val = keepalive_probes(tp);
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;

	case TCP_COOKIE_TRANSACTIONS: {
		struct tcp_cookie_transactions ctd;
		struct tcp_cookie_values *cvp = tp->cookie_values;

		if (get_user(len, optlen))
			return -EFAULT;
		if (len < sizeof(ctd))
			return -EINVAL;

		memset(&ctd, 0, sizeof(ctd));
		ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
				   TCP_COOKIE_IN_ALWAYS : 0)
				| (tp->rx_opt.cookie_out_never ?
				   TCP_COOKIE_OUT_NEVER : 0);

		if (cvp != NULL) {
			ctd.tcpct_flags |= (cvp->s_data_in ?
					    TCP_S_DATA_IN : 0)
					 | (cvp->s_data_out ?
					    TCP_S_DATA_OUT : 0);

			ctd.tcpct_cookie_desired = cvp->cookie_desired;
			ctd.tcpct_s_data_desired = cvp->s_data_desired;

			memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
			       cvp->cookie_pair_size);
			ctd.tcpct_used = cvp->cookie_pair_size;
		}

		if (put_user(sizeof(ctd), optlen))
			return -EFAULT;
		if (copy_to_user(optval, &ctd, sizeof(ctd)))
			return -EFAULT;
		return 0;
	}
	case TCP_THIN_LINEAR_TIMEOUTS:
		val = tp->thin_lto;
		break;
	case TCP_THIN_DUPACK:
		val = tp->thin_dupack;
		break;

	case TCP_REPAIR:
		val = tp->repair;
		break;

	case TCP_REPAIR_QUEUE:
		if (tp->repair)
			val = tp->repair_queue;
		else
			return -EINVAL;
		break;

	case TCP_QUEUE_SEQ:
		if (tp->repair_queue == TCP_SEND_QUEUE)
			val = tp->write_seq;
		else if (tp->repair_queue == TCP_RECV_QUEUE)
			val = tp->rcv_nxt;
		else
			return -EINVAL;
		break;

	case TCP_USER_TIMEOUT:
		val = jiffies_to_msecs(icsk->icsk_user_timeout);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_getsockopt);
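/* Illustrative userspace sketch (not part of this file): the
 * TCP_CONGESTION string option handled by do_tcp_setsockopt() and
 * do_tcp_getsockopt() above.  "reno" is always registered by this file;
 * other names depend on which congestion-control modules are available.
 * Guarded with #if 0 so it is never built here.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <stdio.h>

static void show_and_set_cc(int fd)
{
	char name[16];			/* TCP_CA_NAME_MAX on the kernel side */
	socklen_t len = sizeof(name);

	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("current congestion control: %.*s\n", (int)len, name);

	/* Unprivileged callers may only pick algorithms listed in
	 * net.ipv4.tcp_allowed_congestion_control. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
		       "reno", strlen("reno")) < 0)
		perror("setsockopt(TCP_CONGESTION)");
}
#endif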
#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	do {
		th->fin = th->psh = 0;

		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				       (__force u32)delta));
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += mss;
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));

out:
	return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = skb_shinfo(p)->gso_size;

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
EXPORT_SYMBOL(tcp_gro_receive);
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);

		if (p->md5_desc.tfm)
			crypto_free_hash(p->md5_desc.tfm);
	}
	free_percpu(pool);
}

void tcp_free_md5sig_pool(void)
{
	struct tcp_md5sig_pool __percpu *pool = NULL;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	}
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	if (pool)
		__tcp_free_md5sig_pool(pool);
}
EXPORT_SYMBOL(tcp_free_md5sig_pool);
static struct tcp_md5sig_pool __percpu *
__tcp_alloc_md5sig_pool(struct sock *sk)
{
	int cpu;
	struct tcp_md5sig_pool __percpu *pool;

	pool = alloc_percpu(struct tcp_md5sig_pool);
	if (!pool)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct crypto_hash *hash;

		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (!hash || IS_ERR(hash))
			goto out_free;

		per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
	}
	return pool;
out_free:
	__tcp_free_md5sig_pool(pool);
	return NULL;
}
struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
{
	struct tcp_md5sig_pool __percpu *pool;
	bool alloc = false;

retry:
	spin_lock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		alloc = true;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		tcp_md5sig_users--;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
		cpu_relax();
		goto retry;
	} else
		spin_unlock_bh(&tcp_md5sig_pool_lock);

	if (alloc) {
		/* we cannot hold spinlock here because this may sleep. */
		struct tcp_md5sig_pool __percpu *p;

		p = __tcp_alloc_md5sig_pool(sk);
		spin_lock_bh(&tcp_md5sig_pool_lock);
		if (!p) {
			tcp_md5sig_users--;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			return NULL;
		}
		pool = tcp_md5sig_pool;
		if (pool) {
			/* oops, it has already been assigned. */
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			__tcp_free_md5sig_pool(p);
		} else {
			tcp_md5sig_pool = pool = p;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
		}
	}
	return pool;
}
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
/**
 *	tcp_get_md5sig_pool - get md5sig_pool for this user
 *
 *	We use percpu structure, so if we succeed, we exit with preemption
 *	and BH disabled, to make sure another thread or softirq handling
 *	won't try to get same context.
 */
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	struct tcp_md5sig_pool __percpu *p;

	local_bh_disable();

	spin_lock(&tcp_md5sig_pool_lock);
	p = tcp_md5sig_pool;
	if (p)
		tcp_md5sig_users++;
	spin_unlock(&tcp_md5sig_pool_lock);

	if (p)
		return this_cpu_ptr(p);

	local_bh_enable();
	return NULL;
}
EXPORT_SYMBOL(tcp_get_md5sig_pool);

void tcp_put_md5sig_pool(void)
{
	local_bh_enable();
	tcp_free_md5sig_pool();
}
EXPORT_SYMBOL(tcp_put_md5sig_pool);
int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
			const struct tcphdr *th)
{
	struct scatterlist sg;
	struct tcphdr hdr;
	int err;

	/* We are not allowed to change tcphdr, make a local copy */
	memcpy(&hdr, th, sizeof(hdr));
	hdr.check = 0;

	/* options aren't included in the hash */
	sg_init_one(&sg, &hdr, sizeof(hdr));
	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
	return err;
}
EXPORT_SYMBOL(tcp_md5_hash_header);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
			  const struct sk_buff *skb, unsigned int header_len)
{
	struct scatterlist sg;
	const struct tcphdr *tp = tcp_hdr(skb);
	struct hash_desc *desc = &hp->md5_desc;
	unsigned int i;
	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
					   skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);
	struct sk_buff *frag_iter;

	sg_init_table(&sg, 1);

	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
	if (crypto_hash_update(desc, &sg, head_data_len))
		return 1;

	for (i = 0; i < shi->nr_frags; ++i) {
		const struct skb_frag_struct *f = &shi->frags[i];
		struct page *page = skb_frag_page(f);
		sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
		if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
			return 1;
	}

	skb_walk_frags(skb, frag_iter)
		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
			return 1;

	return 0;
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
	struct scatterlist sg;

	sg_init_one(&sg, key->key, key->keylen);
	return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
}
EXPORT_SYMBOL(tcp_md5_hash_key);

#endif /* CONFIG_TCP_MD5SIG */
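/* Illustrative userspace sketch (not part of this file): installing an
 * RFC 2385 MD5 key with the TCP_MD5SIG option whose kernel side
 * (tp->af_specific->md5_parse and the hashing helpers above) lives in
 * this file.  struct tcp_md5sig and TCP_MD5SIG are taken from the UAPI
 * <linux/tcp.h>; the header mix may need adjusting on some libcs, and
 * the peer address and key are made-up examples.  Guarded with #if 0 so
 * it is never built here.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/tcp.h>
#include <string.h>

static int install_md5_key(int fd, const char *peer_ip, const char *secret)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, peer_ip, &sin->sin_addr);
	md5.tcpm_keylen = strlen(secret);
	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);

	/* Segments to/from peer_ip must now carry a valid MD5 option. */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif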
/* Each Responder maintains up to two secret values concurrently for
 * efficient secret rollover.  Each secret value has 4 states:
 *
 * Generating.  (tcp_secret_generating != tcp_secret_primary)
 *    Generates new Responder-Cookies, but not yet used for primary
 *    verification.  This is a short-term state, typically lasting only
 *    one round trip time (RTT).
 *
 * Primary.  (tcp_secret_generating == tcp_secret_primary)
 *    Used both for generation and primary verification.
 *
 * Retiring.  (tcp_secret_retiring != tcp_secret_secondary)
 *    Used for verification, until the first failure that can be
 *    verified by the newer Generating secret.  At that time, this
 *    cookie's state is changed to Secondary, and the Generating
 *    cookie's state is changed to Primary.  This is a short-term state,
 *    typically lasting only one round trip time (RTT).
 *
 * Secondary.  (tcp_secret_retiring == tcp_secret_secondary)
 *    Used for secondary verification, after primary verification
 *    failures.  This state lasts no more than twice the Maximum Segment
 *    Lifetime (2MSL).  Then, the secret is discarded.
 */
struct tcp_cookie_secret {
	/* The secret is divided into two parts.  The digest part is the
	 * equivalent of previously hashing a secret and saving the state,
	 * and serves as an initialization vector (IV).  The message part
	 * serves as the trailing secret.
	 */
	u32				secrets[COOKIE_WORKSPACE_WORDS];
	unsigned long			expires;
};

#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
#define TCP_SECRET_LIFE (HZ * 600)

static struct tcp_cookie_secret tcp_secret_one;
static struct tcp_cookie_secret tcp_secret_two;

/* Essentially a circular list, without dynamic allocation. */
static struct tcp_cookie_secret *tcp_secret_generating;
static struct tcp_cookie_secret *tcp_secret_primary;
static struct tcp_cookie_secret *tcp_secret_retiring;
static struct tcp_cookie_secret *tcp_secret_secondary;

static DEFINE_SPINLOCK(tcp_secret_locker);
/* Select a pseudo-random word in the cookie workspace.
 */
static inline u32 tcp_cookie_work(const u32 *ws, const int n)
{
	return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])];
}
/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed.
 * Called in softirq context.
 * Returns: 0 for success.
 */
int tcp_cookie_generator(u32 *bakery)
{
	unsigned long jiffy = jiffies;

	if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) {
		spin_lock_bh(&tcp_secret_locker);
		if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
			/* refreshed by another */
			memcpy(bakery,
			       &tcp_secret_generating->secrets[0],
			       COOKIE_WORKSPACE_WORDS);
		} else {
			/* still needs refreshing */
			get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS);

			/* The first time, paranoia assumes that the
			 * randomization function isn't as strong.  But,
			 * this secret initialization is delayed until
			 * the last possible moment (packet arrival).
			 * Although that time is observable, it is
			 * unpredictably variable.  Mash in the most
			 * volatile clock bits available, and expire the
			 * secret extra quickly.
			 */
			if (unlikely(tcp_secret_primary->expires ==
				     tcp_secret_secondary->expires)) {
				struct timespec tv;

				getnstimeofday(&tv);
				bakery[COOKIE_DIGEST_WORDS+0] ^=
					(u32)tv.tv_nsec;

				tcp_secret_secondary->expires = jiffy
					+ TCP_SECRET_1MSL
					+ (0x0f & tcp_cookie_work(bakery, 0));
			} else {
				tcp_secret_secondary->expires = jiffy
					+ TCP_SECRET_LIFE
					+ (0xff & tcp_cookie_work(bakery, 1));
				tcp_secret_primary->expires = jiffy
					+ TCP_SECRET_2MSL
					+ (0x1f & tcp_cookie_work(bakery, 2));
			}
			memcpy(&tcp_secret_secondary->secrets[0],
			       bakery, COOKIE_WORKSPACE_WORDS);

			rcu_assign_pointer(tcp_secret_generating,
					   tcp_secret_secondary);
			rcu_assign_pointer(tcp_secret_retiring,
					   tcp_secret_primary);
			/*
			 * Neither call_rcu() nor synchronize_rcu() needed.
			 * Retiring data is not freed.  It is replaced after
			 * further (locked) pointer updates, and a quiet time
			 * (minimum 1MSL, maximum LIFE - 2MSL).
			 */
		}
		spin_unlock_bh(&tcp_secret_locker);
	} else {
		rcu_read_lock_bh();
		memcpy(bakery,
		       &rcu_dereference(tcp_secret_generating)->secrets[0],
		       COOKIE_WORKSPACE_WORDS);
		rcu_read_unlock_bh();
	}
	return 0;
}
EXPORT_SYMBOL(tcp_cookie_generator);
void tcp_done(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
void tcp_init_mem(struct net *net)
{
	unsigned long limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
	net->ipv4.sysctl_tcp_mem[1] = limit;
	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
}
void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long limit;
	int max_share, cnt;
	unsigned int i;
	unsigned long jiffy = jiffies;

	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));

	percpu_counter_init(&tcp_sockets_allocated, 0);
	percpu_counter_init(&tcp_orphan_count, 0);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(totalram_pages >= 128 * 1024) ?
					13 : 15,
					0,
					NULL,
					&tcp_hashinfo.ehash_mask,
					thash_entries ? 0 : 512 * 1024);
	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
	}
	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_mask + 1,
					(totalram_pages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	cnt = tcp_hashinfo.ehash_mask + 1;

	tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	sysctl_tcp_max_orphans = cnt / 2;
	sysctl_max_syn_backlog = max(128, cnt / 256);

	tcp_init_mem(&init_net);
	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_share);

	pr_info("Hash tables configured (established %u bind %u)\n",
		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);

	memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
	memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets));
	tcp_secret_one.expires = jiffy; /* past due */
	tcp_secret_two.expires = jiffy; /* past due */
	tcp_secret_generating = &tcp_secret_one;
	tcp_secret_primary = &tcp_secret_one;
	tcp_secret_retiring = &tcp_secret_two;
	tcp_secret_secondary = &tcp_secret_two;