/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt = (const struct rt6_info *)dst;

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	if (rt->rt6i_node)
		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
}
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
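
/* Illustrative note (not part of the original file): the initial sequence
 * number is derived from the full 4-tuple, so the same peer reconnecting
 * from a different source port gets an unrelated ISN.  A rough sketch of
 * the mixing, with hypothetical names (the kernel actually uses a keyed
 * cryptographic hash plus a clock component inside
 * secure_tcpv6_sequence_number()):
 *
 *	u32 isn_sketch(const u32 *saddr, const u32 *daddr,
 *		       u16 sport, u16 dport, u32 secret, u32 clock)
 *	{
 *		u32 h = secret;
 *		int i;
 *
 *		for (i = 0; i < 4; i++)		// mix both 128-bit addresses
 *			h = h * 31 + saddr[i] + daddr[i];
 *		return h + ((u32)sport << 16 | dport) + clock;
 *	}
 */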
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
				       &sk->sk_v6_rcv_saddr);

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
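
/* Usage sketch (userspace, not part of this file): the IPV6_ADDR_MAPPED
 * branch above is what an AF_INET6 socket hits when it connects to a
 * v4-mapped address such as ::ffff:192.0.2.1; the connection is then
 * handed to the IPv4 stack via tcp_v4_connect().  Minimal example,
 * assuming a reachable host at 192.0.2.1:80:
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * Enabling the IPV6_V6ONLY socket option beforehand makes this connect()
 * fail with ENETUNREACH instead (the __ipv6_only_sock() check above).
 */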
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi6 *fl6,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
{
	struct flowi6 fl6;
	int res;

	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
	if (!res) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
	}
	return res;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
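
/* Usage sketch (userspace, not part of this file): tcp_v6_parse_md5_keys()
 * is reached through setsockopt(TCP_MD5SIG).  The address family placed in
 * tcpm_addr decides whether an AF_INET or AF_INET6 key is installed; a
 * v4-mapped sin6_addr installs an AF_INET key.  Example for a peer at
 * 2001:db8::1 (the key value is illustrative):
 *
 *	#include <netinet/tcp.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *
 *	struct tcp_md5sig sig = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&sig.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(sig.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &sig, sizeof(sig));
 *
 * A tcpm_keylen of 0 with the same address deletes the key, matching the
 * tcp_md5_do_del() path above.
 */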
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
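
/* Layout note (illustrative, not part of the original file): the block
 * hashed above is the RFC 2460 pseudo-header, 40 bytes in total:
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;		// 16 bytes
 *		struct in6_addr	daddr;		// 16 bytes
 *		__be32		len;		//  4 bytes, TCP length
 *		__be32		protocol;	//  4 bytes, IPPROTO_TCP (6)
 *	};
 *
 * For a header-only segment, len is th->doff << 2 (e.g. 20 for a bare
 * header without options); tcp_v6_md5_hash_skb() below passes skb->len
 * instead, so the payload is covered as well.
 */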
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 tsval, u32 tsecr, int oif,
				 struct tcp_md5sig_key *key, int rst, u8 tclass,
				 u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = inet6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either way, RST or ACK;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
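
/* Worked example (illustrative): with both a timestamp and an MD5 option,
 * tot_len is 20 + 12 + 20 = 52 bytes, so t1->doff becomes 13.  The first
 * timestamp word packed above is, byte by byte on the wire:
 *
 *	NOP (1), NOP (1), TCPOPT_TIMESTAMP (8), TCPOLEN_TIMESTAMP (10)
 *
 * i.e. htonl(0x0101080A), followed by the two 32-bit timestamp values.
 */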
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
			     label);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, (tw->tw_flowlabel << 12));

	inet_twsk_put(tw);
}
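
/* Note (illustrative): the time-wait sock keeps only the 20 flow label
 * bits, shifted down by 12 when the socket entered TIME-WAIT; shifting
 * back by 12 here reconstructs the flowinfo value passed as the label
 * argument.  For example, a stored value of 0x12345 becomes 0x12345000.
 */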
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	bool want_cookie = false, fastopen;
	struct flowi6 fl6;
	int err;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if ((sysctl_tcp_syncookies == 2 ||
	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb, sock_net(sk));

	ireq->ir_iif = sk->sk_bound_dev_if;
	ireq->ir_mark = inet_request_mark(sk, skb);

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim ||
		    np->repflow) {
			atomic_inc(&skb->users);
			ireq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered
			 * at the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
		goto drop_and_free;

	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;
	tcp_openreq_init_rwin(req, sk, dst);
	fastopen = !want_cookie &&
		   tcp_try_fastopen(sk, skb, req, &foc, dst);
	err = tcp_v6_send_synack(sk, dst, &fl6, req,
				 skb_get_queue_mapping(skb), &foc);
	if (!fastopen) {
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->listener = NULL;
		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	}
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}
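
/* Worked example (illustrative): with a sysctl_max_syn_backlog of, say,
 * 256 and syncookies disabled, the "last quarter" clause above starts
 * rejecting unproven peers once fewer than 256 >> 2 = 64 request slots
 * remain, i.e. once the SYN queue already holds more than 192 entries.
 */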
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like the idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
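
/* Reading note (illustrative): each /proc/net/tcp6 row printed above shows
 * addresses as four zero-padded hex words of the raw s6_addr32 array, so
 * byte order within each word follows the host.  On a little-endian
 * machine, a ::1 listener on port 8080 (0x1F90) appears roughly as:
 *
 *   0: 00000000000000000000000001000000:1F90 ... 0A ...
 *
 * where 0A is TCP_LISTEN.  The "tx_queue rx_queue" pair maps to
 * write_seq - snd_una and rcv_nxt - copied_seq for established sockets.
 */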
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
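
/* Worked example (illustrative): sk_prot_clear_nulls() zeroes the object
 * while skipping the RCU hash-list node, and here it is told to stop at
 * pinet6; the final memset then resumes just past the pointer.
 * Conceptually:
 *
 *	[start .. pinet6)		cleared (minus the RCU list node)
 *	pinet6 pointer			preserved
 *	(pinet6 .. end of object]	cleared
 *
 * A concurrent RCU lookup that still holds a stale reference can thus
 * safely dereference pinet6 on a recycled tcp6_sock.
 */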
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v6_mtu_reduced,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}