net/ipv6/tcp_ipv6.c
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		if (rt->rt6i_node)
			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
	}
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

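/* tcp_v6_connect() is the connect() backend for AF_INET6 TCP sockets:
 * it validates the address, resolves any flow label, diverts to
 * tcp_v4_connect() for v4-mapped destinations, routes the flow, and
 * finally picks an initial sequence number before tcp_connect() sends
 * the SYN.
 */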
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	ip6_set_txhash(sk);

	/*
	 * TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &sk->sk_v6_rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

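/* Path-MTU feedback: shrink the cached MSS and retransmit whatever no
 * longer fits.  Called from tcp_v6_err() below when ICMPV6_PKT_TOOBIG
 * arrives, either directly or deferred through tsq_flags when the
 * socket is owned by user context.
 */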
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted, it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

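/* RFC 2385 TCP MD5 signature support: the helpers below look up
 * per-peer keys, parse the TCP_MD5SIG socket option, and compute the
 * digest over the TCP pseudo-header, the TCP header and the payload.
 * v4-mapped peers are redirected to the AF_INET key table.
 */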
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

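/* A minimal userspace sketch (not part of this file) of the option the
 * parser above consumes; both peers must install the same key for the
 * connection to survive the inbound check.  Error handling omitted,
 * and the peer address is a documentation-prefix example.
 *
 *	struct tcp_md5sig md5 = { };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */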
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int __tcp_v6_inbound_md5_hash(struct sock *sk,
				     const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	ret = __tcp_v6_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return ret;
}

#endif

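/* Per-request initialisation: record the peer and local addresses and
 * the incoming interface, and stash the SYN's skb whenever the
 * listener asked for IPV6_PKTOPTIONS-style ancillary data; the stashed
 * skb is handed over to the child in tcp_v6_syn_recv_sock().
 */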
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};

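/* Build and send a bare TCP segment (RST or ACK) in reply to @skb.
 * Addresses and ports are swapped from the packet itself, timestamp
 * and MD5 options are appended on demand, and the per-netns control
 * socket transmits the result, so no established sock is required.
 */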
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 tsval, u32 tsecr, int oif,
				 struct tcp_md5sig_key *key, int rst, u8 tclass,
				 u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = inet6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for an RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * key we find; no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
			     label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, (tw->tw_flowlabel << 12));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

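/* A segment aimed at a LISTEN socket may belong to a pending open
 * request, to an established child that has not been accepted yet, or
 * (with syncookies) to no stored state at all; sort that out here.
 */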
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}

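/* Create the child socket once the handshake completes.  The ETH_P_IP
 * branch covers v4-mapped connections accepted on an AF_INET6
 * listener: the child is built by tcp_v4_syn_recv_sock() and then
 * re-pointed at the mapped (ipv6_mapped) operation tables.
 */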
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		 * worked with IPv6 icsk.icsk_af_ops.
		 * Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   the options received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just short-circuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

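/* Main softirq entry point for IPv6 TCP.  It validates and checksums
 * the headers, moves the IPv6 control block out of the way of
 * TCP_SKB_CB(), looks up the socket, and then processes the segment
 * in place, prequeues it, or backlogs it when the socket is owned by
 * user context.
 */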
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	/* This is tricky: we move the IPCB to its correct location into
	 * TCP_SKB_CB(); barrier() makes sure the compiler won't play
	 * fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

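/* Early demux: on the hot receive path, look up an established socket
 * before routing, so that its cached rx dst can be reused for this
 * skb via skb_dst_set_noref().
 */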
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}