net/ipv6/tcp_ipv6.c
/*
 * TCP over IPv6
 * Linux INET6 implementation
 *
 * Authors:
 *	Pedro Roque	<roque@di.fc.ul.pt>
 *
 * Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 * Fixes:
 *	Hideaki YOSHIFUJI:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and
 *	Alexey Kuznetsov:	support for the IPV6_V6ONLY socket option,
 *				which allows both IPv4 and IPv6 sockets to
 *				bind a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
                           const struct in6_addr *addr)
{
    return NULL;
}
#endif

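/* Cache the inbound route on the socket (together with the fib6 node's
 * serial number as a validity cookie), so the established fast path can
 * reuse it later without a full route lookup.
 */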
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
    struct dst_entry *dst = skb_dst(skb);

    if (dst) {
        const struct rt6_info *rt = (const struct rt6_info *)dst;

        dst_hold(dst);
        sk->sk_rx_dst = dst;
        inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
        if (rt->rt6i_node)
            inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
    }
}

static void tcp_v6_hash(struct sock *sk)
{
    if (sk->sk_state != TCP_CLOSE) {
        if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
            tcp_prot.hash(sk);
            return;
        }
        local_bh_disable();
        __inet6_hash(sk, NULL);
        local_bh_enable();
    }
}

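/* Initial sequence numbers are derived from a keyed hash over the
 * connection 4-tuple (see secure_tcpv6_sequence_number()), which makes
 * them hard for off-path attackers to predict.
 */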
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
    return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
                        ipv6_hdr(skb)->saddr.s6_addr32,
                        tcp_hdr(skb)->dest,
                        tcp_hdr(skb)->source);
}

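/* Active open: validate the destination, fall back to the IPv4 code for
 * v4-mapped addresses, route the flow, pick a source address and an
 * initial sequence number, then send the SYN via tcp_connect().
 */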
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
              int addr_len)
{
    struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
    struct inet_sock *inet = inet_sk(sk);
    struct inet_connection_sock *icsk = inet_csk(sk);
    struct ipv6_pinfo *np = inet6_sk(sk);
    struct tcp_sock *tp = tcp_sk(sk);
    struct in6_addr *saddr = NULL, *final_p, final;
    struct rt6_info *rt;
    struct flowi6 fl6;
    struct dst_entry *dst;
    int addr_type;
    int err;

    if (addr_len < SIN6_LEN_RFC2133)
        return -EINVAL;

    if (usin->sin6_family != AF_INET6)
        return -EAFNOSUPPORT;

    memset(&fl6, 0, sizeof(fl6));

    if (np->sndflow) {
        fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
        IP6_ECN_flow_init(fl6.flowlabel);
        if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
            struct ip6_flowlabel *flowlabel;
            flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
            if (flowlabel == NULL)
                return -EINVAL;
            fl6_sock_release(flowlabel);
        }
    }

    /*
     * connect() to INADDR_ANY means loopback (BSD'ism).
     */

    if (ipv6_addr_any(&usin->sin6_addr))
        usin->sin6_addr.s6_addr[15] = 0x1;

    addr_type = ipv6_addr_type(&usin->sin6_addr);

    if (addr_type & IPV6_ADDR_MULTICAST)
        return -ENETUNREACH;

    if (addr_type & IPV6_ADDR_LINKLOCAL) {
        if (addr_len >= sizeof(struct sockaddr_in6) &&
            usin->sin6_scope_id) {
            /* If an interface was set while binding, the indices
             * must coincide.
             */
            if (sk->sk_bound_dev_if &&
                sk->sk_bound_dev_if != usin->sin6_scope_id)
                return -EINVAL;

            sk->sk_bound_dev_if = usin->sin6_scope_id;
        }

        /* Connecting to a link-local address requires an interface. */
        if (!sk->sk_bound_dev_if)
            return -EINVAL;
    }

    if (tp->rx_opt.ts_recent_stamp &&
        !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
        tp->rx_opt.ts_recent = 0;
        tp->rx_opt.ts_recent_stamp = 0;
        tp->write_seq = 0;
    }

    sk->sk_v6_daddr = usin->sin6_addr;
    np->flow_label = fl6.flowlabel;

    ip6_set_txhash(sk);

    /*
     * TCP over IPv4
     */

    if (addr_type == IPV6_ADDR_MAPPED) {
        u32 exthdrlen = icsk->icsk_ext_hdr_len;
        struct sockaddr_in sin;

        SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

        if (__ipv6_only_sock(sk))
            return -ENETUNREACH;

        sin.sin_family = AF_INET;
        sin.sin_port = usin->sin6_port;
        sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

        icsk->icsk_af_ops = &ipv6_mapped;
        sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
        tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

        err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

        if (err) {
            icsk->icsk_ext_hdr_len = exthdrlen;
            icsk->icsk_af_ops = &ipv6_specific;
            sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
            tp->af_specific = &tcp_sock_ipv6_specific;
#endif
            goto failure;
        } else {
            ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
            ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
                           &sk->sk_v6_rcv_saddr);
        }

        return err;
    }

    if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
        saddr = &sk->sk_v6_rcv_saddr;

    fl6.flowi6_proto = IPPROTO_TCP;
    fl6.daddr = sk->sk_v6_daddr;
    fl6.saddr = saddr ? *saddr : np->saddr;
    fl6.flowi6_oif = sk->sk_bound_dev_if;
    fl6.flowi6_mark = sk->sk_mark;
    fl6.fl6_dport = usin->sin6_port;
    fl6.fl6_sport = inet->inet_sport;

    final_p = fl6_update_dst(&fl6, np->opt, &final);

    security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

    dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
    if (IS_ERR(dst)) {
        err = PTR_ERR(dst);
        goto failure;
    }

    if (saddr == NULL) {
        saddr = &fl6.saddr;
        sk->sk_v6_rcv_saddr = *saddr;
    }

    /* set the source address */
    np->saddr = *saddr;
    inet->inet_rcv_saddr = LOOPBACK4_IPV6;

    sk->sk_gso_type = SKB_GSO_TCPV6;
    __ip6_dst_store(sk, dst, NULL, NULL);

    rt = (struct rt6_info *) dst;
    if (tcp_death_row.sysctl_tw_recycle &&
        !tp->rx_opt.ts_recent_stamp &&
        ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
        tcp_fetch_timewait_stamp(sk, dst);

    icsk->icsk_ext_hdr_len = 0;
    if (np->opt)
        icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
                      np->opt->opt_nflen);

    tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

    inet->inet_dport = usin->sin6_port;

    tcp_set_state(sk, TCP_SYN_SENT);
    err = inet6_hash_connect(&tcp_death_row, sk);
    if (err)
        goto late_failure;

    if (!tp->write_seq && likely(!tp->repair))
        tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                 sk->sk_v6_daddr.s6_addr32,
                                 inet->inet_sport,
                                 inet->inet_dport);

    err = tcp_connect(sk);
    if (err)
        goto late_failure;

    return 0;

late_failure:
    tcp_set_state(sk, TCP_CLOSE);
    __sk_dst_reset(sk);
failure:
    inet->inet_dport = 0;
    sk->sk_route_caps = 0;
    return err;
}

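/* Called (possibly deferred from tcp_v6_err()) after an ICMPV6_PKT_TOOBIG
 * message lowered the path MTU: shrink the MSS and retransmit whatever no
 * longer fits.
 */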
static void tcp_v6_mtu_reduced(struct sock *sk)
{
    struct dst_entry *dst;

    if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
        return;

    dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
    if (!dst)
        return;

    if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
        tcp_sync_mss(sk, dst_mtu(dst));
        tcp_simple_retransmit(sk);
    }
}

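/* ICMPv6 error handler: map the offending header back to a socket, then
 * act on the error (redirect, path MTU change, or a hard/soft error
 * depending on the socket state and whether userspace holds the lock).
 */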
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
               u8 type, u8 code, int offset, __be32 info)
{
    const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
    const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
    struct ipv6_pinfo *np;
    struct sock *sk;
    int err;
    struct tcp_sock *tp;
    struct request_sock *fastopen;
    __u32 seq, snd_una;
    struct net *net = dev_net(skb->dev);

    sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
              th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

    if (sk == NULL) {
        ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
                   ICMP6_MIB_INERRORS);
        return;
    }

    if (sk->sk_state == TCP_TIME_WAIT) {
        inet_twsk_put(inet_twsk(sk));
        return;
    }

    bh_lock_sock(sk);
    if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
        NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

    if (sk->sk_state == TCP_CLOSE)
        goto out;

    if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
        NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
        goto out;
    }

    tp = tcp_sk(sk);
    seq = ntohl(th->seq);
    /* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
    fastopen = tp->fastopen_rsk;
    snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
    if (sk->sk_state != TCP_LISTEN &&
        !between(seq, snd_una, tp->snd_nxt)) {
        NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
        goto out;
    }

    np = inet6_sk(sk);

    if (type == NDISC_REDIRECT) {
        struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

        if (dst)
            dst->ops->redirect(dst, sk, skb);
        goto out;
    }

    if (type == ICMPV6_PKT_TOOBIG) {
        /* We are not interested in TCP_LISTEN and open_requests
         * (SYN-ACKs sent out by Linux are always <576 bytes, so
         * they should go through unfragmented).
         */
        if (sk->sk_state == TCP_LISTEN)
            goto out;

        if (!ip6_sk_accept_pmtu(sk))
            goto out;

        tp->mtu_info = ntohl(info);
        if (!sock_owned_by_user(sk))
            tcp_v6_mtu_reduced(sk);
        else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
                       &tp->tsq_flags))
            sock_hold(sk);
        goto out;
    }

    icmpv6_err_convert(type, code, &err);

    /* Might be for a request_sock */
    switch (sk->sk_state) {
        struct request_sock *req, **prev;
    case TCP_LISTEN:
        if (sock_owned_by_user(sk))
            goto out;

        /* Note : We use inet6_iif() here, not tcp_v6_iif() */
        req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
                       &hdr->saddr, inet6_iif(skb));
        if (!req)
            goto out;

        /* ICMPs are not backlogged, hence we cannot get
         * an established socket here.
         */
        WARN_ON(req->sk != NULL);

        if (seq != tcp_rsk(req)->snt_isn) {
            NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
            goto out;
        }

        inet_csk_reqsk_queue_drop(sk, req, prev);
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        goto out;

    case TCP_SYN_SENT:
    case TCP_SYN_RECV:
        /* Only in fast or simultaneous open. If a fast open socket
         * has already been accepted, it is treated as a connected
         * one below.
         */
        if (fastopen && fastopen->sk == NULL)
            break;

        if (!sock_owned_by_user(sk)) {
            sk->sk_err = err;
            sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

            tcp_done(sk);
        } else
            sk->sk_err_soft = err;
        goto out;
    }

    if (!sock_owned_by_user(sk) && np->recverr) {
        sk->sk_err = err;
        sk->sk_error_report(sk);
    } else
        sk->sk_err_soft = err;

out:
    bh_unlock_sock(sk);
    sock_put(sk);
}

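/* Build and transmit a SYN-ACK for a pending connection request, routing
 * it first if the caller did not already supply a dst.
 */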
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
                  struct flowi *fl,
                  struct request_sock *req,
                  u16 queue_mapping,
                  struct tcp_fastopen_cookie *foc)
{
    struct inet_request_sock *ireq = inet_rsk(req);
    struct ipv6_pinfo *np = inet6_sk(sk);
    struct flowi6 *fl6 = &fl->u.ip6;
    struct sk_buff *skb;
    int err = -ENOMEM;

    /* First, grab a route. */
    if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
        goto done;

    skb = tcp_make_synack(sk, dst, req, foc);

    if (skb) {
        __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
                    &ireq->ir_v6_rmt_addr);

        fl6->daddr = ireq->ir_v6_rmt_addr;
        if (np->repflow && (ireq->pktopts != NULL))
            fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

        skb_set_queue_mapping(skb, queue_mapping);
        err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
        err = net_xmit_eval(err);
    }

done:
    return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
    kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
                           const struct in6_addr *addr)
{
    return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
                        struct sock *addr_sk)
{
    return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
                              struct request_sock *req)
{
    return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
                 int optlen)
{
    struct tcp_md5sig cmd;
    struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

    if (optlen < sizeof(cmd))
        return -EINVAL;

    if (copy_from_user(&cmd, optval, sizeof(cmd)))
        return -EFAULT;

    if (sin6->sin6_family != AF_INET6)
        return -EINVAL;

    if (!cmd.tcpm_keylen) {
        if (ipv6_addr_v4mapped(&sin6->sin6_addr))
            return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
                          AF_INET);
        return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
                      AF_INET6);
    }

    if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
        return -EINVAL;

    if (ipv6_addr_v4mapped(&sin6->sin6_addr))
        return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
                      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

    return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
                  AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

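/* For reference, a minimal (hypothetical) userspace sketch of how a key
 * for an IPv6 peer could be installed through the option parsed above;
 * "2001:db8::1" and the key are placeholders, and error handling is
 * omitted:
 *
 *	struct tcp_md5sig md5 = {};
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */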
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
                    const struct in6_addr *daddr,
                    const struct in6_addr *saddr, int nbytes)
{
    struct tcp6_pseudohdr *bp;
    struct scatterlist sg;

    bp = &hp->md5_blk.ip6;
    /* 1. TCP pseudo-header (RFC 2460) */
    bp->saddr = *saddr;
    bp->daddr = *daddr;
    bp->protocol = cpu_to_be32(IPPROTO_TCP);
    bp->len = cpu_to_be32(nbytes);

    sg_init_one(&sg, bp, sizeof(*bp));
    return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
                   const struct in6_addr *daddr, struct in6_addr *saddr,
                   const struct tcphdr *th)
{
    struct tcp_md5sig_pool *hp;
    struct hash_desc *desc;

    hp = tcp_get_md5sig_pool();
    if (!hp)
        goto clear_hash_noput;
    desc = &hp->md5_desc;

    if (crypto_hash_init(desc))
        goto clear_hash;
    if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
        goto clear_hash;
    if (tcp_md5_hash_header(hp, th))
        goto clear_hash;
    if (tcp_md5_hash_key(hp, key))
        goto clear_hash;
    if (crypto_hash_final(desc, md5_hash))
        goto clear_hash;

    tcp_put_md5sig_pool();
    return 0;

clear_hash:
    tcp_put_md5sig_pool();
clear_hash_noput:
    memset(md5_hash, 0, 16);
    return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
                   const struct sock *sk,
                   const struct request_sock *req,
                   const struct sk_buff *skb)
{
    const struct in6_addr *saddr, *daddr;
    struct tcp_md5sig_pool *hp;
    struct hash_desc *desc;
    const struct tcphdr *th = tcp_hdr(skb);

    if (sk) {
        saddr = &inet6_sk(sk)->saddr;
        daddr = &sk->sk_v6_daddr;
    } else if (req) {
        saddr = &inet_rsk(req)->ir_v6_loc_addr;
        daddr = &inet_rsk(req)->ir_v6_rmt_addr;
    } else {
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        saddr = &ip6h->saddr;
        daddr = &ip6h->daddr;
    }

    hp = tcp_get_md5sig_pool();
    if (!hp)
        goto clear_hash_noput;
    desc = &hp->md5_desc;

    if (crypto_hash_init(desc))
        goto clear_hash;

    if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
        goto clear_hash;
    if (tcp_md5_hash_header(hp, th))
        goto clear_hash;
    if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
        goto clear_hash;
    if (tcp_md5_hash_key(hp, key))
        goto clear_hash;
    if (crypto_hash_final(desc, md5_hash))
        goto clear_hash;

    tcp_put_md5sig_pool();
    return 0;

clear_hash:
    tcp_put_md5sig_pool();
clear_hash_noput:
    memset(md5_hash, 0, 16);
    return 1;
}

static int __tcp_v6_inbound_md5_hash(struct sock *sk,
                     const struct sk_buff *skb)
{
    const __u8 *hash_location = NULL;
    struct tcp_md5sig_key *hash_expected;
    const struct ipv6hdr *ip6h = ipv6_hdr(skb);
    const struct tcphdr *th = tcp_hdr(skb);
    int genhash;
    u8 newhash[16];

    hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
    hash_location = tcp_parse_md5sig_option(th);

    /* We've parsed the options - do we have a hash? */
    if (!hash_expected && !hash_location)
        return 0;

    if (hash_expected && !hash_location) {
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
        return 1;
    }

    if (!hash_expected && hash_location) {
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
        return 1;
    }

    /* check the signature */
    genhash = tcp_v6_md5_hash_skb(newhash,
                      hash_expected,
                      NULL, NULL, skb);

    if (genhash || memcmp(hash_location, newhash, 16) != 0) {
        net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
                     genhash ? "failed" : "mismatch",
                     &ip6h->saddr, ntohs(th->source),
                     &ip6h->daddr, ntohs(th->dest));
        return 1;
    }
    return 0;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
    int ret;

    rcu_read_lock();
    ret = __tcp_v6_inbound_md5_hash(sk, skb);
    rcu_read_unlock();

    return ret;
}

#endif

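/* Record the addresses, the inbound interface, and (when the listener
 * asked for any of the RX options) a reference to the SYN itself on the
 * request sock.
 */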
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
                struct sk_buff *skb)
{
    struct inet_request_sock *ireq = inet_rsk(req);
    struct ipv6_pinfo *np = inet6_sk(sk);

    ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
    ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

    ireq->ir_iif = sk->sk_bound_dev_if;

    /* So that link locals have meaning */
    if (!sk->sk_bound_dev_if &&
        ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
        ireq->ir_iif = tcp_v6_iif(skb);

    if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
        (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
         np->rxopt.bits.rxinfo ||
         np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
         np->rxopt.bits.rxohlim || np->repflow)) {
        atomic_inc(&skb->users);
        ireq->pktopts = skb;
    }
}

static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
                      const struct request_sock *req,
                      bool *strict)
{
    if (strict)
        *strict = true;
    return inet6_csk_route_req(sk, &fl->u.ip6, req);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
    .family          = AF_INET6,
    .obj_size        = sizeof(struct tcp6_request_sock),
    .rtx_syn_ack     = tcp_rtx_synack,
    .send_ack        = tcp_v6_reqsk_send_ack,
    .destructor      = tcp_v6_reqsk_destructor,
    .send_reset      = tcp_v6_send_reset,
    .syn_ack_timeout = tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
    .mss_clamp       = IPV6_MIN_MTU - sizeof(struct tcphdr) -
                       sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
    .md5_lookup      = tcp_v6_reqsk_md5_lookup,
    .calc_md5_hash   = tcp_v6_md5_hash_skb,
#endif
    .init_req        = tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
    .cookie_init_seq = cookie_v6_init_sequence,
#endif
    .route_req       = tcp_v6_route_req,
    .init_seq        = tcp_v6_init_sequence,
    .send_synack     = tcp_v6_send_synack,
    .queue_hash_add  = inet6_csk_reqsk_queue_hash_add,
};

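/* Send a bare ACK or RST in reply to @skb on the per-namespace control
 * socket. No established socket is needed: everything in the reply is
 * derived from the incoming packet and the arguments.
 */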
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
                 u32 tsval, u32 tsecr, int oif,
                 struct tcp_md5sig_key *key, int rst, u8 tclass,
                 u32 label)
{
    const struct tcphdr *th = tcp_hdr(skb);
    struct tcphdr *t1;
    struct sk_buff *buff;
    struct flowi6 fl6;
    struct net *net = dev_net(skb_dst(skb)->dev);
    struct sock *ctl_sk = net->ipv6.tcp_sk;
    unsigned int tot_len = sizeof(struct tcphdr);
    struct dst_entry *dst;
    __be32 *topt;

    if (tsecr)
        tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
    if (key)
        tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

    buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
             GFP_ATOMIC);
    if (buff == NULL)
        return;

    skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

    t1 = (struct tcphdr *) skb_push(buff, tot_len);
    skb_reset_transport_header(buff);

    /* Swap the send and the receive. */
    memset(t1, 0, sizeof(*t1));
    t1->dest = th->source;
    t1->source = th->dest;
    t1->doff = tot_len / 4;
    t1->seq = htonl(seq);
    t1->ack_seq = htonl(ack);
    t1->ack = !rst || !th->ack;
    t1->rst = rst;
    t1->window = htons(win);

    topt = (__be32 *)(t1 + 1);

    if (tsecr) {
        *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
        *topt++ = htonl(tsval);
        *topt++ = htonl(tsecr);
    }

#ifdef CONFIG_TCP_MD5SIG
    if (key) {
        *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
        tcp_v6_md5_hash_hdr((__u8 *)topt, key,
                    &ipv6_hdr(skb)->saddr,
                    &ipv6_hdr(skb)->daddr, t1);
    }
#endif

    memset(&fl6, 0, sizeof(fl6));
    fl6.daddr = ipv6_hdr(skb)->saddr;
    fl6.saddr = ipv6_hdr(skb)->daddr;
    fl6.flowlabel = label;

    buff->ip_summed = CHECKSUM_PARTIAL;
    buff->csum = 0;

    __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

    fl6.flowi6_proto = IPPROTO_TCP;
    if (rt6_need_strict(&fl6.daddr) && !oif)
        fl6.flowi6_oif = tcp_v6_iif(skb);
    else
        fl6.flowi6_oif = oif;
    fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
    fl6.fl6_dport = t1->dest;
    fl6.fl6_sport = t1->source;
    security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

    /* Pass a socket to ip6_dst_lookup even if it is for a RST.
     * The underlying function will use it to retrieve the network
     * namespace.
     */
    dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
    if (!IS_ERR(dst)) {
        skb_dst_set(buff, dst);
        ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
        if (rst)
            TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
        return;
    }

    kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
    const struct tcphdr *th = tcp_hdr(skb);
    u32 seq = 0, ack_seq = 0;
    struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
    const __u8 *hash_location = NULL;
    struct ipv6hdr *ipv6h = ipv6_hdr(skb);
    unsigned char newhash[16];
    int genhash;
    struct sock *sk1 = NULL;
#endif
    int oif;

    if (th->rst)
        return;

    if (!ipv6_unicast_destination(skb))
        return;

#ifdef CONFIG_TCP_MD5SIG
    hash_location = tcp_parse_md5sig_option(th);
    if (!sk && hash_location) {
        /*
         * The active side is lost. Try to find the listening socket
         * through the source port, and then find the md5 key through
         * that listening socket. We are not losing any security here:
         * the incoming packet is checked against the md5 hash of the
         * key we find; no RST is generated if the hash doesn't match.
         */
        sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
                        &tcp_hashinfo, &ipv6h->saddr,
                        th->source, &ipv6h->daddr,
                        ntohs(th->source), tcp_v6_iif(skb));
        if (!sk1)
            return;

        rcu_read_lock();
        key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
        if (!key)
            goto release_sk1;

        genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
        if (genhash || memcmp(hash_location, newhash, 16) != 0)
            goto release_sk1;
    } else {
        key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
    }
#endif

    if (th->ack)
        seq = ntohl(th->ack_seq);
    else
        ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
              (th->doff << 2);

    oif = sk ? sk->sk_bound_dev_if : 0;
    tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
    if (sk1) {
        rcu_read_unlock();
        sock_put(sk1);
    }
#endif
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                u32 win, u32 tsval, u32 tsecr, int oif,
                struct tcp_md5sig_key *key, u8 tclass,
                u32 label)
{
    tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0,
                 tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
    struct inet_timewait_sock *tw = inet_twsk(sk);
    struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

    tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
            tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
            tcp_time_stamp + tcptw->tw_ts_offset,
            tcptw->tw_ts_recent, tw->tw_bound_dev_if,
            tcp_twsk_md5_key(tcptw),
            tw->tw_tclass, (tw->tw_flowlabel << 12));

    inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                  struct request_sock *req)
{
    /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
     * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
     */
    tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
            tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
            tcp_rsk(req)->rcv_nxt,
            req->rcv_wnd, tcp_time_stamp, req->ts_recent,
            sk->sk_bound_dev_if,
            tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
            0, 0);
}

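/* For a packet aimed at a listener: try the SYN queue first, then the
 * established table (the handshake may have completed on another CPU),
 * and finally fall back to SYN cookie validation.
 */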
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
    struct request_sock *req, **prev;
    const struct tcphdr *th = tcp_hdr(skb);
    struct sock *nsk;

    /* Find possible connection requests. */
    req = inet6_csk_search_req(sk, &prev, th->source,
                   &ipv6_hdr(skb)->saddr,
                   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
    if (req)
        return tcp_check_req(sk, skb, req, prev, false);

    nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
                     &ipv6_hdr(skb)->saddr, th->source,
                     &ipv6_hdr(skb)->daddr, ntohs(th->dest),
                     tcp_v6_iif(skb));

    if (nsk) {
        if (nsk->sk_state != TCP_TIME_WAIT) {
            bh_lock_sock(nsk);
            return nsk;
        }
        inet_twsk_put(inet_twsk(nsk));
        return NULL;
    }

#ifdef CONFIG_SYN_COOKIES
    if (!th->syn)
        sk = cookie_v6_check(sk, skb);
#endif
    return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
    if (skb->protocol == htons(ETH_P_IP))
        return tcp_v4_conn_request(sk, skb);

    if (!ipv6_unicast_destination(skb))
        goto drop;

    return tcp_conn_request(&tcp6_request_sock_ops,
                &tcp_request_sock_ipv6_ops, sk, skb);

drop:
    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
    return 0; /* don't send reset */
}

static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                     struct request_sock *req,
                     struct dst_entry *dst)
{
    struct inet_request_sock *ireq;
    struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
    struct tcp6_sock *newtcp6sk;
    struct inet_sock *newinet;
    struct tcp_sock *newtp;
    struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
    struct tcp_md5sig_key *key;
#endif
    struct flowi6 fl6;

    if (skb->protocol == htons(ETH_P_IP)) {
        /*
         * v6 mapped
         */

        newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

        if (newsk == NULL)
            return NULL;

        newtcp6sk = (struct tcp6_sock *)newsk;
        inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

        newinet = inet_sk(newsk);
        newnp = inet6_sk(newsk);
        newtp = tcp_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

        ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

        newsk->sk_v6_rcv_saddr = newnp->saddr;

        inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
        newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
        newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

        newnp->ipv6_ac_list = NULL;
        newnp->ipv6_fl_list = NULL;
        newnp->pktoptions = NULL;
        newnp->opt = NULL;
        newnp->mcast_oif = tcp_v6_iif(skb);
        newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
        newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
        if (np->repflow)
            newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks count
         * here, tcp_create_openreq_child now does this for us, see the comment in
         * that function for the gory details. -acme
         */

        /* This is a tricky place. Until this moment the IPv4 tcp
           socket worked with the IPv6 icsk.icsk_af_ops.
           Sync it now.
         */
        tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

        return newsk;
    }

    ireq = inet_rsk(req);

    if (sk_acceptq_is_full(sk))
        goto out_overflow;

    if (!dst) {
        dst = inet6_csk_route_req(sk, &fl6, req);
        if (!dst)
            goto out;
    }

    newsk = tcp_create_openreq_child(sk, req, skb);
    if (newsk == NULL)
        goto out_nonewsk;

    /*
     * No need to charge this sock to the relevant IPv6 refcnt debug socks
     * count here, tcp_create_openreq_child now does this for us, see the
     * comment in that function for the gory details. -acme
     */

    newsk->sk_gso_type = SKB_GSO_TCPV6;
    __ip6_dst_store(newsk, dst, NULL, NULL);
    inet6_sk_rx_dst_set(newsk, skb);

    newtcp6sk = (struct tcp6_sock *)newsk;
    inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

    newtp = tcp_sk(newsk);
    newinet = inet_sk(newsk);
    newnp = inet6_sk(newsk);

    memcpy(newnp, np, sizeof(struct ipv6_pinfo));

    newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
    newnp->saddr = ireq->ir_v6_loc_addr;
    newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
    newsk->sk_bound_dev_if = ireq->ir_iif;

    ip6_set_txhash(newsk);

    /* Now IPv6 options...

       First: no IPv4 options.
     */
    newinet->inet_opt = NULL;
    newnp->ipv6_ac_list = NULL;
    newnp->ipv6_fl_list = NULL;

    /* Clone RX bits */
    newnp->rxopt.all = np->rxopt.all;

    /* Clone pktoptions received with SYN */
    newnp->pktoptions = NULL;
    if (ireq->pktopts != NULL) {
        newnp->pktoptions = skb_clone(ireq->pktopts,
                          sk_gfp_atomic(sk, GFP_ATOMIC));
        consume_skb(ireq->pktopts);
        ireq->pktopts = NULL;
        if (newnp->pktoptions)
            skb_set_owner_r(newnp->pktoptions, newsk);
    }
    newnp->opt = NULL;
    newnp->mcast_oif = tcp_v6_iif(skb);
    newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
    newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
    if (np->repflow)
        newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

    /* Clone native IPv6 options from the listening socket (if any).

       Yes, keeping a reference count would be much more clever, but
       we do one more thing here: reattach optmem to newsk.
     */
    if (np->opt)
        newnp->opt = ipv6_dup_options(newsk, np->opt);

    inet_csk(newsk)->icsk_ext_hdr_len = 0;
    if (newnp->opt)
        inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                             newnp->opt->opt_flen);

    tcp_sync_mss(newsk, dst_mtu(dst));
    newtp->advmss = dst_metric_advmss(dst);
    if (tcp_sk(sk)->rx_opt.user_mss &&
        tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
        newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

    tcp_initialize_rcv_mss(newsk);

    newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
    newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
    /* Copy over the MD5 key from the original socket */
    key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
    if (key != NULL) {
        /* We're using one, so create a matching key
         * on the newsk structure. If we fail to get
         * memory, then we end up not copying the key
         * across. Shucks.
         */
        tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
                   AF_INET6, key->key, key->keylen,
                   sk_gfp_atomic(sk, GFP_ATOMIC));
    }
#endif

    if (__inet_inherit_port(sk, newsk) < 0) {
        inet_csk_prepare_forced_close(newsk);
        tcp_done(newsk);
        goto out;
    }
    __inet6_hash(newsk, NULL);

    return newsk;

out_overflow:
    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
    dst_release(dst);
out:
    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
    return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
    struct ipv6_pinfo *np = inet6_sk(sk);
    struct tcp_sock *tp;
    struct sk_buff *opt_skb = NULL;

    /* Imagine: the socket is IPv6. An IPv4 packet arrives,
       goes to the IPv4 receive handler and is backlogged.
       From the backlog it always ends up here. Kerboom...
       Fortunately, tcp_rcv_established and rcv_established
       handle them correctly, but that is not the case with
       tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
     */

    if (skb->protocol == htons(ETH_P_IP))
        return tcp_v4_do_rcv(sk, skb);

    if (sk_filter(sk, skb))
        goto discard;

    /*
     * socket locking is here for SMP purposes as backlog rcv
     * is currently called with bh processing disabled.
     */

    /* Do Stevens' IPV6_PKTOPTIONS.

       Yes, guys, it is the only place in our code where we
       can make it not affect IPv4.
       The rest of the code is protocol independent,
       and I do not like the idea of uglifying IPv4.

       Actually, the whole idea behind IPV6_PKTOPTIONS
       does not look very well thought out. For now we latch
       the options received in the last packet enqueued
       by tcp. Feel free to propose a better solution.
                       --ANK (980728)
     */
    if (np->rxopt.all)
        opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

    if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
        struct dst_entry *dst = sk->sk_rx_dst;

        sock_rps_save_rxhash(sk, skb);
        if (dst) {
            if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
                dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
                dst_release(dst);
                sk->sk_rx_dst = NULL;
            }
        }

        tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
        if (opt_skb)
            goto ipv6_pktoptions;
        return 0;
    }

    if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
        goto csum_err;

    if (sk->sk_state == TCP_LISTEN) {
        struct sock *nsk = tcp_v6_hnd_req(sk, skb);
        if (!nsk)
            goto discard;

        /*
         * Queue it on the new socket if the new socket is active,
         * otherwise we just short-circuit this and continue with
         * the new socket.
         */
        if (nsk != sk) {
            sock_rps_save_rxhash(nsk, skb);
            if (tcp_child_process(sk, nsk, skb))
                goto reset;
            if (opt_skb)
                __kfree_skb(opt_skb);
            return 0;
        }
    } else
        sock_rps_save_rxhash(sk, skb);

    if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
        goto reset;
    if (opt_skb)
        goto ipv6_pktoptions;
    return 0;

reset:
    tcp_v6_send_reset(sk, skb);
discard:
    if (opt_skb)
        __kfree_skb(opt_skb);
    kfree_skb(skb);
    return 0;
csum_err:
    TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
    TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
    goto discard;


ipv6_pktoptions:
    /* You may ask, what is this?

       1. skb was enqueued by tcp.
       2. skb was added to the tail of the read queue, rather than out
          of order.
       3. The socket is not in a passive state.
       4. Finally, it really contains options, which the user wants
          to receive.
     */
    tp = tcp_sk(sk);
    if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
        !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
        if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
            np->mcast_oif = tcp_v6_iif(opt_skb);
        if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
            np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
        if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
            np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
        if (np->repflow)
            np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
        if (ipv6_opt_accepted(sk, opt_skb,
                      &TCP_SKB_CB(opt_skb)->header.h6)) {
            skb_set_owner_r(opt_skb, sk);
            opt_skb = xchg(&np->pktoptions, opt_skb);
        } else {
            __kfree_skb(opt_skb);
            opt_skb = xchg(&np->pktoptions, NULL);
        }
    }

    kfree_skb(opt_skb);
    return 0;
}

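/* Main receive path: validate the header and checksum, stash the IPv6
 * control block where TCP expects it, look the socket up, and either
 * process the segment directly, prequeue it, or backlog it.
 */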
static int tcp_v6_rcv(struct sk_buff *skb)
{
    const struct tcphdr *th;
    const struct ipv6hdr *hdr;
    struct sock *sk;
    int ret;
    struct net *net = dev_net(skb->dev);

    if (skb->pkt_type != PACKET_HOST)
        goto discard_it;

    /*
     * Count it even if it's bad.
     */
    TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

    if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
        goto discard_it;

    th = tcp_hdr(skb);

    if (th->doff < sizeof(struct tcphdr)/4)
        goto bad_packet;
    if (!pskb_may_pull(skb, th->doff*4))
        goto discard_it;

    if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
        goto csum_error;

    th = tcp_hdr(skb);
    hdr = ipv6_hdr(skb);
    /* This is tricky: we move the IPCB to its correct location into
     * TCP_SKB_CB(). barrier() makes sure the compiler won't play
     * fool^Waliasing games.
     */
    memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
        sizeof(struct inet6_skb_parm));
    barrier();

    TCP_SKB_CB(skb)->seq = ntohl(th->seq);
    TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                    skb->len - th->doff*4);
    TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
    TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
    TCP_SKB_CB(skb)->tcp_tw_isn = 0;
    TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
    TCP_SKB_CB(skb)->sacked = 0;

    sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
                tcp_v6_iif(skb));
    if (!sk)
        goto no_tcp_socket;

process:
    if (sk->sk_state == TCP_TIME_WAIT)
        goto do_time_wait;

    if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
        NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
        goto discard_and_relse;
    }

    if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
        goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
    if (tcp_v6_inbound_md5_hash(sk, skb))
        goto discard_and_relse;
#endif

    if (sk_filter(sk, skb))
        goto discard_and_relse;

    sk_mark_napi_id(sk, skb);
    skb->dev = NULL;

    bh_lock_sock_nested(sk);
    ret = 0;
    if (!sock_owned_by_user(sk)) {
        if (!tcp_prequeue(sk, skb))
            ret = tcp_v6_do_rcv(sk, skb);
    } else if (unlikely(sk_add_backlog(sk, skb,
                       sk->sk_rcvbuf + sk->sk_sndbuf))) {
        bh_unlock_sock(sk);
        NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
        goto discard_and_relse;
    }
    bh_unlock_sock(sk);

    sock_put(sk);
    return ret ? -1 : 0;

no_tcp_socket:
    if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
        goto discard_it;

    if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
        TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
        TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
    } else {
        tcp_v6_send_reset(NULL, skb);
    }

discard_it:
    kfree_skb(skb);
    return 0;

discard_and_relse:
    sock_put(sk);
    goto discard_it;

do_time_wait:
    if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
        inet_twsk_put(inet_twsk(sk));
        goto discard_it;
    }

    if (skb->len < (th->doff<<2)) {
        inet_twsk_put(inet_twsk(sk));
        goto bad_packet;
    }
    if (tcp_checksum_complete(skb)) {
        inet_twsk_put(inet_twsk(sk));
        goto csum_error;
    }

    switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
    case TCP_TW_SYN:
    {
        struct sock *sk2;

        sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
                        &ipv6_hdr(skb)->saddr, th->source,
                        &ipv6_hdr(skb)->daddr,
                        ntohs(th->dest), tcp_v6_iif(skb));
        if (sk2 != NULL) {
            struct inet_timewait_sock *tw = inet_twsk(sk);
            inet_twsk_deschedule(tw, &tcp_death_row);
            inet_twsk_put(tw);
            sk = sk2;
            goto process;
        }
        /* Fall through to ACK */
    }
    case TCP_TW_ACK:
        tcp_v6_timewait_ack(sk, skb);
        break;
    case TCP_TW_RST:
        goto no_tcp_socket;
    case TCP_TW_SUCCESS:
        ;
    }
    goto discard_it;
}

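/* Early demux: look up an established socket before routing so the
 * cached rx_dst can be attached to the skb, saving a route lookup.
 */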
static void tcp_v6_early_demux(struct sk_buff *skb)
{
    const struct ipv6hdr *hdr;
    const struct tcphdr *th;
    struct sock *sk;

    if (skb->pkt_type != PACKET_HOST)
        return;

    if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
        return;

    hdr = ipv6_hdr(skb);
    th = tcp_hdr(skb);

    if (th->doff < sizeof(struct tcphdr) / 4)
        return;

    /* Note : We use inet6_iif() here, not tcp_v6_iif() */
    sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
                    &hdr->saddr, th->source,
                    &hdr->daddr, ntohs(th->dest),
                    inet6_iif(skb));
    if (sk) {
        skb->sk = sk;
        skb->destructor = sock_edemux;
        if (sk->sk_state != TCP_TIME_WAIT) {
            struct dst_entry *dst = sk->sk_rx_dst;

            if (dst)
                dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
            if (dst &&
                inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
                skb_dst_set_noref(skb, dst);
        }
    }
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
    .twsk_obj_size   = sizeof(struct tcp6_timewait_sock),
    .twsk_unique     = tcp_twsk_unique,
    .twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
    .queue_xmit          = inet6_csk_xmit,
    .send_check          = tcp_v6_send_check,
    .rebuild_header      = inet6_sk_rebuild_header,
    .sk_rx_dst_set       = inet6_sk_rx_dst_set,
    .conn_request        = tcp_v6_conn_request,
    .syn_recv_sock       = tcp_v6_syn_recv_sock,
    .net_header_len      = sizeof(struct ipv6hdr),
    .net_frag_header_len = sizeof(struct frag_hdr),
    .setsockopt          = ipv6_setsockopt,
    .getsockopt          = ipv6_getsockopt,
    .addr2sockaddr       = inet6_csk_addr2sockaddr,
    .sockaddr_len        = sizeof(struct sockaddr_in6),
    .bind_conflict       = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
    .compat_setsockopt   = compat_ipv6_setsockopt,
    .compat_getsockopt   = compat_ipv6_getsockopt,
#endif
    .mtu_reduced         = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
    .md5_lookup    = tcp_v6_md5_lookup,
    .calc_md5_hash = tcp_v6_md5_hash_skb,
    .md5_parse     = tcp_v6_parse_md5_keys,
};
#endif

/*
 * TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
    .queue_xmit        = ip_queue_xmit,
    .send_check        = tcp_v4_send_check,
    .rebuild_header    = inet_sk_rebuild_header,
    .sk_rx_dst_set     = inet_sk_rx_dst_set,
    .conn_request      = tcp_v6_conn_request,
    .syn_recv_sock     = tcp_v6_syn_recv_sock,
    .net_header_len    = sizeof(struct iphdr),
    .setsockopt        = ipv6_setsockopt,
    .getsockopt        = ipv6_getsockopt,
    .addr2sockaddr     = inet6_csk_addr2sockaddr,
    .sockaddr_len      = sizeof(struct sockaddr_in6),
    .bind_conflict     = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
    .compat_setsockopt = compat_ipv6_setsockopt,
    .compat_getsockopt = compat_ipv6_getsockopt,
#endif
    .mtu_reduced       = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
    .md5_lookup    = tcp_v4_md5_lookup,
    .calc_md5_hash = tcp_v4_md5_hash_skb,
    .md5_parse     = tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
    struct inet_connection_sock *icsk = inet_csk(sk);

    tcp_init_sock(sk);

    icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
    tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

    return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
    tcp_v4_destroy_sock(sk);
    inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
             const struct sock *sk, struct request_sock *req,
             int i, kuid_t uid)
{
    int ttd = req->expires - jiffies;
    const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
    const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

    if (ttd < 0)
        ttd = 0;

    seq_printf(seq,
           "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
           "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
           i,
           src->s6_addr32[0], src->s6_addr32[1],
           src->s6_addr32[2], src->s6_addr32[3],
           inet_rsk(req)->ir_num,
           dest->s6_addr32[0], dest->s6_addr32[1],
           dest->s6_addr32[2], dest->s6_addr32[3],
           ntohs(inet_rsk(req)->ir_rmt_port),
           TCP_SYN_RECV,
           0, 0, /* could print option size, but that is af dependent. */
           1,    /* timers active (only the expire timer) */
           jiffies_to_clock_t(ttd),
           req->num_timeout,
           from_kuid_munged(seq_user_ns(seq), uid),
           0,    /* non standard timer */
           0,    /* open_requests have no inode */
           0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
    const struct in6_addr *dest, *src;
    __u16 destp, srcp;
    int timer_active;
    unsigned long timer_expires;
    const struct inet_sock *inet = inet_sk(sp);
    const struct tcp_sock *tp = tcp_sk(sp);
    const struct inet_connection_sock *icsk = inet_csk(sp);
    struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

    dest  = &sp->sk_v6_daddr;
    src   = &sp->sk_v6_rcv_saddr;
    destp = ntohs(inet->inet_dport);
    srcp  = ntohs(inet->inet_sport);

    if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
        timer_active  = 1;
        timer_expires = icsk->icsk_timeout;
    } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
        timer_active  = 4;
        timer_expires = icsk->icsk_timeout;
    } else if (timer_pending(&sp->sk_timer)) {
        timer_active  = 2;
        timer_expires = sp->sk_timer.expires;
    } else {
        timer_active  = 0;
        timer_expires = jiffies;
    }

    seq_printf(seq,
           "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
           "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
           i,
           src->s6_addr32[0], src->s6_addr32[1],
           src->s6_addr32[2], src->s6_addr32[3], srcp,
           dest->s6_addr32[0], dest->s6_addr32[1],
           dest->s6_addr32[2], dest->s6_addr32[3], destp,
           sp->sk_state,
           tp->write_seq-tp->snd_una,
           (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
           timer_active,
           jiffies_delta_to_clock_t(timer_expires - jiffies),
           icsk->icsk_retransmits,
           from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
           icsk->icsk_probes_out,
           sock_i_ino(sp),
           atomic_read(&sp->sk_refcnt), sp,
           jiffies_to_clock_t(icsk->icsk_rto),
           jiffies_to_clock_t(icsk->icsk_ack.ato),
           (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
           tp->snd_cwnd,
           sp->sk_state == TCP_LISTEN ?
            (fastopenq ? fastopenq->max_qlen : 0) :
            (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
           );
}

static void get_timewait6_sock(struct seq_file *seq,
                   struct inet_timewait_sock *tw, int i)
{
    const struct in6_addr *dest, *src;
    __u16 destp, srcp;
    s32 delta = tw->tw_ttd - inet_tw_time_stamp();

    dest = &tw->tw_v6_daddr;
    src  = &tw->tw_v6_rcv_saddr;
    destp = ntohs(tw->tw_dport);
    srcp  = ntohs(tw->tw_sport);

    seq_printf(seq,
           "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
           "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
           i,
           src->s6_addr32[0], src->s6_addr32[1],
           src->s6_addr32[2], src->s6_addr32[3], srcp,
           dest->s6_addr32[0], dest->s6_addr32[1],
           dest->s6_addr32[2], dest->s6_addr32[3], destp,
           tw->tw_substate, 0, 0,
           3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
           atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
    struct tcp_iter_state *st;
    struct sock *sk = v;

    if (v == SEQ_START_TOKEN) {
        seq_puts(seq,
             "  sl  "
             "local_address                         "
             "remote_address                        "
             "st tx_queue rx_queue tr tm->when retrnsmt"
             "   uid  timeout inode\n");
        goto out;
    }
    st = seq->private;

    switch (st->state) {
    case TCP_SEQ_STATE_LISTENING:
    case TCP_SEQ_STATE_ESTABLISHED:
        if (sk->sk_state == TCP_TIME_WAIT)
            get_timewait6_sock(seq, v, st->num);
        else
            get_tcp6_sock(seq, v, st->num);
        break;
    case TCP_SEQ_STATE_OPENREQ:
        get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
        break;
    }
out:
    return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
    .owner   = THIS_MODULE,
    .open    = tcp_seq_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
    .name     = "tcp6",
    .family   = AF_INET6,
    .seq_fops = &tcp6_afinfo_seq_fops,
    .seq_ops  = {
        .show = tcp6_seq_show,
    },
};

int __net_init tcp6_proc_init(struct net *net)
{
    return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
    tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
    struct inet_sock *inet = inet_sk(sk);

    /* we do not want to clear the pinet6 field, because of RCU lookups */
    sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

    size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
    memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
    .name                  = "TCPv6",
    .owner                 = THIS_MODULE,
    .close                 = tcp_close,
    .connect               = tcp_v6_connect,
    .disconnect            = tcp_disconnect,
    .accept                = inet_csk_accept,
    .ioctl                 = tcp_ioctl,
    .init                  = tcp_v6_init_sock,
    .destroy               = tcp_v6_destroy_sock,
    .shutdown              = tcp_shutdown,
    .setsockopt            = tcp_setsockopt,
    .getsockopt            = tcp_getsockopt,
    .recvmsg               = tcp_recvmsg,
    .sendmsg               = tcp_sendmsg,
    .sendpage              = tcp_sendpage,
    .backlog_rcv           = tcp_v6_do_rcv,
    .release_cb            = tcp_release_cb,
    .hash                  = tcp_v6_hash,
    .unhash                = inet_unhash,
    .get_port              = inet_csk_get_port,
    .enter_memory_pressure = tcp_enter_memory_pressure,
    .stream_memory_free    = tcp_stream_memory_free,
    .sockets_allocated     = &tcp_sockets_allocated,
    .memory_allocated      = &tcp_memory_allocated,
    .memory_pressure       = &tcp_memory_pressure,
    .orphan_count          = &tcp_orphan_count,
    .sysctl_mem            = sysctl_tcp_mem,
    .sysctl_wmem           = sysctl_tcp_wmem,
    .sysctl_rmem           = sysctl_tcp_rmem,
    .max_header            = MAX_TCP_HEADER,
    .obj_size              = sizeof(struct tcp6_sock),
    .slab_flags            = SLAB_DESTROY_BY_RCU,
    .twsk_prot             = &tcp6_timewait_sock_ops,
    .rsk_prot              = &tcp6_request_sock_ops,
    .h.hashinfo            = &tcp_hashinfo,
    .no_autobind           = true,
#ifdef CONFIG_COMPAT
    .compat_setsockopt     = compat_tcp_setsockopt,
    .compat_getsockopt     = compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
    .proto_cgroup          = tcp_proto_cgroup,
#endif
    .clear_sk              = tcp_v6_clear_sk,
};

static const struct inet6_protocol tcpv6_protocol = {
    .early_demux = tcp_v6_early_demux,
    .handler     = tcp_v6_rcv,
    .err_handler = tcp_v6_err,
    .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
    .type     = SOCK_STREAM,
    .protocol = IPPROTO_TCP,
    .prot     = &tcpv6_prot,
    .ops      = &inet6_stream_ops,
    .flags    = INET_PROTOSW_PERMANENT |
                INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
    return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
                    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
    inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
    inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
    .init       = tcpv6_net_init,
    .exit       = tcpv6_net_exit,
    .exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
    int ret;

    ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
    if (ret)
        goto out;

    /* register inet6 protocol */
    ret = inet6_register_protosw(&tcpv6_protosw);
    if (ret)
        goto out_tcpv6_protocol;

    ret = register_pernet_subsys(&tcpv6_net_ops);
    if (ret)
        goto out_tcpv6_protosw;
out:
    return ret;

out_tcpv6_protosw:
    inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
    inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
    goto out;
}

void tcpv6_exit(void)
{
    unregister_pernet_subsys(&tcpv6_net_ops);
    inet6_unregister_protosw(&tcpv6_protosw);
    inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}