net/ipv6/tcp_ipv6.c
1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
65 #include <net/tcp_memcontrol.h>
66 #include <net/busy_poll.h>
67
68 #include <linux/proc_fs.h>
69 #include <linux/seq_file.h>
70
71 #include <linux/crypto.h>
72 #include <linux/scatterlist.h>
73
74 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
75 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
76 struct request_sock *req);
77
78 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79
80 static const struct inet_connection_sock_af_ops ipv6_mapped;
81 static const struct inet_connection_sock_af_ops ipv6_specific;
82 #ifdef CONFIG_TCP_MD5SIG
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
85 #else
86 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
87 const struct in6_addr *addr)
88 {
89 return NULL;
90 }
91 #endif
92
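/* Cache the inbound route on the socket, along with the receiving
 * interface index and the fib node serial number that is later used
 * to validate the cached dst.
 */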
93 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
94 {
95 struct dst_entry *dst = skb_dst(skb);
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
97
98 dst_hold(dst);
99 sk->sk_rx_dst = dst;
100 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
101 if (rt->rt6i_node)
102 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
103 }
104
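/* Insert the socket into the TCP hash tables; v4-mapped sockets are
 * delegated to the IPv4 hash routine.
 */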
105 static void tcp_v6_hash(struct sock *sk)
106 {
107 if (sk->sk_state != TCP_CLOSE) {
108 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
109 tcp_prot.hash(sk);
110 return;
111 }
112 local_bh_disable();
113 __inet6_hash(sk, NULL);
114 local_bh_enable();
115 }
116 }
117
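/* Derive the initial sequence number from the address/port 4-tuple
 * of the incoming segment.
 */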
118 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
119 {
120 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
121 ipv6_hdr(skb)->saddr.s6_addr32,
122 tcp_hdr(skb)->dest,
123 tcp_hdr(skb)->source);
124 }
125
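/* Active open: validate the destination address, fall back to
 * tcp_v4_connect() for v4-mapped destinations, route the flow, pick
 * a source address, bind a local port and send the SYN.
 */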
126 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
127 int addr_len)
128 {
129 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
130 struct inet_sock *inet = inet_sk(sk);
131 struct inet_connection_sock *icsk = inet_csk(sk);
132 struct ipv6_pinfo *np = inet6_sk(sk);
133 struct tcp_sock *tp = tcp_sk(sk);
134 struct in6_addr *saddr = NULL, *final_p, final;
135 struct rt6_info *rt;
136 struct flowi6 fl6;
137 struct dst_entry *dst;
138 int addr_type;
139 int err;
140
141 if (addr_len < SIN6_LEN_RFC2133)
142 return -EINVAL;
143
144 if (usin->sin6_family != AF_INET6)
145 return -EAFNOSUPPORT;
146
147 memset(&fl6, 0, sizeof(fl6));
148
149 if (np->sndflow) {
150 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
151 IP6_ECN_flow_init(fl6.flowlabel);
152 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
153 struct ip6_flowlabel *flowlabel;
154 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
155 if (flowlabel == NULL)
156 return -EINVAL;
157 fl6_sock_release(flowlabel);
158 }
159 }
160
161 /*
162 * connect() to INADDR_ANY means loopback (BSD'ism).
163 */
164
165 if (ipv6_addr_any(&usin->sin6_addr))
166 usin->sin6_addr.s6_addr[15] = 0x1;
167
168 addr_type = ipv6_addr_type(&usin->sin6_addr);
169
170 if (addr_type & IPV6_ADDR_MULTICAST)
171 return -ENETUNREACH;
172
173 if (addr_type&IPV6_ADDR_LINKLOCAL) {
174 if (addr_len >= sizeof(struct sockaddr_in6) &&
175 usin->sin6_scope_id) {
176 /* If interface is set while binding, indices
177 * must coincide.
178 */
179 if (sk->sk_bound_dev_if &&
180 sk->sk_bound_dev_if != usin->sin6_scope_id)
181 return -EINVAL;
182
183 sk->sk_bound_dev_if = usin->sin6_scope_id;
184 }
185
186 /* Connecting to a link-local address requires an interface */
187 if (!sk->sk_bound_dev_if)
188 return -EINVAL;
189 }
190
191 if (tp->rx_opt.ts_recent_stamp &&
192 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
193 tp->rx_opt.ts_recent = 0;
194 tp->rx_opt.ts_recent_stamp = 0;
195 tp->write_seq = 0;
196 }
197
198 sk->sk_v6_daddr = usin->sin6_addr;
199 np->flow_label = fl6.flowlabel;
200
201 ip6_set_txhash(sk);
202
203 /*
204 * TCP over IPv4
205 */
206
207 if (addr_type == IPV6_ADDR_MAPPED) {
208 u32 exthdrlen = icsk->icsk_ext_hdr_len;
209 struct sockaddr_in sin;
210
211 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
212
213 if (__ipv6_only_sock(sk))
214 return -ENETUNREACH;
215
216 sin.sin_family = AF_INET;
217 sin.sin_port = usin->sin6_port;
218 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
219
220 icsk->icsk_af_ops = &ipv6_mapped;
221 sk->sk_backlog_rcv = tcp_v4_do_rcv;
222 #ifdef CONFIG_TCP_MD5SIG
223 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
224 #endif
225
226 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
227
228 if (err) {
229 icsk->icsk_ext_hdr_len = exthdrlen;
230 icsk->icsk_af_ops = &ipv6_specific;
231 sk->sk_backlog_rcv = tcp_v6_do_rcv;
232 #ifdef CONFIG_TCP_MD5SIG
233 tp->af_specific = &tcp_sock_ipv6_specific;
234 #endif
235 goto failure;
236 } else {
237 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
238 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
239 &sk->sk_v6_rcv_saddr);
240 }
241
242 return err;
243 }
244
245 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
246 saddr = &sk->sk_v6_rcv_saddr;
247
248 fl6.flowi6_proto = IPPROTO_TCP;
249 fl6.daddr = sk->sk_v6_daddr;
250 fl6.saddr = saddr ? *saddr : np->saddr;
251 fl6.flowi6_oif = sk->sk_bound_dev_if;
252 fl6.flowi6_mark = sk->sk_mark;
253 fl6.fl6_dport = usin->sin6_port;
254 fl6.fl6_sport = inet->inet_sport;
255
256 final_p = fl6_update_dst(&fl6, np->opt, &final);
257
258 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
259
260 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
261 if (IS_ERR(dst)) {
262 err = PTR_ERR(dst);
263 goto failure;
264 }
265
266 if (saddr == NULL) {
267 saddr = &fl6.saddr;
268 sk->sk_v6_rcv_saddr = *saddr;
269 }
270
271 /* set the source address */
272 np->saddr = *saddr;
273 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
274
275 sk->sk_gso_type = SKB_GSO_TCPV6;
276 __ip6_dst_store(sk, dst, NULL, NULL);
277
278 rt = (struct rt6_info *) dst;
279 if (tcp_death_row.sysctl_tw_recycle &&
280 !tp->rx_opt.ts_recent_stamp &&
281 ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
282 tcp_fetch_timewait_stamp(sk, dst);
283
284 icsk->icsk_ext_hdr_len = 0;
285 if (np->opt)
286 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
287 np->opt->opt_nflen);
288
289 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
290
291 inet->inet_dport = usin->sin6_port;
292
293 tcp_set_state(sk, TCP_SYN_SENT);
294 err = inet6_hash_connect(&tcp_death_row, sk);
295 if (err)
296 goto late_failure;
297
298 if (!tp->write_seq && likely(!tp->repair))
299 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
300 sk->sk_v6_daddr.s6_addr32,
301 inet->inet_sport,
302 inet->inet_dport);
303
304 err = tcp_connect(sk);
305 if (err)
306 goto late_failure;
307
308 return 0;
309
310 late_failure:
311 tcp_set_state(sk, TCP_CLOSE);
312 __sk_dst_reset(sk);
313 failure:
314 inet->inet_dport = 0;
315 sk->sk_route_caps = 0;
316 return err;
317 }
318
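/* Apply a deferred PMTU update: refresh the cached route and, if the
 * path MTU dropped below the cached value, shrink the MSS and
 * retransmit.
 */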
319 static void tcp_v6_mtu_reduced(struct sock *sk)
320 {
321 struct dst_entry *dst;
322
323 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
324 return;
325
326 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
327 if (!dst)
328 return;
329
330 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
331 tcp_sync_mss(sk, dst_mtu(dst));
332 tcp_simple_retransmit(sk);
333 }
334 }
335
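/* ICMPv6 error handler: locate the owning socket (or request sock)
 * and propagate the error, treating redirects and Packet Too Big
 * messages specially.
 */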
336 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
337 u8 type, u8 code, int offset, __be32 info)
338 {
339 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
340 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
341 struct ipv6_pinfo *np;
342 struct sock *sk;
343 int err;
344 struct tcp_sock *tp;
345 struct request_sock *fastopen;
346 __u32 seq, snd_una;
347 struct net *net = dev_net(skb->dev);
348
349 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
350 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
351
352 if (sk == NULL) {
353 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
354 ICMP6_MIB_INERRORS);
355 return;
356 }
357
358 if (sk->sk_state == TCP_TIME_WAIT) {
359 inet_twsk_put(inet_twsk(sk));
360 return;
361 }
362
363 bh_lock_sock(sk);
364 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
365 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
366
367 if (sk->sk_state == TCP_CLOSE)
368 goto out;
369
370 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
371 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
372 goto out;
373 }
374
375 tp = tcp_sk(sk);
376 seq = ntohl(th->seq);
377 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()). */
378 fastopen = tp->fastopen_rsk;
379 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
380 if (sk->sk_state != TCP_LISTEN &&
381 !between(seq, snd_una, tp->snd_nxt)) {
382 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
383 goto out;
384 }
385
386 np = inet6_sk(sk);
387
388 if (type == NDISC_REDIRECT) {
389 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
390
391 if (dst)
392 dst->ops->redirect(dst, sk, skb);
393 goto out;
394 }
395
396 if (type == ICMPV6_PKT_TOOBIG) {
397 /* We are not interested in TCP_LISTEN and open_requests
398 * (SYN-ACKs sent out by Linux are always < 576 bytes so
399 * they should go through unfragmented).
400 */
401 if (sk->sk_state == TCP_LISTEN)
402 goto out;
403
404 if (!ip6_sk_accept_pmtu(sk))
405 goto out;
406
407 tp->mtu_info = ntohl(info);
408 if (!sock_owned_by_user(sk))
409 tcp_v6_mtu_reduced(sk);
410 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
411 &tp->tsq_flags))
412 sock_hold(sk);
413 goto out;
414 }
415
416 icmpv6_err_convert(type, code, &err);
417
418 /* Might be for a request_sock */
419 switch (sk->sk_state) {
420 struct request_sock *req, **prev;
421 case TCP_LISTEN:
422 if (sock_owned_by_user(sk))
423 goto out;
424
425 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
426 &hdr->saddr, inet6_iif(skb));
427 if (!req)
428 goto out;
429
430 /* ICMPs are not backlogged, hence we cannot get
431 * an established socket here.
432 */
433 WARN_ON(req->sk != NULL);
434
435 if (seq != tcp_rsk(req)->snt_isn) {
436 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
437 goto out;
438 }
439
440 inet_csk_reqsk_queue_drop(sk, req, prev);
441 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
442 goto out;
443
444 case TCP_SYN_SENT:
445 case TCP_SYN_RECV:
446 /* Only in fast or simultaneous open. If a fast open socket is
447 * already accepted it is treated as a connected one below.
448 */
449 if (fastopen && fastopen->sk == NULL)
450 break;
451
452 if (!sock_owned_by_user(sk)) {
453 sk->sk_err = err;
454 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
455
456 tcp_done(sk);
457 } else
458 sk->sk_err_soft = err;
459 goto out;
460 }
461
462 if (!sock_owned_by_user(sk) && np->recverr) {
463 sk->sk_err = err;
464 sk->sk_error_report(sk);
465 } else
466 sk->sk_err_soft = err;
467
468 out:
469 bh_unlock_sock(sk);
470 sock_put(sk);
471 }
472
473
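/* Grab a route if none was supplied, build a SYN-ACK for the request
 * sock and transmit it with ip6_xmit().
 */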
474 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
475 struct flowi *fl,
476 struct request_sock *req,
477 u16 queue_mapping,
478 struct tcp_fastopen_cookie *foc)
479 {
480 struct inet_request_sock *ireq = inet_rsk(req);
481 struct ipv6_pinfo *np = inet6_sk(sk);
482 struct flowi6 *fl6 = &fl->u.ip6;
483 struct sk_buff *skb;
484 int err = -ENOMEM;
485
486 /* First, grab a route. */
487 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
488 goto done;
489
490 skb = tcp_make_synack(sk, dst, req, foc);
491
492 if (skb) {
493 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
494 &ireq->ir_v6_rmt_addr);
495
496 fl6->daddr = ireq->ir_v6_rmt_addr;
497 if (np->repflow && (ireq->pktopts != NULL))
498 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
499
500 skb_set_queue_mapping(skb, queue_mapping);
501 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
502 err = net_xmit_eval(err);
503 }
504
505 done:
506 return err;
507 }
508
509
510 static void tcp_v6_reqsk_destructor(struct request_sock *req)
511 {
512 kfree_skb(inet_rsk(req)->pktopts);
513 }
514
515 #ifdef CONFIG_TCP_MD5SIG
516 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
517 const struct in6_addr *addr)
518 {
519 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
520 }
521
522 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
523 struct sock *addr_sk)
524 {
525 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
526 }
527
528 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
529 struct request_sock *req)
530 {
531 return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
532 }
533
534 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
535 int optlen)
536 {
537 struct tcp_md5sig cmd;
538 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
539
540 if (optlen < sizeof(cmd))
541 return -EINVAL;
542
543 if (copy_from_user(&cmd, optval, sizeof(cmd)))
544 return -EFAULT;
545
546 if (sin6->sin6_family != AF_INET6)
547 return -EINVAL;
548
549 if (!cmd.tcpm_keylen) {
550 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
551 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
552 AF_INET);
553 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
554 AF_INET6);
555 }
556
557 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
558 return -EINVAL;
559
560 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
561 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
562 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
563
564 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
565 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
566 }
567
568 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
569 const struct in6_addr *daddr,
570 const struct in6_addr *saddr, int nbytes)
571 {
572 struct tcp6_pseudohdr *bp;
573 struct scatterlist sg;
574
575 bp = &hp->md5_blk.ip6;
576 /* 1. TCP pseudo-header (RFC2460) */
577 bp->saddr = *saddr;
578 bp->daddr = *daddr;
579 bp->protocol = cpu_to_be32(IPPROTO_TCP);
580 bp->len = cpu_to_be32(nbytes);
581
582 sg_init_one(&sg, bp, sizeof(*bp));
583 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
584 }
585
586 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
587 const struct in6_addr *daddr, struct in6_addr *saddr,
588 const struct tcphdr *th)
589 {
590 struct tcp_md5sig_pool *hp;
591 struct hash_desc *desc;
592
593 hp = tcp_get_md5sig_pool();
594 if (!hp)
595 goto clear_hash_noput;
596 desc = &hp->md5_desc;
597
598 if (crypto_hash_init(desc))
599 goto clear_hash;
600 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
601 goto clear_hash;
602 if (tcp_md5_hash_header(hp, th))
603 goto clear_hash;
604 if (tcp_md5_hash_key(hp, key))
605 goto clear_hash;
606 if (crypto_hash_final(desc, md5_hash))
607 goto clear_hash;
608
609 tcp_put_md5sig_pool();
610 return 0;
611
612 clear_hash:
613 tcp_put_md5sig_pool();
614 clear_hash_noput:
615 memset(md5_hash, 0, 16);
616 return 1;
617 }
618
619 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
620 const struct sock *sk,
621 const struct request_sock *req,
622 const struct sk_buff *skb)
623 {
624 const struct in6_addr *saddr, *daddr;
625 struct tcp_md5sig_pool *hp;
626 struct hash_desc *desc;
627 const struct tcphdr *th = tcp_hdr(skb);
628
629 if (sk) {
630 saddr = &inet6_sk(sk)->saddr;
631 daddr = &sk->sk_v6_daddr;
632 } else if (req) {
633 saddr = &inet_rsk(req)->ir_v6_loc_addr;
634 daddr = &inet_rsk(req)->ir_v6_rmt_addr;
635 } else {
636 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
637 saddr = &ip6h->saddr;
638 daddr = &ip6h->daddr;
639 }
640
641 hp = tcp_get_md5sig_pool();
642 if (!hp)
643 goto clear_hash_noput;
644 desc = &hp->md5_desc;
645
646 if (crypto_hash_init(desc))
647 goto clear_hash;
648
649 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
650 goto clear_hash;
651 if (tcp_md5_hash_header(hp, th))
652 goto clear_hash;
653 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
654 goto clear_hash;
655 if (tcp_md5_hash_key(hp, key))
656 goto clear_hash;
657 if (crypto_hash_final(desc, md5_hash))
658 goto clear_hash;
659
660 tcp_put_md5sig_pool();
661 return 0;
662
663 clear_hash:
664 tcp_put_md5sig_pool();
665 clear_hash_noput:
666 memset(md5_hash, 0, 16);
667 return 1;
668 }
669
670 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
671 {
672 const __u8 *hash_location = NULL;
673 struct tcp_md5sig_key *hash_expected;
674 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
675 const struct tcphdr *th = tcp_hdr(skb);
676 int genhash;
677 u8 newhash[16];
678
679 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
680 hash_location = tcp_parse_md5sig_option(th);
681
682 /* We've parsed the options - do we have a hash? */
683 if (!hash_expected && !hash_location)
684 return 0;
685
686 if (hash_expected && !hash_location) {
687 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
688 return 1;
689 }
690
691 if (!hash_expected && hash_location) {
692 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
693 return 1;
694 }
695
696 /* check the signature */
697 genhash = tcp_v6_md5_hash_skb(newhash,
698 hash_expected,
699 NULL, NULL, skb);
700
701 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
702 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
703 genhash ? "failed" : "mismatch",
704 &ip6h->saddr, ntohs(th->source),
705 &ip6h->daddr, ntohs(th->dest));
706 return 1;
707 }
708 return 0;
709 }
710 #endif
711
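/* Fill the IPv6-specific fields of a new request sock and, when the
 * listener asked for packet options, keep a reference to the SYN skb.
 */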
712 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
713 struct sk_buff *skb)
714 {
715 struct inet_request_sock *ireq = inet_rsk(req);
716 struct ipv6_pinfo *np = inet6_sk(sk);
717
718 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
719 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
720
721 ireq->ir_iif = sk->sk_bound_dev_if;
722
723 /* So that link locals have meaning */
724 if (!sk->sk_bound_dev_if &&
725 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
726 ireq->ir_iif = inet6_iif(skb);
727
728 if (!TCP_SKB_CB(skb)->when &&
729 (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
730 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
731 np->rxopt.bits.rxohlim || np->repflow)) {
732 atomic_inc(&skb->users);
733 ireq->pktopts = skb;
734 }
735 }
736
737 static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
738 const struct request_sock *req,
739 bool *strict)
740 {
741 if (strict)
742 *strict = true;
743 return inet6_csk_route_req(sk, &fl->u.ip6, req);
744 }
745
746 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
747 .family = AF_INET6,
748 .obj_size = sizeof(struct tcp6_request_sock),
749 .rtx_syn_ack = tcp_rtx_synack,
750 .send_ack = tcp_v6_reqsk_send_ack,
751 .destructor = tcp_v6_reqsk_destructor,
752 .send_reset = tcp_v6_send_reset,
753 .syn_ack_timeout = tcp_syn_ack_timeout,
754 };
755
756 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
757 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
758 sizeof(struct ipv6hdr),
759 #ifdef CONFIG_TCP_MD5SIG
760 .md5_lookup = tcp_v6_reqsk_md5_lookup,
761 .calc_md5_hash = tcp_v6_md5_hash_skb,
762 #endif
763 .init_req = tcp_v6_init_req,
764 #ifdef CONFIG_SYN_COOKIES
765 .cookie_init_seq = cookie_v6_init_sequence,
766 #endif
767 .route_req = tcp_v6_route_req,
768 .init_seq = tcp_v6_init_sequence,
769 .send_synack = tcp_v6_send_synack,
770 .queue_hash_add = inet6_csk_reqsk_queue_hash_add,
771 };
772
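/* Build and send a bare ACK or RST on the per-netns control socket,
 * echoing the addresses of the segment being answered.
 */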
773 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
774 u32 tsval, u32 tsecr, int oif,
775 struct tcp_md5sig_key *key, int rst, u8 tclass,
776 u32 label)
777 {
778 const struct tcphdr *th = tcp_hdr(skb);
779 struct tcphdr *t1;
780 struct sk_buff *buff;
781 struct flowi6 fl6;
782 struct net *net = dev_net(skb_dst(skb)->dev);
783 struct sock *ctl_sk = net->ipv6.tcp_sk;
784 unsigned int tot_len = sizeof(struct tcphdr);
785 struct dst_entry *dst;
786 __be32 *topt;
787
788 if (tsecr)
789 tot_len += TCPOLEN_TSTAMP_ALIGNED;
790 #ifdef CONFIG_TCP_MD5SIG
791 if (key)
792 tot_len += TCPOLEN_MD5SIG_ALIGNED;
793 #endif
794
795 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
796 GFP_ATOMIC);
797 if (buff == NULL)
798 return;
799
800 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
801
802 t1 = (struct tcphdr *) skb_push(buff, tot_len);
803 skb_reset_transport_header(buff);
804
805 /* Swap the send and the receive. */
806 memset(t1, 0, sizeof(*t1));
807 t1->dest = th->source;
808 t1->source = th->dest;
809 t1->doff = tot_len / 4;
810 t1->seq = htonl(seq);
811 t1->ack_seq = htonl(ack);
812 t1->ack = !rst || !th->ack;
813 t1->rst = rst;
814 t1->window = htons(win);
815
816 topt = (__be32 *)(t1 + 1);
817
818 if (tsecr) {
819 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
820 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
821 *topt++ = htonl(tsval);
822 *topt++ = htonl(tsecr);
823 }
824
825 #ifdef CONFIG_TCP_MD5SIG
826 if (key) {
827 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
828 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
829 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
830 &ipv6_hdr(skb)->saddr,
831 &ipv6_hdr(skb)->daddr, t1);
832 }
833 #endif
834
835 memset(&fl6, 0, sizeof(fl6));
836 fl6.daddr = ipv6_hdr(skb)->saddr;
837 fl6.saddr = ipv6_hdr(skb)->daddr;
838 fl6.flowlabel = label;
839
840 buff->ip_summed = CHECKSUM_PARTIAL;
841 buff->csum = 0;
842
843 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
844
845 fl6.flowi6_proto = IPPROTO_TCP;
846 if (rt6_need_strict(&fl6.daddr) && !oif)
847 fl6.flowi6_oif = inet6_iif(skb);
848 else
849 fl6.flowi6_oif = oif;
850 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
851 fl6.fl6_dport = t1->dest;
852 fl6.fl6_sport = t1->source;
853 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
854
855 /* Pass a socket to ip6_dst_lookup even when it is for a RST;
856 * the underlying function will use it to retrieve the network
857 * namespace.
858 */
859 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
860 if (!IS_ERR(dst)) {
861 skb_dst_set(buff, dst);
862 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
863 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
864 if (rst)
865 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
866 return;
867 }
868
869 kfree_skb(buff);
870 }
871
872 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
873 {
874 const struct tcphdr *th = tcp_hdr(skb);
875 u32 seq = 0, ack_seq = 0;
876 struct tcp_md5sig_key *key = NULL;
877 #ifdef CONFIG_TCP_MD5SIG
878 const __u8 *hash_location = NULL;
879 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
880 unsigned char newhash[16];
881 int genhash;
882 struct sock *sk1 = NULL;
883 #endif
884 int oif;
885
886 if (th->rst)
887 return;
888
889 if (!ipv6_unicast_destination(skb))
890 return;
891
892 #ifdef CONFIG_TCP_MD5SIG
893 hash_location = tcp_parse_md5sig_option(th);
894 if (!sk && hash_location) {
895 /*
896 * active side is lost. Try to find listening socket through
897 * source port, and then find md5 key through listening socket.
898 * We do not lose security here:
899 * incoming packets are checked with the md5 hash of the found key,
900 * no RST generated if md5 hash doesn't match.
901 */
902 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
903 &tcp_hashinfo, &ipv6h->saddr,
904 th->source, &ipv6h->daddr,
905 ntohs(th->source), inet6_iif(skb));
906 if (!sk1)
907 return;
908
909 rcu_read_lock();
910 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
911 if (!key)
912 goto release_sk1;
913
914 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
915 if (genhash || memcmp(hash_location, newhash, 16) != 0)
916 goto release_sk1;
917 } else {
918 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
919 }
920 #endif
921
922 if (th->ack)
923 seq = ntohl(th->ack_seq);
924 else
925 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
926 (th->doff << 2);
927
928 oif = sk ? sk->sk_bound_dev_if : 0;
929 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
930
931 #ifdef CONFIG_TCP_MD5SIG
932 release_sk1:
933 if (sk1) {
934 rcu_read_unlock();
935 sock_put(sk1);
936 }
937 #endif
938 }
939
940 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
941 u32 win, u32 tsval, u32 tsecr, int oif,
942 struct tcp_md5sig_key *key, u8 tclass,
943 u32 label)
944 {
945 tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
946 label);
947 }
948
949 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
950 {
951 struct inet_timewait_sock *tw = inet_twsk(sk);
952 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
953
954 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
955 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
956 tcp_time_stamp + tcptw->tw_ts_offset,
957 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
958 tw->tw_tclass, (tw->tw_flowlabel << 12));
959
960 inet_twsk_put(tw);
961 }
962
963 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
964 struct request_sock *req)
965 {
966 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
967 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
968 */
969 tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
970 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
971 tcp_rsk(req)->rcv_nxt,
972 req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
973 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
974 0, 0);
975 }
976
977
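/* Match an incoming segment against pending connection requests, an
 * already established socket, or a SYN cookie.
 */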
978 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
979 {
980 struct request_sock *req, **prev;
981 const struct tcphdr *th = tcp_hdr(skb);
982 struct sock *nsk;
983
984 /* Find possible connection requests. */
985 req = inet6_csk_search_req(sk, &prev, th->source,
986 &ipv6_hdr(skb)->saddr,
987 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
988 if (req)
989 return tcp_check_req(sk, skb, req, prev, false);
990
991 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
992 &ipv6_hdr(skb)->saddr, th->source,
993 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
994
995 if (nsk) {
996 if (nsk->sk_state != TCP_TIME_WAIT) {
997 bh_lock_sock(nsk);
998 return nsk;
999 }
1000 inet_twsk_put(inet_twsk(nsk));
1001 return NULL;
1002 }
1003
1004 #ifdef CONFIG_SYN_COOKIES
1005 if (!th->syn)
1006 sk = cookie_v6_check(sk, skb);
1007 #endif
1008 return sk;
1009 }
1010
1011 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1012 {
1013 if (skb->protocol == htons(ETH_P_IP))
1014 return tcp_v4_conn_request(sk, skb);
1015
1016 if (!ipv6_unicast_destination(skb))
1017 goto drop;
1018
1019 return tcp_conn_request(&tcp6_request_sock_ops,
1020 &tcp_request_sock_ipv6_ops, sk, skb);
1021
1022 drop:
1023 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1024 return 0; /* don't send reset */
1025 }
1026
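/* Create the child socket once the handshake completes; the v4-mapped
 * case reuses tcp_v4_syn_recv_sock() and only swaps in the mapped ops.
 */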
1027 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1028 struct request_sock *req,
1029 struct dst_entry *dst)
1030 {
1031 struct inet_request_sock *ireq;
1032 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1033 struct tcp6_sock *newtcp6sk;
1034 struct inet_sock *newinet;
1035 struct tcp_sock *newtp;
1036 struct sock *newsk;
1037 #ifdef CONFIG_TCP_MD5SIG
1038 struct tcp_md5sig_key *key;
1039 #endif
1040 struct flowi6 fl6;
1041
1042 if (skb->protocol == htons(ETH_P_IP)) {
1043 /*
1044 * v6 mapped
1045 */
1046
1047 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1048
1049 if (newsk == NULL)
1050 return NULL;
1051
1052 newtcp6sk = (struct tcp6_sock *)newsk;
1053 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1054
1055 newinet = inet_sk(newsk);
1056 newnp = inet6_sk(newsk);
1057 newtp = tcp_sk(newsk);
1058
1059 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1060
1061 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
1062
1063 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1064
1065 newsk->sk_v6_rcv_saddr = newnp->saddr;
1066
1067 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1068 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1069 #ifdef CONFIG_TCP_MD5SIG
1070 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1071 #endif
1072
1073 newnp->ipv6_ac_list = NULL;
1074 newnp->ipv6_fl_list = NULL;
1075 newnp->pktoptions = NULL;
1076 newnp->opt = NULL;
1077 newnp->mcast_oif = inet6_iif(skb);
1078 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1079 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1080 if (np->repflow)
1081 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1082
1083 /*
1084 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1085 * here, tcp_create_openreq_child now does this for us, see the comment in
1086 * that function for the gory details. -acme
1087 */
1088
1089 /* It is a tricky place. Until this moment IPv4 tcp
1090 worked with IPv6 icsk.icsk_af_ops.
1091 Sync it now.
1092 */
1093 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1094
1095 return newsk;
1096 }
1097
1098 ireq = inet_rsk(req);
1099
1100 if (sk_acceptq_is_full(sk))
1101 goto out_overflow;
1102
1103 if (!dst) {
1104 dst = inet6_csk_route_req(sk, &fl6, req);
1105 if (!dst)
1106 goto out;
1107 }
1108
1109 newsk = tcp_create_openreq_child(sk, req, skb);
1110 if (newsk == NULL)
1111 goto out_nonewsk;
1112
1113 /*
1114 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1115 * count here, tcp_create_openreq_child now does this for us, see the
1116 * comment in that function for the gory details. -acme
1117 */
1118
1119 newsk->sk_gso_type = SKB_GSO_TCPV6;
1120 __ip6_dst_store(newsk, dst, NULL, NULL);
1121 inet6_sk_rx_dst_set(newsk, skb);
1122
1123 newtcp6sk = (struct tcp6_sock *)newsk;
1124 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1125
1126 newtp = tcp_sk(newsk);
1127 newinet = inet_sk(newsk);
1128 newnp = inet6_sk(newsk);
1129
1130 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1131
1132 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1133 newnp->saddr = ireq->ir_v6_loc_addr;
1134 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1135 newsk->sk_bound_dev_if = ireq->ir_iif;
1136
1137 ip6_set_txhash(newsk);
1138
1139 /* Now IPv6 options...
1140
1141 First: no IPv4 options.
1142 */
1143 newinet->inet_opt = NULL;
1144 newnp->ipv6_ac_list = NULL;
1145 newnp->ipv6_fl_list = NULL;
1146
1147 /* Clone RX bits */
1148 newnp->rxopt.all = np->rxopt.all;
1149
1150 /* Clone pktoptions received with SYN */
1151 newnp->pktoptions = NULL;
1152 if (ireq->pktopts != NULL) {
1153 newnp->pktoptions = skb_clone(ireq->pktopts,
1154 sk_gfp_atomic(sk, GFP_ATOMIC));
1155 consume_skb(ireq->pktopts);
1156 ireq->pktopts = NULL;
1157 if (newnp->pktoptions)
1158 skb_set_owner_r(newnp->pktoptions, newsk);
1159 }
1160 newnp->opt = NULL;
1161 newnp->mcast_oif = inet6_iif(skb);
1162 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1163 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1164 if (np->repflow)
1165 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1166
1167 /* Clone native IPv6 options from listening socket (if any)
1168
1169 Yes, keeping reference count would be much more clever,
1170 but we do one more thing here: reattach optmem
1171 to newsk.
1172 */
1173 if (np->opt)
1174 newnp->opt = ipv6_dup_options(newsk, np->opt);
1175
1176 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1177 if (newnp->opt)
1178 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1179 newnp->opt->opt_flen);
1180
1181 tcp_sync_mss(newsk, dst_mtu(dst));
1182 newtp->advmss = dst_metric_advmss(dst);
1183 if (tcp_sk(sk)->rx_opt.user_mss &&
1184 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1185 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1186
1187 tcp_initialize_rcv_mss(newsk);
1188
1189 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1190 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1191
1192 #ifdef CONFIG_TCP_MD5SIG
1193 /* Copy over the MD5 key from the original socket */
1194 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1195 if (key != NULL) {
1196 /* We're using one, so create a matching key
1197 * on the newsk structure. If we fail to get
1198 * memory, then we end up not copying the key
1199 * across. Shucks.
1200 */
1201 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1202 AF_INET6, key->key, key->keylen,
1203 sk_gfp_atomic(sk, GFP_ATOMIC));
1204 }
1205 #endif
1206
1207 if (__inet_inherit_port(sk, newsk) < 0) {
1208 inet_csk_prepare_forced_close(newsk);
1209 tcp_done(newsk);
1210 goto out;
1211 }
1212 __inet6_hash(newsk, NULL);
1213
1214 return newsk;
1215
1216 out_overflow:
1217 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1218 out_nonewsk:
1219 dst_release(dst);
1220 out:
1221 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1222 return NULL;
1223 }
1224
1225 /* The socket must have its spinlock held when we get
1226 * here.
1227 *
1228 * We have a potential double-lock case here, so even when
1229 * doing backlog processing we use the BH locking scheme.
1230 * This is because we cannot sleep with the original spinlock
1231 * held.
1232 */
1233 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1234 {
1235 struct ipv6_pinfo *np = inet6_sk(sk);
1236 struct tcp_sock *tp;
1237 struct sk_buff *opt_skb = NULL;
1238
1239 /* Imagine: socket is IPv6. IPv4 packet arrives,
1240 goes to the IPv4 receive handler and is backlogged.
1241 From backlog it always goes here. Kerboom...
1242 Fortunately, tcp_rcv_established and rcv_established
1243 handle them correctly, but it is not the case with
1244 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1245 */
1246
1247 if (skb->protocol == htons(ETH_P_IP))
1248 return tcp_v4_do_rcv(sk, skb);
1249
1250 #ifdef CONFIG_TCP_MD5SIG
1251 if (tcp_v6_inbound_md5_hash(sk, skb))
1252 goto discard;
1253 #endif
1254
1255 if (sk_filter(sk, skb))
1256 goto discard;
1257
1258 /*
1259 * socket locking is here for SMP purposes as backlog rcv
1260 * is currently called with bh processing disabled.
1261 */
1262
1263 /* Do Stevens' IPV6_PKTOPTIONS.
1264
1265 Yes, guys, it is the only place in our code, where we
1266 may do this without affecting IPv4.
1267 The rest of the code is protocol independent,
1268 and I do not like the idea of uglifying IPv4.
1269
1270 Actually, all the idea behind IPV6_PKTOPTIONS
1271 does not look very well thought out. For now we latch
1272 the options received in the last packet enqueued
1273 by tcp. Feel free to propose a better solution.
1274 --ANK (980728)
1275 */
1276 if (np->rxopt.all)
1277 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1278
1279 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1280 struct dst_entry *dst = sk->sk_rx_dst;
1281
1282 sock_rps_save_rxhash(sk, skb);
1283 if (dst) {
1284 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1285 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1286 dst_release(dst);
1287 sk->sk_rx_dst = NULL;
1288 }
1289 }
1290
1291 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1292 if (opt_skb)
1293 goto ipv6_pktoptions;
1294 return 0;
1295 }
1296
1297 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1298 goto csum_err;
1299
1300 if (sk->sk_state == TCP_LISTEN) {
1301 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1302 if (!nsk)
1303 goto discard;
1304
1305 /*
1306 * Queue it on the new socket if the new socket is active,
1307 * otherwise we just short-circuit this and continue with
1308 * the new socket.
1309 */
1310 if (nsk != sk) {
1311 sock_rps_save_rxhash(nsk, skb);
1312 if (tcp_child_process(sk, nsk, skb))
1313 goto reset;
1314 if (opt_skb)
1315 __kfree_skb(opt_skb);
1316 return 0;
1317 }
1318 } else
1319 sock_rps_save_rxhash(sk, skb);
1320
1321 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1322 goto reset;
1323 if (opt_skb)
1324 goto ipv6_pktoptions;
1325 return 0;
1326
1327 reset:
1328 tcp_v6_send_reset(sk, skb);
1329 discard:
1330 if (opt_skb)
1331 __kfree_skb(opt_skb);
1332 kfree_skb(skb);
1333 return 0;
1334 csum_err:
1335 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1336 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1337 goto discard;
1338
1339
1340 ipv6_pktoptions:
1341 /* Do you ask, what is it?
1342
1343 1. skb was enqueued by tcp.
1344 2. skb is added to tail of read queue, rather than out of order.
1345 3. socket is not in passive state.
1346 4. Finally, it really contains options, which user wants to receive.
1347 */
1348 tp = tcp_sk(sk);
1349 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1350 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1351 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1352 np->mcast_oif = inet6_iif(opt_skb);
1353 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1354 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1355 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1356 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1357 if (np->repflow)
1358 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1359 if (ipv6_opt_accepted(sk, opt_skb)) {
1360 skb_set_owner_r(opt_skb, sk);
1361 opt_skb = xchg(&np->pktoptions, opt_skb);
1362 } else {
1363 __kfree_skb(opt_skb);
1364 opt_skb = xchg(&np->pktoptions, NULL);
1365 }
1366 }
1367
1368 kfree_skb(opt_skb);
1369 return 0;
1370 }
1371
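/* Main receive entry point: validate the header and checksum, look up
 * the owning socket and deliver the segment directly, via the prequeue
 * or via the backlog; TIME_WAIT sockets are handled separately below.
 */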
1372 static int tcp_v6_rcv(struct sk_buff *skb)
1373 {
1374 const struct tcphdr *th;
1375 const struct ipv6hdr *hdr;
1376 struct sock *sk;
1377 int ret;
1378 struct net *net = dev_net(skb->dev);
1379
1380 if (skb->pkt_type != PACKET_HOST)
1381 goto discard_it;
1382
1383 /*
1384 * Count it even if it's bad.
1385 */
1386 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1387
1388 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1389 goto discard_it;
1390
1391 th = tcp_hdr(skb);
1392
1393 if (th->doff < sizeof(struct tcphdr)/4)
1394 goto bad_packet;
1395 if (!pskb_may_pull(skb, th->doff*4))
1396 goto discard_it;
1397
1398 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1399 goto csum_error;
1400
1401 th = tcp_hdr(skb);
1402 hdr = ipv6_hdr(skb);
1403 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1404 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1405 skb->len - th->doff*4);
1406 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1407 TCP_SKB_CB(skb)->when = 0;
1408 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1409 TCP_SKB_CB(skb)->sacked = 0;
1410
1411 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1412 if (!sk)
1413 goto no_tcp_socket;
1414
1415 process:
1416 if (sk->sk_state == TCP_TIME_WAIT)
1417 goto do_time_wait;
1418
1419 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1420 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1421 goto discard_and_relse;
1422 }
1423
1424 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1425 goto discard_and_relse;
1426
1427 if (sk_filter(sk, skb))
1428 goto discard_and_relse;
1429
1430 sk_mark_napi_id(sk, skb);
1431 skb->dev = NULL;
1432
1433 bh_lock_sock_nested(sk);
1434 ret = 0;
1435 if (!sock_owned_by_user(sk)) {
1436 #ifdef CONFIG_NET_DMA
1437 struct tcp_sock *tp = tcp_sk(sk);
1438 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1439 tp->ucopy.dma_chan = net_dma_find_channel();
1440 if (tp->ucopy.dma_chan)
1441 ret = tcp_v6_do_rcv(sk, skb);
1442 else
1443 #endif
1444 {
1445 if (!tcp_prequeue(sk, skb))
1446 ret = tcp_v6_do_rcv(sk, skb);
1447 }
1448 } else if (unlikely(sk_add_backlog(sk, skb,
1449 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1450 bh_unlock_sock(sk);
1451 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1452 goto discard_and_relse;
1453 }
1454 bh_unlock_sock(sk);
1455
1456 sock_put(sk);
1457 return ret ? -1 : 0;
1458
1459 no_tcp_socket:
1460 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1461 goto discard_it;
1462
1463 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1464 csum_error:
1465 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1466 bad_packet:
1467 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1468 } else {
1469 tcp_v6_send_reset(NULL, skb);
1470 }
1471
1472 discard_it:
1473 kfree_skb(skb);
1474 return 0;
1475
1476 discard_and_relse:
1477 sock_put(sk);
1478 goto discard_it;
1479
1480 do_time_wait:
1481 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1482 inet_twsk_put(inet_twsk(sk));
1483 goto discard_it;
1484 }
1485
1486 if (skb->len < (th->doff<<2)) {
1487 inet_twsk_put(inet_twsk(sk));
1488 goto bad_packet;
1489 }
1490 if (tcp_checksum_complete(skb)) {
1491 inet_twsk_put(inet_twsk(sk));
1492 goto csum_error;
1493 }
1494
1495 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1496 case TCP_TW_SYN:
1497 {
1498 struct sock *sk2;
1499
1500 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1501 &ipv6_hdr(skb)->saddr, th->source,
1502 &ipv6_hdr(skb)->daddr,
1503 ntohs(th->dest), inet6_iif(skb));
1504 if (sk2 != NULL) {
1505 struct inet_timewait_sock *tw = inet_twsk(sk);
1506 inet_twsk_deschedule(tw, &tcp_death_row);
1507 inet_twsk_put(tw);
1508 sk = sk2;
1509 goto process;
1510 }
1511 /* Fall through to ACK */
1512 }
1513 case TCP_TW_ACK:
1514 tcp_v6_timewait_ack(sk, skb);
1515 break;
1516 case TCP_TW_RST:
1517 goto no_tcp_socket;
1518 case TCP_TW_SUCCESS:
1519 ;
1520 }
1521 goto discard_it;
1522 }
1523
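/* Early demux: look up an established socket before routing so that
 * its cached rx dst can be attached to the skb.
 */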
1524 static void tcp_v6_early_demux(struct sk_buff *skb)
1525 {
1526 const struct ipv6hdr *hdr;
1527 const struct tcphdr *th;
1528 struct sock *sk;
1529
1530 if (skb->pkt_type != PACKET_HOST)
1531 return;
1532
1533 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1534 return;
1535
1536 hdr = ipv6_hdr(skb);
1537 th = tcp_hdr(skb);
1538
1539 if (th->doff < sizeof(struct tcphdr) / 4)
1540 return;
1541
1542 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1543 &hdr->saddr, th->source,
1544 &hdr->daddr, ntohs(th->dest),
1545 inet6_iif(skb));
1546 if (sk) {
1547 skb->sk = sk;
1548 skb->destructor = sock_edemux;
1549 if (sk->sk_state != TCP_TIME_WAIT) {
1550 struct dst_entry *dst = sk->sk_rx_dst;
1551
1552 if (dst)
1553 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1554 if (dst &&
1555 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1556 skb_dst_set_noref(skb, dst);
1557 }
1558 }
1559 }
1560
1561 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1562 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1563 .twsk_unique = tcp_twsk_unique,
1564 .twsk_destructor = tcp_twsk_destructor,
1565 };
1566
1567 static const struct inet_connection_sock_af_ops ipv6_specific = {
1568 .queue_xmit = inet6_csk_xmit,
1569 .send_check = tcp_v6_send_check,
1570 .rebuild_header = inet6_sk_rebuild_header,
1571 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1572 .conn_request = tcp_v6_conn_request,
1573 .syn_recv_sock = tcp_v6_syn_recv_sock,
1574 .net_header_len = sizeof(struct ipv6hdr),
1575 .net_frag_header_len = sizeof(struct frag_hdr),
1576 .setsockopt = ipv6_setsockopt,
1577 .getsockopt = ipv6_getsockopt,
1578 .addr2sockaddr = inet6_csk_addr2sockaddr,
1579 .sockaddr_len = sizeof(struct sockaddr_in6),
1580 .bind_conflict = inet6_csk_bind_conflict,
1581 #ifdef CONFIG_COMPAT
1582 .compat_setsockopt = compat_ipv6_setsockopt,
1583 .compat_getsockopt = compat_ipv6_getsockopt,
1584 #endif
1585 };
1586
1587 #ifdef CONFIG_TCP_MD5SIG
1588 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1589 .md5_lookup = tcp_v6_md5_lookup,
1590 .calc_md5_hash = tcp_v6_md5_hash_skb,
1591 .md5_parse = tcp_v6_parse_md5_keys,
1592 };
1593 #endif
1594
1595 /*
1596 * TCP over IPv4 via INET6 API
1597 */
1598 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1599 .queue_xmit = ip_queue_xmit,
1600 .send_check = tcp_v4_send_check,
1601 .rebuild_header = inet_sk_rebuild_header,
1602 .sk_rx_dst_set = inet_sk_rx_dst_set,
1603 .conn_request = tcp_v6_conn_request,
1604 .syn_recv_sock = tcp_v6_syn_recv_sock,
1605 .net_header_len = sizeof(struct iphdr),
1606 .setsockopt = ipv6_setsockopt,
1607 .getsockopt = ipv6_getsockopt,
1608 .addr2sockaddr = inet6_csk_addr2sockaddr,
1609 .sockaddr_len = sizeof(struct sockaddr_in6),
1610 .bind_conflict = inet6_csk_bind_conflict,
1611 #ifdef CONFIG_COMPAT
1612 .compat_setsockopt = compat_ipv6_setsockopt,
1613 .compat_getsockopt = compat_ipv6_getsockopt,
1614 #endif
1615 };
1616
1617 #ifdef CONFIG_TCP_MD5SIG
1618 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1619 .md5_lookup = tcp_v4_md5_lookup,
1620 .calc_md5_hash = tcp_v4_md5_hash_skb,
1621 .md5_parse = tcp_v6_parse_md5_keys,
1622 };
1623 #endif
1624
1625 /* NOTE: A lot of things are set to zero explicitly by the call to
1626 * sk_alloc() so need not be done here.
1627 */
1628 static int tcp_v6_init_sock(struct sock *sk)
1629 {
1630 struct inet_connection_sock *icsk = inet_csk(sk);
1631
1632 tcp_init_sock(sk);
1633
1634 icsk->icsk_af_ops = &ipv6_specific;
1635
1636 #ifdef CONFIG_TCP_MD5SIG
1637 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1638 #endif
1639
1640 return 0;
1641 }
1642
1643 static void tcp_v6_destroy_sock(struct sock *sk)
1644 {
1645 tcp_v4_destroy_sock(sk);
1646 inet6_destroy_sock(sk);
1647 }
1648
1649 #ifdef CONFIG_PROC_FS
1650 /* Proc filesystem TCPv6 sock list dumping. */
1651 static void get_openreq6(struct seq_file *seq,
1652 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
1653 {
1654 int ttd = req->expires - jiffies;
1655 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1656 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1657
1658 if (ttd < 0)
1659 ttd = 0;
1660
1661 seq_printf(seq,
1662 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1663 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1664 i,
1665 src->s6_addr32[0], src->s6_addr32[1],
1666 src->s6_addr32[2], src->s6_addr32[3],
1667 inet_rsk(req)->ir_num,
1668 dest->s6_addr32[0], dest->s6_addr32[1],
1669 dest->s6_addr32[2], dest->s6_addr32[3],
1670 ntohs(inet_rsk(req)->ir_rmt_port),
1671 TCP_SYN_RECV,
1672 0, 0, /* could print option size, but that is af dependent. */
1673 1, /* timers active (only the expire timer) */
1674 jiffies_to_clock_t(ttd),
1675 req->num_timeout,
1676 from_kuid_munged(seq_user_ns(seq), uid),
1677 0, /* non standard timer */
1678 0, /* open_requests have no inode */
1679 0, req);
1680 }
1681
1682 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1683 {
1684 const struct in6_addr *dest, *src;
1685 __u16 destp, srcp;
1686 int timer_active;
1687 unsigned long timer_expires;
1688 const struct inet_sock *inet = inet_sk(sp);
1689 const struct tcp_sock *tp = tcp_sk(sp);
1690 const struct inet_connection_sock *icsk = inet_csk(sp);
1691 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1692
1693 dest = &sp->sk_v6_daddr;
1694 src = &sp->sk_v6_rcv_saddr;
1695 destp = ntohs(inet->inet_dport);
1696 srcp = ntohs(inet->inet_sport);
1697
1698 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1699 timer_active = 1;
1700 timer_expires = icsk->icsk_timeout;
1701 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1702 timer_active = 4;
1703 timer_expires = icsk->icsk_timeout;
1704 } else if (timer_pending(&sp->sk_timer)) {
1705 timer_active = 2;
1706 timer_expires = sp->sk_timer.expires;
1707 } else {
1708 timer_active = 0;
1709 timer_expires = jiffies;
1710 }
1711
1712 seq_printf(seq,
1713 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1714 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1715 i,
1716 src->s6_addr32[0], src->s6_addr32[1],
1717 src->s6_addr32[2], src->s6_addr32[3], srcp,
1718 dest->s6_addr32[0], dest->s6_addr32[1],
1719 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1720 sp->sk_state,
1721 tp->write_seq-tp->snd_una,
1722 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1723 timer_active,
1724 jiffies_delta_to_clock_t(timer_expires - jiffies),
1725 icsk->icsk_retransmits,
1726 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1727 icsk->icsk_probes_out,
1728 sock_i_ino(sp),
1729 atomic_read(&sp->sk_refcnt), sp,
1730 jiffies_to_clock_t(icsk->icsk_rto),
1731 jiffies_to_clock_t(icsk->icsk_ack.ato),
1732 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1733 tp->snd_cwnd,
1734 sp->sk_state == TCP_LISTEN ?
1735 (fastopenq ? fastopenq->max_qlen : 0) :
1736 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1737 );
1738 }
1739
1740 static void get_timewait6_sock(struct seq_file *seq,
1741 struct inet_timewait_sock *tw, int i)
1742 {
1743 const struct in6_addr *dest, *src;
1744 __u16 destp, srcp;
1745 s32 delta = tw->tw_ttd - inet_tw_time_stamp();
1746
1747 dest = &tw->tw_v6_daddr;
1748 src = &tw->tw_v6_rcv_saddr;
1749 destp = ntohs(tw->tw_dport);
1750 srcp = ntohs(tw->tw_sport);
1751
1752 seq_printf(seq,
1753 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1754 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1755 i,
1756 src->s6_addr32[0], src->s6_addr32[1],
1757 src->s6_addr32[2], src->s6_addr32[3], srcp,
1758 dest->s6_addr32[0], dest->s6_addr32[1],
1759 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1760 tw->tw_substate, 0, 0,
1761 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1762 atomic_read(&tw->tw_refcnt), tw);
1763 }
1764
1765 static int tcp6_seq_show(struct seq_file *seq, void *v)
1766 {
1767 struct tcp_iter_state *st;
1768 struct sock *sk = v;
1769
1770 if (v == SEQ_START_TOKEN) {
1771 seq_puts(seq,
1772 " sl "
1773 "local_address "
1774 "remote_address "
1775 "st tx_queue rx_queue tr tm->when retrnsmt"
1776 " uid timeout inode\n");
1777 goto out;
1778 }
1779 st = seq->private;
1780
1781 switch (st->state) {
1782 case TCP_SEQ_STATE_LISTENING:
1783 case TCP_SEQ_STATE_ESTABLISHED:
1784 if (sk->sk_state == TCP_TIME_WAIT)
1785 get_timewait6_sock(seq, v, st->num);
1786 else
1787 get_tcp6_sock(seq, v, st->num);
1788 break;
1789 case TCP_SEQ_STATE_OPENREQ:
1790 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1791 break;
1792 }
1793 out:
1794 return 0;
1795 }
1796
1797 static const struct file_operations tcp6_afinfo_seq_fops = {
1798 .owner = THIS_MODULE,
1799 .open = tcp_seq_open,
1800 .read = seq_read,
1801 .llseek = seq_lseek,
1802 .release = seq_release_net
1803 };
1804
1805 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1806 .name = "tcp6",
1807 .family = AF_INET6,
1808 .seq_fops = &tcp6_afinfo_seq_fops,
1809 .seq_ops = {
1810 .show = tcp6_seq_show,
1811 },
1812 };
1813
1814 int __net_init tcp6_proc_init(struct net *net)
1815 {
1816 return tcp_proc_register(net, &tcp6_seq_afinfo);
1817 }
1818
1819 void tcp6_proc_exit(struct net *net)
1820 {
1821 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1822 }
1823 #endif
1824
1825 static void tcp_v6_clear_sk(struct sock *sk, int size)
1826 {
1827 struct inet_sock *inet = inet_sk(sk);
1828
1829 /* we do not want to clear pinet6 field, because of RCU lookups */
1830 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1831
1832 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1833 memset(&inet->pinet6 + 1, 0, size);
1834 }
1835
1836 struct proto tcpv6_prot = {
1837 .name = "TCPv6",
1838 .owner = THIS_MODULE,
1839 .close = tcp_close,
1840 .connect = tcp_v6_connect,
1841 .disconnect = tcp_disconnect,
1842 .accept = inet_csk_accept,
1843 .ioctl = tcp_ioctl,
1844 .init = tcp_v6_init_sock,
1845 .destroy = tcp_v6_destroy_sock,
1846 .shutdown = tcp_shutdown,
1847 .setsockopt = tcp_setsockopt,
1848 .getsockopt = tcp_getsockopt,
1849 .recvmsg = tcp_recvmsg,
1850 .sendmsg = tcp_sendmsg,
1851 .sendpage = tcp_sendpage,
1852 .backlog_rcv = tcp_v6_do_rcv,
1853 .release_cb = tcp_release_cb,
1854 .mtu_reduced = tcp_v6_mtu_reduced,
1855 .hash = tcp_v6_hash,
1856 .unhash = inet_unhash,
1857 .get_port = inet_csk_get_port,
1858 .enter_memory_pressure = tcp_enter_memory_pressure,
1859 .stream_memory_free = tcp_stream_memory_free,
1860 .sockets_allocated = &tcp_sockets_allocated,
1861 .memory_allocated = &tcp_memory_allocated,
1862 .memory_pressure = &tcp_memory_pressure,
1863 .orphan_count = &tcp_orphan_count,
1864 .sysctl_mem = sysctl_tcp_mem,
1865 .sysctl_wmem = sysctl_tcp_wmem,
1866 .sysctl_rmem = sysctl_tcp_rmem,
1867 .max_header = MAX_TCP_HEADER,
1868 .obj_size = sizeof(struct tcp6_sock),
1869 .slab_flags = SLAB_DESTROY_BY_RCU,
1870 .twsk_prot = &tcp6_timewait_sock_ops,
1871 .rsk_prot = &tcp6_request_sock_ops,
1872 .h.hashinfo = &tcp_hashinfo,
1873 .no_autobind = true,
1874 #ifdef CONFIG_COMPAT
1875 .compat_setsockopt = compat_tcp_setsockopt,
1876 .compat_getsockopt = compat_tcp_getsockopt,
1877 #endif
1878 #ifdef CONFIG_MEMCG_KMEM
1879 .proto_cgroup = tcp_proto_cgroup,
1880 #endif
1881 .clear_sk = tcp_v6_clear_sk,
1882 };
1883
1884 static const struct inet6_protocol tcpv6_protocol = {
1885 .early_demux = tcp_v6_early_demux,
1886 .handler = tcp_v6_rcv,
1887 .err_handler = tcp_v6_err,
1888 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1889 };
1890
1891 static struct inet_protosw tcpv6_protosw = {
1892 .type = SOCK_STREAM,
1893 .protocol = IPPROTO_TCP,
1894 .prot = &tcpv6_prot,
1895 .ops = &inet6_stream_ops,
1896 .flags = INET_PROTOSW_PERMANENT |
1897 INET_PROTOSW_ICSK,
1898 };
1899
1900 static int __net_init tcpv6_net_init(struct net *net)
1901 {
1902 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1903 SOCK_RAW, IPPROTO_TCP, net);
1904 }
1905
1906 static void __net_exit tcpv6_net_exit(struct net *net)
1907 {
1908 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1909 }
1910
1911 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1912 {
1913 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1914 }
1915
1916 static struct pernet_operations tcpv6_net_ops = {
1917 .init = tcpv6_net_init,
1918 .exit = tcpv6_net_exit,
1919 .exit_batch = tcpv6_net_exit_batch,
1920 };
1921
1922 int __init tcpv6_init(void)
1923 {
1924 int ret;
1925
1926 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1927 if (ret)
1928 goto out;
1929
1930 /* register inet6 protocol */
1931 ret = inet6_register_protosw(&tcpv6_protosw);
1932 if (ret)
1933 goto out_tcpv6_protocol;
1934
1935 ret = register_pernet_subsys(&tcpv6_net_ops);
1936 if (ret)
1937 goto out_tcpv6_protosw;
1938 out:
1939 return ret;
1940
1941 out_tcpv6_protosw:
1942 inet6_unregister_protosw(&tcpv6_protosw);
1943 out_tcpv6_protocol:
1944 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1945 goto out;
1946 }
1947
1948 void tcpv6_exit(void)
1949 {
1950 unregister_pernet_subsys(&tcpv6_net_ops);
1951 inet6_unregister_protosw(&tcpv6_protosw);
1952 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1953 }