tcp md5sig: Remove redundant protocol argument.
net/ipv6/tcp_ipv6.c
1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on:
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
14 *
15 * Fixes:
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 *	YOSHIFUJI Hideaki @USAGI and Alexey Kuznetsov:
18 *				Support the IPV6_V6ONLY socket option, which allows
19 *				both IPv4 and IPv6 sockets to bind a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
35 #include <linux/in.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64
65 #include <asm/uaccess.h>
66
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72
73 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
75 static void tcp_v6_send_check(struct sock *sk, int len,
76 struct sk_buff *skb);
77
78 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79
80 static struct inet_connection_sock_af_ops ipv6_mapped;
81 static struct inet_connection_sock_af_ops ipv6_specific;
82 #ifdef CONFIG_TCP_MD5SIG
83 static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
85 #endif
86
87 static void tcp_v6_hash(struct sock *sk)
88 {
89 if (sk->sk_state != TCP_CLOSE) {
90 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
91 tcp_prot.hash(sk);
92 return;
93 }
94 local_bh_disable();
95 __inet6_hash(sk);
96 local_bh_enable();
97 }
98 }
99
100 static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
101 struct in6_addr *saddr,
102 struct in6_addr *daddr,
103 __wsum base)
104 {
105 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
106 }
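/*
 * A minimal standalone sketch (userspace, not part of this file) of the
 * RFC 2460 section 8.1 pseudo-header checksum that csum_ipv6_magic()
 * computes for tcp_v6_check() above: a 16-bit one's-complement sum over
 * source address, destination address, upper-layer length, next-header
 * value, and the TCP segment itself.  Function names are illustrative.
 */
#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h>

static uint32_t sum_be16(const uint8_t *b, size_t n, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < n; i += 2)
		sum += ((uint32_t)b[i] << 8) | b[i + 1];
	if (n & 1)				/* odd trailing byte is zero-padded */
		sum += (uint32_t)b[n - 1] << 8;
	return sum;
}

static uint16_t tcp6_checksum(const struct in6_addr *saddr,
			      const struct in6_addr *daddr,
			      const void *seg, size_t len)
{
	uint32_t sum = 0;

	sum = sum_be16(saddr->s6_addr, 16, sum);	/* pseudo-header: saddr */
	sum = sum_be16(daddr->s6_addr, 16, sum);	/* pseudo-header: daddr */
	sum += (uint32_t)len >> 16;			/* 32-bit upper-layer length, */
	sum += (uint32_t)len & 0xffff;			/* added as two 16-bit words */
	sum += IPPROTO_TCP;				/* next-header value */
	sum = sum_be16(seg, len, sum);			/* TCP header + payload */

	while (sum >> 16)				/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;				/* one's complement */
}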
107
108 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
109 {
110 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
111 ipv6_hdr(skb)->saddr.s6_addr32,
112 tcp_hdr(skb)->dest,
113 tcp_hdr(skb)->source);
114 }
115
116 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
117 int addr_len)
118 {
119 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
120 struct inet_sock *inet = inet_sk(sk);
121 struct inet_connection_sock *icsk = inet_csk(sk);
122 struct ipv6_pinfo *np = inet6_sk(sk);
123 struct tcp_sock *tp = tcp_sk(sk);
124 struct in6_addr *saddr = NULL, *final_p = NULL, final;
125 struct flowi fl;
126 struct dst_entry *dst;
127 int addr_type;
128 int err;
129
130 if (addr_len < SIN6_LEN_RFC2133)
131 return -EINVAL;
132
133 if (usin->sin6_family != AF_INET6)
134 return(-EAFNOSUPPORT);
135
136 memset(&fl, 0, sizeof(fl));
137
138 if (np->sndflow) {
139 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
140 IP6_ECN_flow_init(fl.fl6_flowlabel);
141 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
142 struct ip6_flowlabel *flowlabel;
143 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
144 if (flowlabel == NULL)
145 return -EINVAL;
146 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
147 fl6_sock_release(flowlabel);
148 }
149 }
150
151 /*
152 * connect() to INADDR_ANY means loopback (BSD'ism).
153 */
154
155 if(ipv6_addr_any(&usin->sin6_addr))
156 usin->sin6_addr.s6_addr[15] = 0x1;
157
158 addr_type = ipv6_addr_type(&usin->sin6_addr);
159
160 if(addr_type & IPV6_ADDR_MULTICAST)
161 return -ENETUNREACH;
162
163 if (addr_type&IPV6_ADDR_LINKLOCAL) {
164 if (addr_len >= sizeof(struct sockaddr_in6) &&
165 usin->sin6_scope_id) {
166 /* If interface is set while binding, indices
167 * must coincide.
168 */
169 if (sk->sk_bound_dev_if &&
170 sk->sk_bound_dev_if != usin->sin6_scope_id)
171 return -EINVAL;
172
173 sk->sk_bound_dev_if = usin->sin6_scope_id;
174 }
175
176 /* Connect to link-local address requires an interface */
177 if (!sk->sk_bound_dev_if)
178 return -EINVAL;
179 }
180
181 if (tp->rx_opt.ts_recent_stamp &&
182 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
183 tp->rx_opt.ts_recent = 0;
184 tp->rx_opt.ts_recent_stamp = 0;
185 tp->write_seq = 0;
186 }
187
188 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
189 np->flow_label = fl.fl6_flowlabel;
190
191 /*
192 * TCP over IPv4
193 */
194
195 if (addr_type == IPV6_ADDR_MAPPED) {
196 u32 exthdrlen = icsk->icsk_ext_hdr_len;
197 struct sockaddr_in sin;
198
199 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
200
201 if (__ipv6_only_sock(sk))
202 return -ENETUNREACH;
203
204 sin.sin_family = AF_INET;
205 sin.sin_port = usin->sin6_port;
206 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
207
208 icsk->icsk_af_ops = &ipv6_mapped;
209 sk->sk_backlog_rcv = tcp_v4_do_rcv;
210 #ifdef CONFIG_TCP_MD5SIG
211 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
212 #endif
213
214 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
215
216 if (err) {
217 icsk->icsk_ext_hdr_len = exthdrlen;
218 icsk->icsk_af_ops = &ipv6_specific;
219 sk->sk_backlog_rcv = tcp_v6_do_rcv;
220 #ifdef CONFIG_TCP_MD5SIG
221 tp->af_specific = &tcp_sock_ipv6_specific;
222 #endif
223 goto failure;
224 } else {
225 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
226 inet->saddr);
227 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
228 inet->rcv_saddr);
229 }
230
231 return err;
232 }
233
234 if (!ipv6_addr_any(&np->rcv_saddr))
235 saddr = &np->rcv_saddr;
236
237 fl.proto = IPPROTO_TCP;
238 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
239 ipv6_addr_copy(&fl.fl6_src,
240 (saddr ? saddr : &np->saddr));
241 fl.oif = sk->sk_bound_dev_if;
242 fl.fl_ip_dport = usin->sin6_port;
243 fl.fl_ip_sport = inet->sport;
244
245 if (np->opt && np->opt->srcrt) {
246 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
247 ipv6_addr_copy(&final, &fl.fl6_dst);
248 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
249 final_p = &final;
250 }
251
252 security_sk_classify_flow(sk, &fl);
253
254 err = ip6_dst_lookup(sk, &dst, &fl);
255 if (err)
256 goto failure;
257 if (final_p)
258 ipv6_addr_copy(&fl.fl6_dst, final_p);
259
260 if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
261 if (err == -EREMOTE)
262 err = ip6_dst_blackhole(sk, &dst, &fl);
263 if (err < 0)
264 goto failure;
265 }
266
267 if (saddr == NULL) {
268 saddr = &fl.fl6_src;
269 ipv6_addr_copy(&np->rcv_saddr, saddr);
270 }
271
272 /* set the source address */
273 ipv6_addr_copy(&np->saddr, saddr);
274 inet->rcv_saddr = LOOPBACK4_IPV6;
275
276 sk->sk_gso_type = SKB_GSO_TCPV6;
277 __ip6_dst_store(sk, dst, NULL, NULL);
278
279 icsk->icsk_ext_hdr_len = 0;
280 if (np->opt)
281 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
282 np->opt->opt_nflen);
283
284 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
285
286 inet->dport = usin->sin6_port;
287
288 tcp_set_state(sk, TCP_SYN_SENT);
289 err = inet6_hash_connect(&tcp_death_row, sk);
290 if (err)
291 goto late_failure;
292
293 if (!tp->write_seq)
294 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
295 np->daddr.s6_addr32,
296 inet->sport,
297 inet->dport);
298
299 err = tcp_connect(sk);
300 if (err)
301 goto late_failure;
302
303 return 0;
304
305 late_failure:
306 tcp_set_state(sk, TCP_CLOSE);
307 __sk_dst_reset(sk);
308 failure:
309 inet->dport = 0;
310 sk->sk_route_caps = 0;
311 return err;
312 }
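/*
 * A hedged userspace sketch of the IPV6_ADDR_MAPPED branch above:
 * connecting an AF_INET6 socket to a v4-mapped destination
 * (::ffff:a.b.c.d) makes the kernel fall back to tcp_v4_connect(),
 * unless IPV6_V6ONLY is set, in which case connect() fails with
 * ENETUNREACH.  Address and port are examples only.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	struct sockaddr_in6 dst;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_port = htons(80);
	/* v4-mapped form: routed over IPv4 by the code above */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &dst.sin6_addr);

	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");	/* ENETUNREACH if IPV6_V6ONLY is set */
	close(fd);
	return 0;
}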
313
314 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
315 int type, int code, int offset, __be32 info)
316 {
317 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
318 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
319 struct ipv6_pinfo *np;
320 struct sock *sk;
321 int err;
322 struct tcp_sock *tp;
323 __u32 seq;
324
325 sk = inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, &hdr->daddr,
326 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
327
328 if (sk == NULL) {
329 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
330 return;
331 }
332
333 if (sk->sk_state == TCP_TIME_WAIT) {
334 inet_twsk_put(inet_twsk(sk));
335 return;
336 }
337
338 bh_lock_sock(sk);
339 if (sock_owned_by_user(sk))
340 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
341
342 if (sk->sk_state == TCP_CLOSE)
343 goto out;
344
345 tp = tcp_sk(sk);
346 seq = ntohl(th->seq);
347 if (sk->sk_state != TCP_LISTEN &&
348 !between(seq, tp->snd_una, tp->snd_nxt)) {
349 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
350 goto out;
351 }
352
353 np = inet6_sk(sk);
354
355 if (type == ICMPV6_PKT_TOOBIG) {
356 struct dst_entry *dst = NULL;
357
358 if (sock_owned_by_user(sk))
359 goto out;
360 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
361 goto out;
362
363 /* icmp should have updated the destination cache entry */
364 dst = __sk_dst_check(sk, np->dst_cookie);
365
366 if (dst == NULL) {
367 struct inet_sock *inet = inet_sk(sk);
368 struct flowi fl;
369
370 /* BUGGG_FUTURE: Again, it is not clear how
371 to handle rthdr case. Ignore this complexity
372 for now.
373 */
374 memset(&fl, 0, sizeof(fl));
375 fl.proto = IPPROTO_TCP;
376 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
377 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
378 fl.oif = sk->sk_bound_dev_if;
379 fl.fl_ip_dport = inet->dport;
380 fl.fl_ip_sport = inet->sport;
381 security_skb_classify_flow(skb, &fl);
382
383 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
384 sk->sk_err_soft = -err;
385 goto out;
386 }
387
388 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
389 sk->sk_err_soft = -err;
390 goto out;
391 }
392
393 } else
394 dst_hold(dst);
395
396 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
397 tcp_sync_mss(sk, dst_mtu(dst));
398 tcp_simple_retransmit(sk);
399 } /* else let the usual retransmit timer handle it */
400 dst_release(dst);
401 goto out;
402 }
403
404 icmpv6_err_convert(type, code, &err);
405
406 	/* Might be for a request_sock */
407 switch (sk->sk_state) {
408 struct request_sock *req, **prev;
409 case TCP_LISTEN:
410 if (sock_owned_by_user(sk))
411 goto out;
412
413 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
414 &hdr->saddr, inet6_iif(skb));
415 if (!req)
416 goto out;
417
418 /* ICMPs are not backlogged, hence we cannot get
419 * an established socket here.
420 */
421 BUG_TRAP(req->sk == NULL);
422
423 if (seq != tcp_rsk(req)->snt_isn) {
424 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
425 goto out;
426 }
427
428 inet_csk_reqsk_queue_drop(sk, req, prev);
429 goto out;
430
431 case TCP_SYN_SENT:
432 case TCP_SYN_RECV: /* Cannot happen.
433 			       It can, if SYNs are crossed. --ANK */
434 if (!sock_owned_by_user(sk)) {
435 sk->sk_err = err;
436 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
437
438 tcp_done(sk);
439 } else
440 sk->sk_err_soft = err;
441 goto out;
442 }
443
444 if (!sock_owned_by_user(sk) && np->recverr) {
445 sk->sk_err = err;
446 sk->sk_error_report(sk);
447 } else
448 sk->sk_err_soft = err;
449
450 out:
451 bh_unlock_sock(sk);
452 sock_put(sk);
453 }
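/*
 * The np->recverr path above queues the converted ICMPv6 error on the
 * socket's error queue.  A hedged userspace sketch of draining it,
 * assuming IPV6_RECVERR was first enabled with setsockopt(); the
 * function name is illustrative:
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

static void drain_errqueue(int fd)
{
	char buf[64], ctrl[256];
	struct iovec iov = { buf, sizeof(buf) };
	struct msghdr msg;
	struct cmsghdr *cm;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = ctrl;
	msg.msg_controllen = sizeof(ctrl);

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_IPV6 &&
		    cm->cmsg_type == IPV6_RECVERR) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cm);

			printf("icmp6 type %u code %u -> errno %u\n",
			       ee->ee_type, ee->ee_code, ee->ee_errno);
		}
}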
454
455
456 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
457 {
458 struct inet6_request_sock *treq = inet6_rsk(req);
459 struct ipv6_pinfo *np = inet6_sk(sk);
460 struct sk_buff * skb;
461 struct ipv6_txoptions *opt = NULL;
462 struct in6_addr * final_p = NULL, final;
463 struct flowi fl;
464 struct dst_entry *dst;
465 int err = -1;
466
467 memset(&fl, 0, sizeof(fl));
468 fl.proto = IPPROTO_TCP;
469 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
470 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
471 fl.fl6_flowlabel = 0;
472 fl.oif = treq->iif;
473 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
474 fl.fl_ip_sport = inet_sk(sk)->sport;
475 security_req_classify_flow(req, &fl);
476
477 opt = np->opt;
478 if (opt && opt->srcrt) {
479 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
480 ipv6_addr_copy(&final, &fl.fl6_dst);
481 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
482 final_p = &final;
483 }
484
485 err = ip6_dst_lookup(sk, &dst, &fl);
486 if (err)
487 goto done;
488 if (final_p)
489 ipv6_addr_copy(&fl.fl6_dst, final_p);
490 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
491 goto done;
492
493 skb = tcp_make_synack(sk, dst, req);
494 if (skb) {
495 struct tcphdr *th = tcp_hdr(skb);
496
497 th->check = tcp_v6_check(th, skb->len,
498 &treq->loc_addr, &treq->rmt_addr,
499 csum_partial((char *)th, skb->len, skb->csum));
500
501 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
502 err = ip6_xmit(sk, skb, &fl, opt, 0);
503 err = net_xmit_eval(err);
504 }
505
506 done:
507 if (opt && opt != np->opt)
508 sock_kfree_s(sk, opt, opt->tot_len);
509 dst_release(dst);
510 return err;
511 }
512
513 static inline void syn_flood_warning(struct sk_buff *skb)
514 {
515 #ifdef CONFIG_SYN_COOKIES
516 if (sysctl_tcp_syncookies)
517 printk(KERN_INFO
518 "TCPv6: Possible SYN flooding on port %d. "
519 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
520 else
521 #endif
522 printk(KERN_INFO
523 "TCPv6: Possible SYN flooding on port %d. "
524 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
525 }
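/*
 * sysctl_tcp_syncookies above is the (IPv4-shared) net.ipv4.tcp_syncookies
 * knob.  A small sketch of flipping it from userspace via procfs; treat
 * the write as illustrative (requires root):
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* overflowing SYNs get cookies, not drops */
	fclose(f);
	return 0;
}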
526
527 static void tcp_v6_reqsk_destructor(struct request_sock *req)
528 {
529 if (inet6_rsk(req)->pktopts)
530 kfree_skb(inet6_rsk(req)->pktopts);
531 }
532
533 #ifdef CONFIG_TCP_MD5SIG
534 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
535 struct in6_addr *addr)
536 {
537 struct tcp_sock *tp = tcp_sk(sk);
538 int i;
539
540 BUG_ON(tp == NULL);
541
542 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
543 return NULL;
544
545 for (i = 0; i < tp->md5sig_info->entries6; i++) {
546 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
547 return &tp->md5sig_info->keys6[i].base;
548 }
549 return NULL;
550 }
551
552 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
553 struct sock *addr_sk)
554 {
555 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
556 }
557
558 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
559 struct request_sock *req)
560 {
561 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
562 }
563
564 static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
565 char *newkey, u8 newkeylen)
566 {
567 /* Add key to the list */
568 struct tcp_md5sig_key *key;
569 struct tcp_sock *tp = tcp_sk(sk);
570 struct tcp6_md5sig_key *keys;
571
572 key = tcp_v6_md5_do_lookup(sk, peer);
573 if (key) {
574 /* modify existing entry - just update that one */
575 kfree(key->key);
576 key->key = newkey;
577 key->keylen = newkeylen;
578 } else {
579 /* reallocate new list if current one is full. */
580 if (!tp->md5sig_info) {
581 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
582 if (!tp->md5sig_info) {
583 kfree(newkey);
584 return -ENOMEM;
585 }
586 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
587 }
588 if (tcp_alloc_md5sig_pool() == NULL) {
589 kfree(newkey);
590 return -ENOMEM;
591 }
592 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
593 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
594 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
595
596 if (!keys) {
597 tcp_free_md5sig_pool();
598 kfree(newkey);
599 return -ENOMEM;
600 }
601
602 if (tp->md5sig_info->entries6)
603 memmove(keys, tp->md5sig_info->keys6,
604 (sizeof (tp->md5sig_info->keys6[0]) *
605 tp->md5sig_info->entries6));
606
607 kfree(tp->md5sig_info->keys6);
608 tp->md5sig_info->keys6 = keys;
609 tp->md5sig_info->alloced6++;
610 }
611
612 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
613 peer);
614 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
615 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
616
617 tp->md5sig_info->entries6++;
618 }
619 return 0;
620 }
621
622 static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
623 u8 *newkey, __u8 newkeylen)
624 {
625 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
626 newkey, newkeylen);
627 }
628
629 static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
630 {
631 struct tcp_sock *tp = tcp_sk(sk);
632 int i;
633
634 for (i = 0; i < tp->md5sig_info->entries6; i++) {
635 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
636 /* Free the key */
637 kfree(tp->md5sig_info->keys6[i].base.key);
638 tp->md5sig_info->entries6--;
639
640 if (tp->md5sig_info->entries6 == 0) {
641 kfree(tp->md5sig_info->keys6);
642 tp->md5sig_info->keys6 = NULL;
643 tp->md5sig_info->alloced6 = 0;
644 } else {
645 /* shrink the database */
646 if (tp->md5sig_info->entries6 != i)
647 memmove(&tp->md5sig_info->keys6[i],
648 &tp->md5sig_info->keys6[i+1],
649 (tp->md5sig_info->entries6 - i)
650 * sizeof (tp->md5sig_info->keys6[0]));
651 }
652 tcp_free_md5sig_pool();
653 return 0;
654 }
655 }
656 return -ENOENT;
657 }
658
659 static void tcp_v6_clear_md5_list (struct sock *sk)
660 {
661 struct tcp_sock *tp = tcp_sk(sk);
662 int i;
663
664 if (tp->md5sig_info->entries6) {
665 for (i = 0; i < tp->md5sig_info->entries6; i++)
666 kfree(tp->md5sig_info->keys6[i].base.key);
667 tp->md5sig_info->entries6 = 0;
668 tcp_free_md5sig_pool();
669 }
670
671 kfree(tp->md5sig_info->keys6);
672 tp->md5sig_info->keys6 = NULL;
673 tp->md5sig_info->alloced6 = 0;
674
675 if (tp->md5sig_info->entries4) {
676 for (i = 0; i < tp->md5sig_info->entries4; i++)
677 kfree(tp->md5sig_info->keys4[i].base.key);
678 tp->md5sig_info->entries4 = 0;
679 tcp_free_md5sig_pool();
680 }
681
682 kfree(tp->md5sig_info->keys4);
683 tp->md5sig_info->keys4 = NULL;
684 tp->md5sig_info->alloced4 = 0;
685 }
686
687 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
688 int optlen)
689 {
690 struct tcp_md5sig cmd;
691 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
692 u8 *newkey;
693
694 if (optlen < sizeof(cmd))
695 return -EINVAL;
696
697 if (copy_from_user(&cmd, optval, sizeof(cmd)))
698 return -EFAULT;
699
700 if (sin6->sin6_family != AF_INET6)
701 return -EINVAL;
702
703 if (!cmd.tcpm_keylen) {
704 if (!tcp_sk(sk)->md5sig_info)
705 return -ENOENT;
706 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
707 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
708 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
709 }
710
711 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
712 return -EINVAL;
713
714 if (!tcp_sk(sk)->md5sig_info) {
715 struct tcp_sock *tp = tcp_sk(sk);
716 struct tcp_md5sig_info *p;
717
718 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
719 if (!p)
720 return -ENOMEM;
721
722 tp->md5sig_info = p;
723 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
724 }
725
726 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
727 if (!newkey)
728 return -ENOMEM;
729 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
730 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
731 newkey, cmd.tcpm_keylen);
732 }
733 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
734 }
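/*
 * A userspace sketch of exercising the parser above.  TCP_MD5SIG and
 * struct tcp_md5sig come from <linux/tcp.h>; the peer address and key
 * are examples.  A v4-mapped tcpm_addr would instead be diverted to the
 * tcp_v4_md5_do_add()/_do_del() calls seen in the code.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

int main(void)
{
	struct tcp_md5sig md5;
	struct sockaddr_in6 *peer = (struct sockaddr_in6 *)&md5.tcpm_addr;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&md5, 0, sizeof(md5));
	peer->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &peer->sin6_addr);
	md5.tcpm_keylen = 6;
	memcpy(md5.tcpm_key, "secret", 6);

	/* tcpm_keylen > 0 adds/replaces the key; tcpm_keylen == 0 deletes
	 * it (-ENOENT when no key exists, as in the parser above) */
	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0)
		perror("setsockopt(TCP_MD5SIG)");
	close(fd);
	return 0;
}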
735
736 static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
737 struct in6_addr *saddr,
738 struct in6_addr *daddr,
739 struct tcphdr *th, unsigned int tcplen)
740 {
741 struct scatterlist sg[4];
742 __u16 data_len;
743 int block = 0;
744 __sum16 cksum;
745 struct tcp_md5sig_pool *hp;
746 struct tcp6_pseudohdr *bp;
747 struct hash_desc *desc;
748 int err;
749 unsigned int nbytes = 0;
750
751 hp = tcp_get_md5sig_pool();
752 if (!hp) {
753 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__);
754 goto clear_hash_noput;
755 }
756 bp = &hp->md5_blk.ip6;
757 desc = &hp->md5_desc;
758
759 /* 1. TCP pseudo-header (RFC2460) */
760 ipv6_addr_copy(&bp->saddr, saddr);
761 ipv6_addr_copy(&bp->daddr, daddr);
762 bp->len = htonl(tcplen);
763 bp->protocol = htonl(IPPROTO_TCP);
764
765 sg_init_table(sg, 4);
766
767 sg_set_buf(&sg[block++], bp, sizeof(*bp));
768 nbytes += sizeof(*bp);
769
770 /* 2. TCP header, excluding options */
771 cksum = th->check;
772 th->check = 0;
773 sg_set_buf(&sg[block++], th, sizeof(*th));
774 nbytes += sizeof(*th);
775
776 /* 3. TCP segment data (if any) */
777 data_len = tcplen - (th->doff << 2);
778 if (data_len > 0) {
779 u8 *data = (u8 *)th + (th->doff << 2);
780 sg_set_buf(&sg[block++], data, data_len);
781 nbytes += data_len;
782 }
783
784 /* 4. shared key */
785 sg_set_buf(&sg[block++], key->key, key->keylen);
786 nbytes += key->keylen;
787
788 sg_mark_end(&sg[block - 1]);
789
790 /* Now store the hash into the packet */
791 err = crypto_hash_init(desc);
792 if (err) {
793 printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
794 goto clear_hash;
795 }
796 err = crypto_hash_update(desc, sg, nbytes);
797 if (err) {
798 printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
799 goto clear_hash;
800 }
801 err = crypto_hash_final(desc, md5_hash);
802 if (err) {
803 printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
804 goto clear_hash;
805 }
806
807 /* Reset header, and free up the crypto */
808 tcp_put_md5sig_pool();
809 th->check = cksum;
810 out:
811 return 0;
812 clear_hash:
813 tcp_put_md5sig_pool();
814 clear_hash_noput:
815 memset(md5_hash, 0, 16);
816 goto out;
817 }
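/*
 * A standalone restatement (userspace, OpenSSL's legacy MD5 API) of the
 * same RFC 2385 digest: MD5 over the pseudo-header, the TCP header with
 * its checksum zeroed (options excluded), the segment data, and finally
 * the shared key.  Struct layout mirrors tcp6_pseudohdr; all names here
 * are illustrative.
 */
#include <string.h>
#include <stdint.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <openssl/md5.h>

struct tcp6_pseudo {
	struct in6_addr saddr;
	struct in6_addr daddr;
	uint32_t len;		/* htonl(TCP header + data length) */
	uint32_t protocol;	/* htonl(IPPROTO_TCP) */
};

static void tcp6_md5_digest(uint8_t out[MD5_DIGEST_LENGTH],
			    const struct in6_addr *saddr,
			    const struct in6_addr *daddr,
			    const struct tcphdr *th, size_t tcplen,
			    const uint8_t *key, size_t keylen)
{
	struct tcp6_pseudo bp;
	struct tcphdr th0 = *th;
	MD5_CTX ctx;

	bp.saddr = *saddr;
	bp.daddr = *daddr;
	bp.len = htonl((uint32_t)tcplen);
	bp.protocol = htonl(IPPROTO_TCP);
	th0.check = 0;				/* checksum is hashed as zero */

	MD5_Init(&ctx);
	MD5_Update(&ctx, &bp, sizeof(bp));	/* 1. pseudo-header */
	MD5_Update(&ctx, &th0, sizeof(th0));	/* 2. header, options excluded */
	MD5_Update(&ctx, (const uint8_t *)th + (th->doff << 2),
		   tcplen - ((size_t)th->doff << 2));	/* 3. segment data */
	MD5_Update(&ctx, key, keylen);		/* 4. shared key */
	MD5_Final(out, &ctx);
}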
818
819 static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
820 struct sock *sk,
821 struct dst_entry *dst,
822 struct request_sock *req,
823 struct tcphdr *th, unsigned int tcplen)
824 {
825 struct in6_addr *saddr, *daddr;
826
827 if (sk) {
828 saddr = &inet6_sk(sk)->saddr;
829 daddr = &inet6_sk(sk)->daddr;
830 } else {
831 saddr = &inet6_rsk(req)->loc_addr;
832 daddr = &inet6_rsk(req)->rmt_addr;
833 }
834 return tcp_v6_do_calc_md5_hash(md5_hash, key,
835 saddr, daddr,
836 th, tcplen);
837 }
838
839 static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
840 {
841 __u8 *hash_location = NULL;
842 struct tcp_md5sig_key *hash_expected;
843 struct ipv6hdr *ip6h = ipv6_hdr(skb);
844 struct tcphdr *th = tcp_hdr(skb);
845 int genhash;
846 u8 newhash[16];
847
848 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
849 hash_location = tcp_parse_md5sig_option(th);
850
851 /* do we have a hash as expected? */
852 if (!hash_expected) {
853 if (!hash_location)
854 return 0;
855 if (net_ratelimit()) {
856 printk(KERN_INFO "MD5 Hash NOT expected but found "
857 "(" NIP6_FMT ", %u)->"
858 "(" NIP6_FMT ", %u)\n",
859 NIP6(ip6h->saddr), ntohs(th->source),
860 NIP6(ip6h->daddr), ntohs(th->dest));
861 }
862 return 1;
863 }
864
865 if (!hash_location) {
866 if (net_ratelimit()) {
867 printk(KERN_INFO "MD5 Hash expected but NOT found "
868 "(" NIP6_FMT ", %u)->"
869 "(" NIP6_FMT ", %u)\n",
870 NIP6(ip6h->saddr), ntohs(th->source),
871 NIP6(ip6h->daddr), ntohs(th->dest));
872 }
873 return 1;
874 }
875
876 /* check the signature */
877 genhash = tcp_v6_do_calc_md5_hash(newhash,
878 hash_expected,
879 &ip6h->saddr, &ip6h->daddr,
880 th, skb->len);
881 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
882 if (net_ratelimit()) {
883 printk(KERN_INFO "MD5 Hash %s for "
884 "(" NIP6_FMT ", %u)->"
885 "(" NIP6_FMT ", %u)\n",
886 genhash ? "failed" : "mismatch",
887 NIP6(ip6h->saddr), ntohs(th->source),
888 NIP6(ip6h->daddr), ntohs(th->dest));
889 }
890 return 1;
891 }
892 return 0;
893 }
894 #endif
895
896 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
897 .family = AF_INET6,
898 .obj_size = sizeof(struct tcp6_request_sock),
899 .rtx_syn_ack = tcp_v6_send_synack,
900 .send_ack = tcp_v6_reqsk_send_ack,
901 .destructor = tcp_v6_reqsk_destructor,
902 .send_reset = tcp_v6_send_reset
903 };
904
905 #ifdef CONFIG_TCP_MD5SIG
906 static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
907 .md5_lookup = tcp_v6_reqsk_md5_lookup,
908 };
909 #endif
910
911 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
912 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
913 .twsk_unique = tcp_twsk_unique,
914 .twsk_destructor= tcp_twsk_destructor,
915 };
916
917 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
918 {
919 struct ipv6_pinfo *np = inet6_sk(sk);
920 struct tcphdr *th = tcp_hdr(skb);
921
922 if (skb->ip_summed == CHECKSUM_PARTIAL) {
923 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
924 skb->csum_start = skb_transport_header(skb) - skb->head;
925 skb->csum_offset = offsetof(struct tcphdr, check);
926 } else {
927 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
928 csum_partial((char *)th, th->doff<<2,
929 skb->csum));
930 }
931 }
932
933 static int tcp_v6_gso_send_check(struct sk_buff *skb)
934 {
935 struct ipv6hdr *ipv6h;
936 struct tcphdr *th;
937
938 if (!pskb_may_pull(skb, sizeof(*th)))
939 return -EINVAL;
940
941 ipv6h = ipv6_hdr(skb);
942 th = tcp_hdr(skb);
943
944 th->check = 0;
945 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
946 IPPROTO_TCP, 0);
947 skb->csum_start = skb_transport_header(skb) - skb->head;
948 skb->csum_offset = offsetof(struct tcphdr, check);
949 skb->ip_summed = CHECKSUM_PARTIAL;
950 return 0;
951 }
952
953 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
954 {
955 struct tcphdr *th = tcp_hdr(skb), *t1;
956 struct sk_buff *buff;
957 struct flowi fl;
958 struct net *net = dev_net(skb->dst->dev);
959 struct sock *ctl_sk = net->ipv6.tcp_sk;
960 unsigned int tot_len = sizeof(*th);
961 #ifdef CONFIG_TCP_MD5SIG
962 struct tcp_md5sig_key *key;
963 #endif
964
965 if (th->rst)
966 return;
967
968 if (!ipv6_unicast_destination(skb))
969 return;
970
971 #ifdef CONFIG_TCP_MD5SIG
972 if (sk)
973 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
974 else
975 key = NULL;
976
977 if (key)
978 tot_len += TCPOLEN_MD5SIG_ALIGNED;
979 #endif
980
981 /*
982 * We need to grab some memory, and put together an RST,
983 * and then put it into the queue to be sent.
984 */
985
986 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
987 GFP_ATOMIC);
988 if (buff == NULL)
989 return;
990
991 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
992
993 t1 = (struct tcphdr *) skb_push(buff, tot_len);
994
995 /* Swap the send and the receive. */
996 memset(t1, 0, sizeof(*t1));
997 t1->dest = th->source;
998 t1->source = th->dest;
999 t1->doff = tot_len / 4;
1000 t1->rst = 1;
1001
1002 if(th->ack) {
1003 t1->seq = th->ack_seq;
1004 } else {
1005 t1->ack = 1;
1006 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
1007 + skb->len - (th->doff<<2));
1008 }
1009
1010 #ifdef CONFIG_TCP_MD5SIG
1011 if (key) {
1012 __be32 *opt = (__be32*)(t1 + 1);
1013 opt[0] = htonl((TCPOPT_NOP << 24) |
1014 (TCPOPT_NOP << 16) |
1015 (TCPOPT_MD5SIG << 8) |
1016 TCPOLEN_MD5SIG);
1017 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
1018 &ipv6_hdr(skb)->daddr,
1019 &ipv6_hdr(skb)->saddr,
1020 t1, tot_len);
1021 }
1022 #endif
1023
1024 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
1025
1026 memset(&fl, 0, sizeof(fl));
1027 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1028 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1029
1030 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1031 sizeof(*t1), IPPROTO_TCP,
1032 buff->csum);
1033
1034 fl.proto = IPPROTO_TCP;
1035 fl.oif = inet6_iif(skb);
1036 fl.fl_ip_dport = t1->dest;
1037 fl.fl_ip_sport = t1->source;
1038 security_skb_classify_flow(skb, &fl);
1039
 1040 	/* Pass the control socket to ip6_dst_lookup even though this
 1041 	 * packet is an RST; the underlying function uses it to retrieve
 1042 	 * the network namespace.
 1043 	 */
1044 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1045
1046 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1047 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1048 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1049 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1050 return;
1051 }
1052 }
1053
1054 kfree_skb(buff);
1055 }
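/*
 * The seq/ack selection above follows RFC 793's reset-generation rules.
 * A tiny standalone restatement with illustrative names (payload_len is
 * skb->len minus the TCP header, as computed above):
 */
#include <stdint.h>

struct rst_nums { uint32_t seq, ack_seq; int ack; };

static struct rst_nums rst_for(uint32_t seg_seq, uint32_t seg_ack,
			       int seg_has_ack, uint32_t payload_len,
			       int syn, int fin)
{
	struct rst_nums r = { 0, 0, 0 };

	if (seg_has_ack) {
		r.seq = seg_ack;	/* RST.seq = incoming ACK number */
	} else {
		r.ack = 1;		/* otherwise acknowledge all data: */
		r.ack_seq = seg_seq + payload_len + syn + fin;
	}
	return r;
}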
1056
1057 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1058 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
1059 {
1060 struct tcphdr *th = tcp_hdr(skb), *t1;
1061 struct sk_buff *buff;
1062 struct flowi fl;
1063 struct net *net = dev_net(skb->dev);
1064 struct sock *ctl_sk = net->ipv6.tcp_sk;
1065 unsigned int tot_len = sizeof(struct tcphdr);
1066 __be32 *topt;
1067 #ifdef CONFIG_TCP_MD5SIG
1068 struct tcp_md5sig_key *key;
1069 struct tcp_md5sig_key tw_key;
1070 #endif
1071
1072 #ifdef CONFIG_TCP_MD5SIG
1073 if (!tw && skb->sk) {
1074 key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
1075 } else if (tw && tw->tw_md5_keylen) {
1076 tw_key.key = tw->tw_md5_key;
1077 tw_key.keylen = tw->tw_md5_keylen;
1078 key = &tw_key;
1079 } else {
1080 key = NULL;
1081 }
1082 #endif
1083
1084 if (ts)
1085 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1086 #ifdef CONFIG_TCP_MD5SIG
1087 if (key)
1088 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1089 #endif
1090
1091 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1092 GFP_ATOMIC);
1093 if (buff == NULL)
1094 return;
1095
1096 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1097
1098 t1 = (struct tcphdr *) skb_push(buff,tot_len);
1099
1100 /* Swap the send and the receive. */
1101 memset(t1, 0, sizeof(*t1));
1102 t1->dest = th->source;
1103 t1->source = th->dest;
1104 t1->doff = tot_len/4;
1105 t1->seq = htonl(seq);
1106 t1->ack_seq = htonl(ack);
1107 t1->ack = 1;
1108 t1->window = htons(win);
1109
1110 topt = (__be32 *)(t1 + 1);
1111
1112 if (ts) {
1113 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1114 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1115 *topt++ = htonl(tcp_time_stamp);
1116 *topt = htonl(ts);
1117 }
1118
1119 #ifdef CONFIG_TCP_MD5SIG
1120 if (key) {
1121 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1122 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1123 tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
1124 &ipv6_hdr(skb)->daddr,
1125 &ipv6_hdr(skb)->saddr,
1126 t1, tot_len);
1127 }
1128 #endif
1129
1130 buff->csum = csum_partial((char *)t1, tot_len, 0);
1131
1132 memset(&fl, 0, sizeof(fl));
1133 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1134 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1135
1136 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1137 tot_len, IPPROTO_TCP,
1138 buff->csum);
1139
1140 fl.proto = IPPROTO_TCP;
1141 fl.oif = inet6_iif(skb);
1142 fl.fl_ip_dport = t1->dest;
1143 fl.fl_ip_sport = t1->source;
1144 security_skb_classify_flow(skb, &fl);
1145
1146 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1147 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1148 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1149 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1150 return;
1151 }
1152 }
1153
1154 kfree_skb(buff);
1155 }
1156
1157 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1158 {
1159 struct inet_timewait_sock *tw = inet_twsk(sk);
1160 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1161
1162 tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1163 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1164 tcptw->tw_ts_recent);
1165
1166 inet_twsk_put(tw);
1167 }
1168
1169 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1170 {
1171 tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
1172 }
1173
1174
1175 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1176 {
1177 struct request_sock *req, **prev;
1178 const struct tcphdr *th = tcp_hdr(skb);
1179 struct sock *nsk;
1180
1181 /* Find possible connection requests. */
1182 req = inet6_csk_search_req(sk, &prev, th->source,
1183 &ipv6_hdr(skb)->saddr,
1184 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1185 if (req)
1186 return tcp_check_req(sk, skb, req, prev);
1187
1188 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1189 &ipv6_hdr(skb)->saddr, th->source,
1190 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1191
1192 if (nsk) {
1193 if (nsk->sk_state != TCP_TIME_WAIT) {
1194 bh_lock_sock(nsk);
1195 return nsk;
1196 }
1197 inet_twsk_put(inet_twsk(nsk));
1198 return NULL;
1199 }
1200
1201 #ifdef CONFIG_SYN_COOKIES
1202 if (!th->rst && !th->syn && th->ack)
1203 sk = cookie_v6_check(sk, skb);
1204 #endif
1205 return sk;
1206 }
1207
1208 /* FIXME: this is substantially similar to the ipv4 code.
1209 * Can some kind of merge be done? -- erics
1210 */
1211 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1212 {
1213 struct inet6_request_sock *treq;
1214 struct ipv6_pinfo *np = inet6_sk(sk);
1215 struct tcp_options_received tmp_opt;
1216 struct tcp_sock *tp = tcp_sk(sk);
1217 struct request_sock *req = NULL;
1218 __u32 isn = TCP_SKB_CB(skb)->when;
1219 #ifdef CONFIG_SYN_COOKIES
1220 int want_cookie = 0;
1221 #else
1222 #define want_cookie 0
1223 #endif
1224
1225 if (skb->protocol == htons(ETH_P_IP))
1226 return tcp_v4_conn_request(sk, skb);
1227
1228 if (!ipv6_unicast_destination(skb))
1229 goto drop;
1230
1231 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1232 if (net_ratelimit())
1233 syn_flood_warning(skb);
1234 #ifdef CONFIG_SYN_COOKIES
1235 if (sysctl_tcp_syncookies)
1236 want_cookie = 1;
1237 else
1238 #endif
1239 goto drop;
1240 }
1241
1242 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1243 goto drop;
1244
1245 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1246 if (req == NULL)
1247 goto drop;
1248
1249 #ifdef CONFIG_TCP_MD5SIG
1250 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1251 #endif
1252
1253 tcp_clear_options(&tmp_opt);
1254 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1255 tmp_opt.user_mss = tp->rx_opt.user_mss;
1256
1257 tcp_parse_options(skb, &tmp_opt, 0);
1258
1259 if (want_cookie && !tmp_opt.saw_tstamp)
1260 tcp_clear_options(&tmp_opt);
1261
1262 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1263 tcp_openreq_init(req, &tmp_opt, skb);
1264
1265 treq = inet6_rsk(req);
1266 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1267 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1268 treq->pktopts = NULL;
1269 if (!want_cookie)
1270 TCP_ECN_create_request(req, tcp_hdr(skb));
1271
1272 if (want_cookie) {
1273 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1274 req->cookie_ts = tmp_opt.tstamp_ok;
1275 } else if (!isn) {
1276 if (ipv6_opt_accepted(sk, skb) ||
1277 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1278 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1279 atomic_inc(&skb->users);
1280 treq->pktopts = skb;
1281 }
1282 treq->iif = sk->sk_bound_dev_if;
1283
1284 /* So that link locals have meaning */
1285 if (!sk->sk_bound_dev_if &&
1286 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1287 treq->iif = inet6_iif(skb);
1288
1289 isn = tcp_v6_init_sequence(skb);
1290 }
1291
1292 tcp_rsk(req)->snt_isn = isn;
1293
1294 security_inet_conn_request(sk, skb, req);
1295
1296 if (tcp_v6_send_synack(sk, req))
1297 goto drop;
1298
1299 if (!want_cookie) {
1300 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1301 return 0;
1302 }
1303
1304 drop:
1305 if (req)
1306 reqsk_free(req);
1307
1308 return 0; /* don't send reset */
1309 }
1310
1311 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1312 struct request_sock *req,
1313 struct dst_entry *dst)
1314 {
1315 struct inet6_request_sock *treq = inet6_rsk(req);
1316 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1317 struct tcp6_sock *newtcp6sk;
1318 struct inet_sock *newinet;
1319 struct tcp_sock *newtp;
1320 struct sock *newsk;
1321 struct ipv6_txoptions *opt;
1322 #ifdef CONFIG_TCP_MD5SIG
1323 struct tcp_md5sig_key *key;
1324 #endif
1325
1326 if (skb->protocol == htons(ETH_P_IP)) {
1327 /*
1328 * v6 mapped
1329 */
1330
1331 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1332
1333 if (newsk == NULL)
1334 return NULL;
1335
1336 newtcp6sk = (struct tcp6_sock *)newsk;
1337 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1338
1339 newinet = inet_sk(newsk);
1340 newnp = inet6_sk(newsk);
1341 newtp = tcp_sk(newsk);
1342
1343 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1344
1345 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1346 newinet->daddr);
1347
1348 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1349 newinet->saddr);
1350
1351 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1352
1353 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1354 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1355 #ifdef CONFIG_TCP_MD5SIG
1356 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1357 #endif
1358
1359 newnp->pktoptions = NULL;
1360 newnp->opt = NULL;
1361 newnp->mcast_oif = inet6_iif(skb);
1362 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1363
1364 /*
1365 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1366 * here, tcp_create_openreq_child now does this for us, see the comment in
1367 * that function for the gory details. -acme
1368 */
1369
 1370 		/* This is a tricky place: until this moment the IPv4 TCP
 1371 		   code has been working with the IPv6 icsk.icsk_af_ops.
 1372 		   Sync it now.
 1373 		 */
1374 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1375
1376 return newsk;
1377 }
1378
1379 opt = np->opt;
1380
1381 if (sk_acceptq_is_full(sk))
1382 goto out_overflow;
1383
1384 if (dst == NULL) {
1385 struct in6_addr *final_p = NULL, final;
1386 struct flowi fl;
1387
1388 memset(&fl, 0, sizeof(fl));
1389 fl.proto = IPPROTO_TCP;
1390 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1391 if (opt && opt->srcrt) {
1392 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1393 ipv6_addr_copy(&final, &fl.fl6_dst);
1394 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1395 final_p = &final;
1396 }
1397 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1398 fl.oif = sk->sk_bound_dev_if;
1399 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1400 fl.fl_ip_sport = inet_sk(sk)->sport;
1401 security_req_classify_flow(req, &fl);
1402
1403 if (ip6_dst_lookup(sk, &dst, &fl))
1404 goto out;
1405
1406 if (final_p)
1407 ipv6_addr_copy(&fl.fl6_dst, final_p);
1408
1409 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1410 goto out;
1411 }
1412
1413 newsk = tcp_create_openreq_child(sk, req, skb);
1414 if (newsk == NULL)
1415 goto out;
1416
1417 /*
1418 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1419 * count here, tcp_create_openreq_child now does this for us, see the
1420 * comment in that function for the gory details. -acme
1421 */
1422
1423 newsk->sk_gso_type = SKB_GSO_TCPV6;
1424 __ip6_dst_store(newsk, dst, NULL, NULL);
1425
1426 newtcp6sk = (struct tcp6_sock *)newsk;
1427 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1428
1429 newtp = tcp_sk(newsk);
1430 newinet = inet_sk(newsk);
1431 newnp = inet6_sk(newsk);
1432
1433 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1434
1435 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1436 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1437 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1438 newsk->sk_bound_dev_if = treq->iif;
1439
1440 /* Now IPv6 options...
1441
1442 First: no IPv4 options.
1443 */
1444 newinet->opt = NULL;
1445 newnp->ipv6_fl_list = NULL;
1446
1447 /* Clone RX bits */
1448 newnp->rxopt.all = np->rxopt.all;
1449
1450 /* Clone pktoptions received with SYN */
1451 newnp->pktoptions = NULL;
1452 if (treq->pktopts != NULL) {
1453 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1454 kfree_skb(treq->pktopts);
1455 treq->pktopts = NULL;
1456 if (newnp->pktoptions)
1457 skb_set_owner_r(newnp->pktoptions, newsk);
1458 }
1459 newnp->opt = NULL;
1460 newnp->mcast_oif = inet6_iif(skb);
1461 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1462
1463 /* Clone native IPv6 options from listening socket (if any)
1464
1465 Yes, keeping reference count would be much more clever,
1466 but we make one more one thing there: reattach optmem
1467 to newsk.
1468 */
1469 if (opt) {
1470 newnp->opt = ipv6_dup_options(newsk, opt);
1471 if (opt != np->opt)
1472 sock_kfree_s(sk, opt, opt->tot_len);
1473 }
1474
1475 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1476 if (newnp->opt)
1477 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1478 newnp->opt->opt_flen);
1479
1480 tcp_mtup_init(newsk);
1481 tcp_sync_mss(newsk, dst_mtu(dst));
1482 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1483 tcp_initialize_rcv_mss(newsk);
1484
1485 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1486
1487 #ifdef CONFIG_TCP_MD5SIG
1488 /* Copy over the MD5 key from the original socket */
1489 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1490 /* We're using one, so create a matching key
1491 * on the newsk structure. If we fail to get
1492 * memory, then we end up not copying the key
1493 * across. Shucks.
1494 */
1495 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1496 if (newkey != NULL)
1497 tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1498 newkey, key->keylen);
1499 }
1500 #endif
1501
1502 __inet6_hash(newsk);
1503 __inet_inherit_port(sk, newsk);
1504
1505 return newsk;
1506
1507 out_overflow:
1508 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1509 out:
1510 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1511 if (opt && opt != np->opt)
1512 sock_kfree_s(sk, opt, opt->tot_len);
1513 dst_release(dst);
1514 return NULL;
1515 }
1516
1517 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1518 {
1519 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1520 if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
1521 &ipv6_hdr(skb)->daddr, skb->csum)) {
1522 skb->ip_summed = CHECKSUM_UNNECESSARY;
1523 return 0;
1524 }
1525 }
1526
1527 skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
1528 &ipv6_hdr(skb)->saddr,
1529 &ipv6_hdr(skb)->daddr, 0));
1530
1531 if (skb->len <= 76) {
1532 return __skb_checksum_complete(skb);
1533 }
1534 return 0;
1535 }
1536
 1537 /* The socket must have its spinlock held when we get
1538 * here.
1539 *
1540 * We have a potential double-lock case here, so even when
1541 * doing backlog processing we use the BH locking scheme.
1542 * This is because we cannot sleep with the original spinlock
1543 * held.
1544 */
1545 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1546 {
1547 struct ipv6_pinfo *np = inet6_sk(sk);
1548 struct tcp_sock *tp;
1549 struct sk_buff *opt_skb = NULL;
1550
1551 /* Imagine: socket is IPv6. IPv4 packet arrives,
 1552 	   goes to the IPv4 receive handler and is backlogged.
 1553 	   From the backlog it always ends up here. Kerboom...
 1554 	   Fortunately, tcp_rcv_established and rcv_established
 1555 	   handle them correctly, but that is not the case with
1556 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1557 */
1558
1559 if (skb->protocol == htons(ETH_P_IP))
1560 return tcp_v4_do_rcv(sk, skb);
1561
1562 #ifdef CONFIG_TCP_MD5SIG
1563 if (tcp_v6_inbound_md5_hash (sk, skb))
1564 goto discard;
1565 #endif
1566
1567 if (sk_filter(sk, skb))
1568 goto discard;
1569
1570 /*
1571 * socket locking is here for SMP purposes as backlog rcv
1572 * is currently called with bh processing disabled.
1573 */
1574
1575 /* Do Stevens' IPV6_PKTOPTIONS.
1576
 1577 	   Yes, guys, this is the only place in our code where we
 1578 	   can do this without affecting IPv4.
 1579 	   The rest of the code is protocol independent,
 1580 	   and I do not like the idea of uglifying IPv4.
 1581 
 1582 	   Actually, the whole idea behind IPV6_PKTOPTIONS
 1583 	   does not look very well thought out. For now we latch
 1584 	   the options received in the last packet enqueued
 1585 	   by TCP. Feel free to propose a better solution.
1586 --ANK (980728)
1587 */
1588 if (np->rxopt.all)
1589 opt_skb = skb_clone(skb, GFP_ATOMIC);
1590
1591 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1592 TCP_CHECK_TIMER(sk);
1593 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1594 goto reset;
1595 TCP_CHECK_TIMER(sk);
1596 if (opt_skb)
1597 goto ipv6_pktoptions;
1598 return 0;
1599 }
1600
1601 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1602 goto csum_err;
1603
1604 if (sk->sk_state == TCP_LISTEN) {
1605 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1606 if (!nsk)
1607 goto discard;
1608
1609 /*
1610 * Queue it on the new socket if the new socket is active,
 1611 	 * otherwise we just short-circuit this and continue with
 1612 	 * the same socket.
1613 */
1614 if(nsk != sk) {
1615 if (tcp_child_process(sk, nsk, skb))
1616 goto reset;
1617 if (opt_skb)
1618 __kfree_skb(opt_skb);
1619 return 0;
1620 }
1621 }
1622
1623 TCP_CHECK_TIMER(sk);
1624 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1625 goto reset;
1626 TCP_CHECK_TIMER(sk);
1627 if (opt_skb)
1628 goto ipv6_pktoptions;
1629 return 0;
1630
1631 reset:
1632 tcp_v6_send_reset(sk, skb);
1633 discard:
1634 if (opt_skb)
1635 __kfree_skb(opt_skb);
1636 kfree_skb(skb);
1637 return 0;
1638 csum_err:
1639 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1640 goto discard;
1641
1642
1643 ipv6_pktoptions:
 1644 	/* What is this? We only get here when:
 1645 
 1646 	   1. the skb was enqueued by tcp.
 1647 	   2. the skb was added to the tail of the read queue, rather than out of order.
 1648 	   3. the socket is not in a passive state.
 1649 	   4. finally, it really contains options which the user wants to receive.
1650 */
1651 tp = tcp_sk(sk);
1652 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1653 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1654 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1655 np->mcast_oif = inet6_iif(opt_skb);
1656 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1657 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1658 if (ipv6_opt_accepted(sk, opt_skb)) {
1659 skb_set_owner_r(opt_skb, sk);
1660 opt_skb = xchg(&np->pktoptions, opt_skb);
1661 } else {
1662 __kfree_skb(opt_skb);
1663 opt_skb = xchg(&np->pktoptions, NULL);
1664 }
1665 }
1666
1667 if (opt_skb)
1668 kfree_skb(opt_skb);
1669 return 0;
1670 }
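/*
 * np->rxopt.all above is armed by the IPV6_RECV* socket options.  A
 * hedged userspace sketch enabling IPV6_RECVPKTINFO (rxopt.bits.rxinfo),
 * which makes this function latch a clone of the last in-order segment
 * in np->pktoptions:
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int on = 1;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO,
		       &on, sizeof(on)) < 0)
		perror("setsockopt(IPV6_RECVPKTINFO)");
	/* ... after connect()/accept(), the latched options can be read
	 * back; on Linux this is getsockopt(IPV6_2292PKTOPTIONS) ... */
	close(fd);
	return 0;
}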
1671
1672 static int tcp_v6_rcv(struct sk_buff *skb)
1673 {
1674 struct tcphdr *th;
1675 struct sock *sk;
1676 int ret;
1677
1678 if (skb->pkt_type != PACKET_HOST)
1679 goto discard_it;
1680
1681 /*
1682 * Count it even if it's bad.
1683 */
1684 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1685
1686 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1687 goto discard_it;
1688
1689 th = tcp_hdr(skb);
1690
1691 if (th->doff < sizeof(struct tcphdr)/4)
1692 goto bad_packet;
1693 if (!pskb_may_pull(skb, th->doff*4))
1694 goto discard_it;
1695
1696 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1697 goto bad_packet;
1698
1699 th = tcp_hdr(skb);
1700 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1701 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1702 skb->len - th->doff*4);
1703 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1704 TCP_SKB_CB(skb)->when = 0;
1705 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1706 TCP_SKB_CB(skb)->sacked = 0;
1707
1708 sk = __inet6_lookup(dev_net(skb->dev), &tcp_hashinfo,
1709 &ipv6_hdr(skb)->saddr, th->source,
1710 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
1711 inet6_iif(skb));
1712
1713 if (!sk)
1714 goto no_tcp_socket;
1715
1716 process:
1717 if (sk->sk_state == TCP_TIME_WAIT)
1718 goto do_time_wait;
1719
1720 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1721 goto discard_and_relse;
1722
1723 if (sk_filter(sk, skb))
1724 goto discard_and_relse;
1725
1726 skb->dev = NULL;
1727
1728 bh_lock_sock_nested(sk);
1729 ret = 0;
1730 if (!sock_owned_by_user(sk)) {
1731 #ifdef CONFIG_NET_DMA
1732 struct tcp_sock *tp = tcp_sk(sk);
1733 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1734 tp->ucopy.dma_chan = get_softnet_dma();
1735 if (tp->ucopy.dma_chan)
1736 ret = tcp_v6_do_rcv(sk, skb);
1737 else
1738 #endif
1739 {
1740 if (!tcp_prequeue(sk, skb))
1741 ret = tcp_v6_do_rcv(sk, skb);
1742 }
1743 } else
1744 sk_add_backlog(sk, skb);
1745 bh_unlock_sock(sk);
1746
1747 sock_put(sk);
1748 return ret ? -1 : 0;
1749
1750 no_tcp_socket:
1751 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1752 goto discard_it;
1753
1754 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1755 bad_packet:
1756 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1757 } else {
1758 tcp_v6_send_reset(NULL, skb);
1759 }
1760
1761 discard_it:
1762
1763 /*
1764 * Discard frame
1765 */
1766
1767 kfree_skb(skb);
1768 return 0;
1769
1770 discard_and_relse:
1771 sock_put(sk);
1772 goto discard_it;
1773
1774 do_time_wait:
1775 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1776 inet_twsk_put(inet_twsk(sk));
1777 goto discard_it;
1778 }
1779
1780 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1781 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1782 inet_twsk_put(inet_twsk(sk));
1783 goto discard_it;
1784 }
1785
1786 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1787 case TCP_TW_SYN:
1788 {
1789 struct sock *sk2;
1790
1791 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1792 &ipv6_hdr(skb)->daddr,
1793 ntohs(th->dest), inet6_iif(skb));
1794 if (sk2 != NULL) {
1795 struct inet_timewait_sock *tw = inet_twsk(sk);
1796 inet_twsk_deschedule(tw, &tcp_death_row);
1797 inet_twsk_put(tw);
1798 sk = sk2;
1799 goto process;
1800 }
1801 /* Fall through to ACK */
1802 }
1803 case TCP_TW_ACK:
1804 tcp_v6_timewait_ack(sk, skb);
1805 break;
1806 case TCP_TW_RST:
1807 goto no_tcp_socket;
1808 case TCP_TW_SUCCESS:;
1809 }
1810 goto discard_it;
1811 }
1812
1813 static int tcp_v6_remember_stamp(struct sock *sk)
1814 {
1815 /* Alas, not yet... */
1816 return 0;
1817 }
1818
1819 static struct inet_connection_sock_af_ops ipv6_specific = {
1820 .queue_xmit = inet6_csk_xmit,
1821 .send_check = tcp_v6_send_check,
1822 .rebuild_header = inet6_sk_rebuild_header,
1823 .conn_request = tcp_v6_conn_request,
1824 .syn_recv_sock = tcp_v6_syn_recv_sock,
1825 .remember_stamp = tcp_v6_remember_stamp,
1826 .net_header_len = sizeof(struct ipv6hdr),
1827 .setsockopt = ipv6_setsockopt,
1828 .getsockopt = ipv6_getsockopt,
1829 .addr2sockaddr = inet6_csk_addr2sockaddr,
1830 .sockaddr_len = sizeof(struct sockaddr_in6),
1831 .bind_conflict = inet6_csk_bind_conflict,
1832 #ifdef CONFIG_COMPAT
1833 .compat_setsockopt = compat_ipv6_setsockopt,
1834 .compat_getsockopt = compat_ipv6_getsockopt,
1835 #endif
1836 };
1837
1838 #ifdef CONFIG_TCP_MD5SIG
1839 static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1840 .md5_lookup = tcp_v6_md5_lookup,
1841 .calc_md5_hash = tcp_v6_calc_md5_hash,
1842 .md5_add = tcp_v6_md5_add_func,
1843 .md5_parse = tcp_v6_parse_md5_keys,
1844 };
1845 #endif
1846
1847 /*
1848 * TCP over IPv4 via INET6 API
1849 */
1850
1851 static struct inet_connection_sock_af_ops ipv6_mapped = {
1852 .queue_xmit = ip_queue_xmit,
1853 .send_check = tcp_v4_send_check,
1854 .rebuild_header = inet_sk_rebuild_header,
1855 .conn_request = tcp_v6_conn_request,
1856 .syn_recv_sock = tcp_v6_syn_recv_sock,
1857 .remember_stamp = tcp_v4_remember_stamp,
1858 .net_header_len = sizeof(struct iphdr),
1859 .setsockopt = ipv6_setsockopt,
1860 .getsockopt = ipv6_getsockopt,
1861 .addr2sockaddr = inet6_csk_addr2sockaddr,
1862 .sockaddr_len = sizeof(struct sockaddr_in6),
1863 .bind_conflict = inet6_csk_bind_conflict,
1864 #ifdef CONFIG_COMPAT
1865 .compat_setsockopt = compat_ipv6_setsockopt,
1866 .compat_getsockopt = compat_ipv6_getsockopt,
1867 #endif
1868 };
1869
1870 #ifdef CONFIG_TCP_MD5SIG
1871 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1872 .md5_lookup = tcp_v4_md5_lookup,
1873 .calc_md5_hash = tcp_v4_calc_md5_hash,
1874 .md5_add = tcp_v6_md5_add_func,
1875 .md5_parse = tcp_v6_parse_md5_keys,
1876 };
1877 #endif
1878
 1879 /* NOTE: A lot of things are set to zero explicitly by the call to
 1880  *       sk_alloc(), so they need not be done here.
1881 */
1882 static int tcp_v6_init_sock(struct sock *sk)
1883 {
1884 struct inet_connection_sock *icsk = inet_csk(sk);
1885 struct tcp_sock *tp = tcp_sk(sk);
1886
1887 skb_queue_head_init(&tp->out_of_order_queue);
1888 tcp_init_xmit_timers(sk);
1889 tcp_prequeue_init(tp);
1890
1891 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1892 tp->mdev = TCP_TIMEOUT_INIT;
1893
1894 /* So many TCP implementations out there (incorrectly) count the
1895 * initial SYN frame in their delayed-ACK and congestion control
1896 * algorithms that we must have the following bandaid to talk
1897 * efficiently to them. -DaveM
1898 */
1899 tp->snd_cwnd = 2;
1900
1901 /* See draft-stevens-tcpca-spec-01 for discussion of the
1902 * initialization of these values.
1903 */
1904 tp->snd_ssthresh = 0x7fffffff;
1905 tp->snd_cwnd_clamp = ~0;
1906 tp->mss_cache = 536;
1907
1908 tp->reordering = sysctl_tcp_reordering;
1909
1910 sk->sk_state = TCP_CLOSE;
1911
1912 icsk->icsk_af_ops = &ipv6_specific;
1913 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1914 icsk->icsk_sync_mss = tcp_sync_mss;
1915 sk->sk_write_space = sk_stream_write_space;
1916 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1917
1918 #ifdef CONFIG_TCP_MD5SIG
1919 tp->af_specific = &tcp_sock_ipv6_specific;
1920 #endif
1921
1922 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1923 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1924
1925 atomic_inc(&tcp_sockets_allocated);
1926
1927 return 0;
1928 }
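/*
 * A small userspace probe of one default installed above: mss_cache
 * starts at 536, which TCP_MAXSEG reports on a freshly created,
 * unconnected socket (behavior of kernels of this era; treat the
 * expected value as illustrative).
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

int main(void)
{
	int mss = 0;
	socklen_t len = sizeof(mss);
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (getsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, &len) == 0)
		printf("default mss_cache = %d\n", mss);	/* expect 536 */
	close(fd);
	return 0;
}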
1929
1930 static int tcp_v6_destroy_sock(struct sock *sk)
1931 {
1932 #ifdef CONFIG_TCP_MD5SIG
1933 /* Clean up the MD5 key list */
1934 if (tcp_sk(sk)->md5sig_info)
1935 tcp_v6_clear_md5_list(sk);
1936 #endif
1937 tcp_v4_destroy_sock(sk);
1938 return inet6_destroy_sock(sk);
1939 }
1940
1941 #ifdef CONFIG_PROC_FS
1942 /* Proc filesystem TCPv6 sock list dumping. */
1943 static void get_openreq6(struct seq_file *seq,
1944 struct sock *sk, struct request_sock *req, int i, int uid)
1945 {
1946 int ttd = req->expires - jiffies;
1947 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1948 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1949
1950 if (ttd < 0)
1951 ttd = 0;
1952
1953 seq_printf(seq,
1954 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1955 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1956 i,
1957 src->s6_addr32[0], src->s6_addr32[1],
1958 src->s6_addr32[2], src->s6_addr32[3],
1959 ntohs(inet_sk(sk)->sport),
1960 dest->s6_addr32[0], dest->s6_addr32[1],
1961 dest->s6_addr32[2], dest->s6_addr32[3],
1962 ntohs(inet_rsk(req)->rmt_port),
1963 TCP_SYN_RECV,
1964 0,0, /* could print option size, but that is af dependent. */
1965 1, /* timers active (only the expire timer) */
1966 jiffies_to_clock_t(ttd),
1967 req->retrans,
1968 uid,
1969 0, /* non standard timer */
1970 0, /* open_requests have no inode */
1971 0, req);
1972 }
1973
1974 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1975 {
1976 struct in6_addr *dest, *src;
1977 __u16 destp, srcp;
1978 int timer_active;
1979 unsigned long timer_expires;
1980 struct inet_sock *inet = inet_sk(sp);
1981 struct tcp_sock *tp = tcp_sk(sp);
1982 const struct inet_connection_sock *icsk = inet_csk(sp);
1983 struct ipv6_pinfo *np = inet6_sk(sp);
1984
1985 dest = &np->daddr;
1986 src = &np->rcv_saddr;
1987 destp = ntohs(inet->dport);
1988 srcp = ntohs(inet->sport);
1989
1990 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1991 timer_active = 1;
1992 timer_expires = icsk->icsk_timeout;
1993 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1994 timer_active = 4;
1995 timer_expires = icsk->icsk_timeout;
1996 } else if (timer_pending(&sp->sk_timer)) {
1997 timer_active = 2;
1998 timer_expires = sp->sk_timer.expires;
1999 } else {
2000 timer_active = 0;
2001 timer_expires = jiffies;
2002 }
2003
2004 seq_printf(seq,
2005 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2006 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
2007 i,
2008 src->s6_addr32[0], src->s6_addr32[1],
2009 src->s6_addr32[2], src->s6_addr32[3], srcp,
2010 dest->s6_addr32[0], dest->s6_addr32[1],
2011 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2012 sp->sk_state,
2013 tp->write_seq-tp->snd_una,
2014 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2015 timer_active,
2016 jiffies_to_clock_t(timer_expires - jiffies),
2017 icsk->icsk_retransmits,
2018 sock_i_uid(sp),
2019 icsk->icsk_probes_out,
2020 sock_i_ino(sp),
2021 atomic_read(&sp->sk_refcnt), sp,
2022 icsk->icsk_rto,
2023 icsk->icsk_ack.ato,
2024 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2025 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
2026 );
2027 }
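/*
 * The seq_printf() format above is what /proc/net/tcp6 consumers see:
 * hex addresses as four 32-bit groups, then port, then state.  A
 * minimal sketch of a parser for the leading columns:
 */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/tcp6", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (!fgets(line, sizeof(line), f)) {	/* skip the header row */
		fclose(f);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned int sl, st, lp, rp;
		unsigned int l[4], r[4];

		if (sscanf(line,
			   "%u: %8x%8x%8x%8x:%4x %8x%8x%8x%8x:%4x %2x",
			   &sl, &l[0], &l[1], &l[2], &l[3], &lp,
			   &r[0], &r[1], &r[2], &r[3], &rp, &st) == 12)
			printf("sl %u state %02x local port %u\n",
			       sl, st, lp);
	}
	fclose(f);
	return 0;
}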
2028
2029 static void get_timewait6_sock(struct seq_file *seq,
2030 struct inet_timewait_sock *tw, int i)
2031 {
2032 struct in6_addr *dest, *src;
2033 __u16 destp, srcp;
2034 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2035 int ttd = tw->tw_ttd - jiffies;
2036
2037 if (ttd < 0)
2038 ttd = 0;
2039
2040 dest = &tw6->tw_v6_daddr;
2041 src = &tw6->tw_v6_rcv_saddr;
2042 destp = ntohs(tw->tw_dport);
2043 srcp = ntohs(tw->tw_sport);
2044
2045 seq_printf(seq,
2046 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2047 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2048 i,
2049 src->s6_addr32[0], src->s6_addr32[1],
2050 src->s6_addr32[2], src->s6_addr32[3], srcp,
2051 dest->s6_addr32[0], dest->s6_addr32[1],
2052 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2053 tw->tw_substate, 0, 0,
2054 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2055 atomic_read(&tw->tw_refcnt), tw);
2056 }
2057
2058 static int tcp6_seq_show(struct seq_file *seq, void *v)
2059 {
2060 struct tcp_iter_state *st;
2061
2062 if (v == SEQ_START_TOKEN) {
2063 seq_puts(seq,
2064 " sl "
2065 "local_address "
2066 "remote_address "
2067 "st tx_queue rx_queue tr tm->when retrnsmt"
2068 " uid timeout inode\n");
2069 goto out;
2070 }
2071 st = seq->private;
2072
2073 switch (st->state) {
2074 case TCP_SEQ_STATE_LISTENING:
2075 case TCP_SEQ_STATE_ESTABLISHED:
2076 get_tcp6_sock(seq, v, st->num);
2077 break;
2078 case TCP_SEQ_STATE_OPENREQ:
2079 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2080 break;
2081 case TCP_SEQ_STATE_TIME_WAIT:
2082 get_timewait6_sock(seq, v, st->num);
2083 break;
2084 }
2085 out:
2086 return 0;
2087 }
2088
2089 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2090 .name = "tcp6",
2091 .family = AF_INET6,
2092 .seq_fops = {
2093 .owner = THIS_MODULE,
2094 },
2095 .seq_ops = {
2096 .show = tcp6_seq_show,
2097 },
2098 };
2099
2100 int tcp6_proc_init(struct net *net)
2101 {
2102 return tcp_proc_register(net, &tcp6_seq_afinfo);
2103 }
2104
2105 void tcp6_proc_exit(struct net *net)
2106 {
2107 tcp_proc_unregister(net, &tcp6_seq_afinfo);
2108 }
2109 #endif
2110
2111 struct proto tcpv6_prot = {
2112 .name = "TCPv6",
2113 .owner = THIS_MODULE,
2114 .close = tcp_close,
2115 .connect = tcp_v6_connect,
2116 .disconnect = tcp_disconnect,
2117 .accept = inet_csk_accept,
2118 .ioctl = tcp_ioctl,
2119 .init = tcp_v6_init_sock,
2120 .destroy = tcp_v6_destroy_sock,
2121 .shutdown = tcp_shutdown,
2122 .setsockopt = tcp_setsockopt,
2123 .getsockopt = tcp_getsockopt,
2124 .recvmsg = tcp_recvmsg,
2125 .backlog_rcv = tcp_v6_do_rcv,
2126 .hash = tcp_v6_hash,
2127 .unhash = inet_unhash,
2128 .get_port = inet_csk_get_port,
2129 .enter_memory_pressure = tcp_enter_memory_pressure,
2130 .sockets_allocated = &tcp_sockets_allocated,
2131 .memory_allocated = &tcp_memory_allocated,
2132 .memory_pressure = &tcp_memory_pressure,
2133 .orphan_count = &tcp_orphan_count,
2134 .sysctl_mem = sysctl_tcp_mem,
2135 .sysctl_wmem = sysctl_tcp_wmem,
2136 .sysctl_rmem = sysctl_tcp_rmem,
2137 .max_header = MAX_TCP_HEADER,
2138 .obj_size = sizeof(struct tcp6_sock),
2139 .twsk_prot = &tcp6_timewait_sock_ops,
2140 .rsk_prot = &tcp6_request_sock_ops,
2141 .h.hashinfo = &tcp_hashinfo,
2142 #ifdef CONFIG_COMPAT
2143 .compat_setsockopt = compat_tcp_setsockopt,
2144 .compat_getsockopt = compat_tcp_getsockopt,
2145 #endif
2146 };
2147
2148 static struct inet6_protocol tcpv6_protocol = {
2149 .handler = tcp_v6_rcv,
2150 .err_handler = tcp_v6_err,
2151 .gso_send_check = tcp_v6_gso_send_check,
2152 .gso_segment = tcp_tso_segment,
2153 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2154 };
2155
2156 static struct inet_protosw tcpv6_protosw = {
2157 .type = SOCK_STREAM,
2158 .protocol = IPPROTO_TCP,
2159 .prot = &tcpv6_prot,
2160 .ops = &inet6_stream_ops,
2161 .capability = -1,
2162 .no_check = 0,
2163 .flags = INET_PROTOSW_PERMANENT |
2164 INET_PROTOSW_ICSK,
2165 };
2166
2167 static int tcpv6_net_init(struct net *net)
2168 {
2169 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2170 SOCK_RAW, IPPROTO_TCP, net);
2171 }
2172
2173 static void tcpv6_net_exit(struct net *net)
2174 {
2175 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2176 }
2177
2178 static struct pernet_operations tcpv6_net_ops = {
2179 .init = tcpv6_net_init,
2180 .exit = tcpv6_net_exit,
2181 };
2182
2183 int __init tcpv6_init(void)
2184 {
2185 int ret;
2186
2187 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2188 if (ret)
2189 goto out;
2190
2191 /* register inet6 protocol */
2192 ret = inet6_register_protosw(&tcpv6_protosw);
2193 if (ret)
2194 goto out_tcpv6_protocol;
2195
2196 ret = register_pernet_subsys(&tcpv6_net_ops);
2197 if (ret)
2198 goto out_tcpv6_protosw;
2199 out:
2200 return ret;
2201
 2202 out_tcpv6_protosw:
 2203 	inet6_unregister_protosw(&tcpv6_protosw);
 2204 out_tcpv6_protocol:
 2205 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2206 goto out;
2207 }
2208
2209 void tcpv6_exit(void)
2210 {
2211 unregister_pernet_subsys(&tcpv6_net_ops);
2212 inet6_unregister_protosw(&tcpv6_protosw);
2213 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2214 }