net/dccp/ipv6.c
/*
 * DCCP over IPv6
 * Linux INET6 implementation
 *
 * Based on net/dccp6/ipv6.c
 *
 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */

static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

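/*
 * Hash the socket into the established table unless it is closed:
 * IPv4-mapped sockets go through the plain IPv4 inet hash, native IPv6
 * sockets through the IPv6 hash with BHs disabled.
 */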
static void dccp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != DCCP_CLOSED) {
		if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
			inet_hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
					  const struct in6_addr *saddr,
					  const struct in6_addr *daddr)
{
	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dccp_csum_outgoing(skb);
	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
}

static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					     ipv6_hdr(skb)->saddr.s6_addr32,
					     dccp_hdr(skb)->dccph_dport,
					     dccp_hdr(skb)->dccph_sport);
}

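/*
 * ICMPv6 error handler: map an error reported for a DCCP/IPv6 packet onto
 * the owning socket (or pending request), handling redirects, PMTU
 * updates and hard errors.
 */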
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct dccp_sock *dp;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;
	struct net *net = dev_net(skb->dev);

	if (skb->len < offset + sizeof(*dh) ||
	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	sk = inet6_lookup(net, &dccp_hashinfo,
			  &hdr->daddr, dh->dccph_dport,
			  &hdr->saddr, dh->dccph_sport, inet6_iif(skb));

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	seq = dccp_hdr_seq(dh);
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		dst = inet6_csk_update_pmtu(sk, ntohl(info));
		if (!dst)
			goto out;

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
			dccp_sync_mss(sk, dst_mtu(dst));
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
					   &hdr->daddr, &hdr->saddr,
					   inet6_iif(skb));
		if (req == NULL)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (!between48(seq, dccp_rsk(req)->dreq_iss,
			       dccp_rsk(req)->dreq_gss)) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

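/*
 * Build and send the DCCP-Response for a pending connection request,
 * routing it with a flow built from the request's addresses and ports.
 */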
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	int err = -1;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = ireq->ir_v6_rmt_addr;
	fl6.saddr = ireq->ir_v6_loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = ireq->ir_iif;
	fl6.fl6_dport = ireq->ir_rmt_port;
	fl6.fl6_sport = htons(ireq->ir_num);
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		dh->dccph_checksum = dccp_v6_csum_finish(skb,
							 &ireq->ir_v6_loc_addr,
							 &ireq->ir_v6_rmt_addr);
		fl6.daddr = ireq->ir_v6_rmt_addr;
		err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	dst_release(dst);
	return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
	kfree_skb(inet_rsk(req)->pktopts);
}

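/*
 * Send a Reset from the per-net control socket in reply to a packet that
 * has no usable socket attached (never in response to a Reset).
 */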
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
	const struct ipv6hdr *rxip6h;
	struct sk_buff *skb;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct sock *ctl_sk = net->dccp.v6_ctl_sk;
	struct dst_entry *dst;

	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		return;

	rxip6h = ipv6_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
							    &rxip6h->daddr);

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = rxip6h->saddr;
	fl6.saddr = rxip6h->daddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.flowi6_oif = inet6_iif(rxskb);
	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));

	/* sk = NULL, but it is safe for now. RST socket required. */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(skb, dst);
		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
	.syn_ack_timeout = dccp_syn_ack_timeout,
};

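/*
 * On a listening socket, match an incoming packet against pending
 * connection requests or an already established child socket; fall back
 * to the listening socket itself if neither is found.
 */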
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet6_csk_search_req(sk, &prev,
							dh->dccph_sport,
							&iph->saddr,
							&iph->daddr,
							inet6_iif(skb));
	if (req != NULL)
		return dccp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	return sk;
}

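/*
 * Handle a DCCP-Request arriving on a listening socket: allocate and
 * initialise a request_sock, parse options and send the Response.
 */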
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		return 0;	/* discard, don't send a reset here */

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&dccp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
	 */
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_gsr	   = dreq->dreq_isr;
	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
	dreq->dreq_gss	   = dreq->dreq_iss;
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}

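/*
 * Create the child socket for a completed handshake, covering both the
 * IPv4-mapped and the native IPv6 cases.
 */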
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp6_sock *newdp6;
	struct sock *newsk;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p, final;
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_DCCP;
		fl6.daddr = ireq->ir_v6_rmt_addr;
		final_p = fl6_update_dst(&fl6, np->opt, &final);
		fl6.saddr = ireq->ir_v6_loc_addr;
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.fl6_dport = ireq->ir_rmt_port;
		fl6.fl6_sport = htons(ireq->ir_num);
		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
		if (IS_ERR(dst))
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	__ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from the listening socket (if any)
	 *
	 * Yes, keeping a reference count would be much more clever, but we do
	 * one more thing there: reattach optmem to newsk.
	 */
	if (np->opt != NULL)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		dccp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   the options received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		/*
		 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
		 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
		 */
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb) {
			/* XXX This is where we would goto ipv6_pktoptions. */
			__kfree_skb(opt_skb);
		}
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb) {
		/* XXX This is where we would goto ipv6_pktoptions. */
		__kfree_skb(opt_skb);
	}
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}

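/*
 * Main receive routine, called from the IPv6 protocol handler: validate
 * the packet, look up the owning socket and hand the skb over to it.
 */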
static int dccp_v6_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	struct sock *sk;
	int min_cov;

	/* Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* Step 1: If header checksum is incorrect, drop packet and return. */
	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
				dh->dccph_sport, dh->dccph_dport,
				inet6_iif(skb));
	/*
	 * Step 2:
	 *	If no socket ...
	 */
	if (sk == NULL) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(sk, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;
}

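/*
 * Active open: resolve the destination (including IPv4-mapped
 * addresses), pick the source address and route, then start the
 * handshake.
 */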
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		}
		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &sk->sk_v6_rcv_saddr);

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt != NULL)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	inet->inet_dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      sk->sk_v6_daddr.s6_addr32,
						      inet->inet_sport,
						      inet->inet_dport);
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

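/* AF-specific operations used by native (non-mapped) DCCPv6 sockets */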
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = dccp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 * DCCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = dccp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc(), so they need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
	}

	return err;
}

static void dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};

static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = dccp_v6_hash,
	.unhash		   = inet_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = inet_csk_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_v6_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.slab_flags	   = SLAB_DESTROY_BY_RCU,
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
	.h.hashinfo	   = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static const struct inet6_protocol dccp_v6_protocol = {
	.handler	= dccp_v6_rcv,
	.err_handler	= dccp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static const struct proto_ops inet6_dccp_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet6_getname,
	.poll		   = dccp_poll,
	.ioctl		   = inet6_ioctl,
	.listen		   = inet_dccp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.flags		= INET_PROTOSW_ICSK,
};

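/*
 * Per-network-namespace setup: each namespace gets its own control
 * socket, used above for sending RSTs and ACKs.
 */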
static int __net_init dccp_v6_init_net(struct net *net)
{
	if (dccp_hashinfo.bhash == NULL)
		return -ESOCKTNOSUPPORT;

	return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
				    SOCK_DCCP, IPPROTO_DCCP, net);
}

static void __net_exit dccp_v6_exit_net(struct net *net)
{
	inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}

static struct pernet_operations dccp_v6_ops = {
	.init   = dccp_v6_init_net,
	.exit   = dccp_v6_exit_net,
};

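/*
 * Module init/exit: register the DCCPv6 proto, the IPPROTO_DCCP inet6
 * protocol handler, the protosw entry and the per-net operations, and
 * unwind in reverse order on failure.
 */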
static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err != 0)
		goto out;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_proto;

	inet6_register_protosw(&dccp_v6_protosw);

	err = register_pernet_subsys(&dccp_v6_ops);
	if (err != 0)
		goto out_destroy_ctl_sock;
out:
	return err;

out_destroy_ctl_sock:
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
	proto_unregister(&dccp_v6_prot);
	goto out;
}

static void __exit dccp_v6_exit(void)
{
	unregister_pernet_subsys(&dccp_v6_ops);
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly. Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");