net/dccp/ipv6.c
/*
 * DCCP over IPv6
 * Linux INET6 implementation
 *
 * Based on net/dccp6/ipv6.c
 *
 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* Socket used for sending RSTs and ACKs */
static struct socket *dccp_v6_ctl_socket;

static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
{
        return inet_csk_get_port(&dccp_hashinfo, sk, snum,
                                 inet6_csk_bind_conflict);
}

static void dccp_v6_hash(struct sock *sk)
{
        if (sk->sk_state != DCCP_CLOSED) {
                if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
                        dccp_hash(sk);
                        return;
                }
                local_bh_disable();
                __inet6_hash(&dccp_hashinfo, sk);
                local_bh_enable();
        }
}

/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
                                          struct in6_addr *saddr,
                                          struct in6_addr *daddr)
{
        return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
                                      struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_hdr *dh = dccp_hdr(skb);

        dccp_csum_outgoing(skb);
        dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
}

static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
                                                  __be16 sport, __be16 dport)
{
        return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
}

static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
{
        return secure_dccpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
                                             skb->nh.ipv6h->saddr.s6_addr32,
                                             dccp_hdr(skb)->dccph_dport,
                                             dccp_hdr(skb)->dccph_sport);
}

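/*
 * ICMPv6 error handler: look up the socket the error refers to, adjust the
 * path MTU on ICMPV6_PKT_TOOBIG, and report other errors to the socket or
 * to the matching request_sock.
 */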
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                        int type, int code, int offset, __be32 info)
{
        struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
        const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
        struct ipv6_pinfo *np;
        struct sock *sk;
        int err;
        __u64 seq;

        sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
                          &hdr->saddr, dh->dccph_sport, inet6_iif(skb));

        if (sk == NULL) {
                ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
                return;
        }

        if (sk->sk_state == DCCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == DCCP_CLOSED)
                goto out;

        np = inet6_sk(sk);

        if (type == ICMPV6_PKT_TOOBIG) {
                struct dst_entry *dst = NULL;

                if (sock_owned_by_user(sk))
                        goto out;
                if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
                        goto out;

                /* icmp should have updated the destination cache entry */
                dst = __sk_dst_check(sk, np->dst_cookie);
                if (dst == NULL) {
                        struct inet_sock *inet = inet_sk(sk);
                        struct flowi fl;

                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle rthdr case. Ignore this complexity
                           for now.
                         */
                        memset(&fl, 0, sizeof(fl));
                        fl.proto = IPPROTO_DCCP;
                        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
                        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
                        fl.oif = sk->sk_bound_dev_if;
                        fl.fl_ip_dport = inet->dport;
                        fl.fl_ip_sport = inet->sport;
                        security_sk_classify_flow(sk, &fl);

                        err = ip6_dst_lookup(sk, &dst, &fl);
                        if (err) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                        err = xfrm_lookup(&dst, &fl, sk, 0);
                        if (err < 0) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }
                } else
                        dst_hold(dst);

                if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
                        dccp_sync_mss(sk, dst_mtu(dst));
                } /* else let the usual retransmit timer handle it */
                dst_release(dst);
                goto out;
        }

        icmpv6_err_convert(type, code, &err);

        seq = DCCP_SKB_CB(skb)->dccpd_seq;
        /* Might be for a request_sock */
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case DCCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
                                           &hdr->daddr, &hdr->saddr,
                                           inet6_iif(skb));
                if (req == NULL)
                        goto out;

                /*
                 * ICMPs are not backlogged, hence we cannot get an established
                 * socket here.
                 */
                BUG_TRAP(req->sk == NULL);

                if (seq != dccp_rsk(req)->dreq_iss) {
                        NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;

        case DCCP_REQUESTING:
        case DCCP_RESPOND: /* Cannot happen.
                              It can, if SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
                        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;
                        /*
                         * Wake people up to see the error
                         * (see connect in sock.c)
                         */
                        sk->sk_error_report(sk);
                        dccp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

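/*
 * Build and transmit a DCCP-Response for a pending connection request,
 * looking up (or reusing) a route for the addresses recorded in the
 * request socket.
 */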
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
                                 struct dst_entry *dst)
{
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;
        struct ipv6_txoptions *opt = NULL;
        struct in6_addr *final_p = NULL, final;
        struct flowi fl;
        int err = -1;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_DCCP;
        ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
        ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
        fl.fl6_flowlabel = 0;
        fl.oif = ireq6->iif;
        fl.fl_ip_dport = inet_rsk(req)->rmt_port;
        fl.fl_ip_sport = inet_sk(sk)->sport;
        security_req_classify_flow(req, &fl);

        if (dst == NULL) {
                opt = np->opt;
                if (opt == NULL &&
                    np->rxopt.bits.osrcrt == 2 &&
                    ireq6->pktopts) {
                        struct sk_buff *pktopts = ireq6->pktopts;
                        struct inet6_skb_parm *rxopt = IP6CB(pktopts);

                        if (rxopt->srcrt)
                                opt = ipv6_invert_rthdr(sk,
                                        (struct ipv6_rt_hdr *)(pktopts->nh.raw +
                                                               rxopt->srcrt));
                }

                if (opt != NULL && opt->srcrt != NULL) {
                        const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

                        ipv6_addr_copy(&final, &fl.fl6_dst);
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                        final_p = &final;
                }

                err = ip6_dst_lookup(sk, &dst, &fl);
                if (err)
                        goto done;

                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);

                err = xfrm_lookup(&dst, &fl, sk, 0);
                if (err < 0)
                        goto done;
        }

        skb = dccp_make_response(sk, dst, req);
        if (skb != NULL) {
                struct dccp_hdr *dh = dccp_hdr(skb);

                dh->dccph_checksum = dccp_v6_csum_finish(skb,
                                                         &ireq6->loc_addr,
                                                         &ireq6->rmt_addr);
                ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
                err = ip6_xmit(sk, skb, &fl, opt, 0);
                err = net_xmit_eval(err);
        }

done:
        if (opt != NULL && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
        if (inet6_rsk(req)->pktopts != NULL)
                kfree_skb(inet6_rsk(req)->pktopts);
}

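/*
 * Send a DCCP-Reset in reply to the received packet rxskb via the control
 * socket; addresses and ports are swapped from the packet being answered.
 */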
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
        struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
        const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
                                       sizeof(struct dccp_hdr_ext) +
                                       sizeof(struct dccp_hdr_reset);
        struct sk_buff *skb;
        struct flowi fl;
        u64 seqno = 0;

        if (rxdh->dccph_type == DCCP_PKT_RESET)
                return;

        if (!ipv6_unicast_destination(rxskb))
                return;

        skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
                        GFP_ATOMIC);
        if (skb == NULL)
                return;

        skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);

        dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);

        /* Swap the send and the receive. */
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_sport = rxdh->dccph_dport;
        dh->dccph_dport = rxdh->dccph_sport;
        dh->dccph_doff  = dccp_hdr_reset_len / 4;
        dh->dccph_x     = 1;
        dccp_hdr_reset(skb)->dccph_reset_code =
                                DCCP_SKB_CB(rxskb)->dccpd_reset_code;

        /* See "8.3.1. Abnormal Termination" in RFC 4340 */
        if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);

        dccp_hdr_set_seq(dh, seqno);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq);

        dccp_csum_outgoing(skb);
        dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr,
                                                 &rxskb->nh.ipv6h->daddr);

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
        ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);

        fl.proto = IPPROTO_DCCP;
        fl.oif = inet6_iif(rxskb);
        fl.fl_ip_dport = dh->dccph_dport;
        fl.fl_ip_sport = dh->dccph_sport;
        security_skb_classify_flow(rxskb, &fl);

        /* sk = NULL, but it is safe for now. RST socket required. */
        if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
                if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
                        ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
                        return;
                }
        }

        kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
        .family         = AF_INET6,
        .obj_size       = sizeof(struct dccp6_request_sock),
        .rtx_syn_ack    = dccp_v6_send_response,
        .send_ack       = dccp_reqsk_send_ack,
        .destructor     = dccp_v6_reqsk_destructor,
        .send_reset     = dccp_v6_ctl_send_reset,
};

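/*
 * Map an incoming packet on a listening socket to either a pending
 * connection request, an already established child socket, or the
 * listening socket itself.
 */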
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);
        const struct ipv6hdr *iph = skb->nh.ipv6h;
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
        struct request_sock *req = inet6_csk_search_req(sk, &prev,
                                                        dh->dccph_sport,
                                                        &iph->saddr,
                                                        &iph->daddr,
                                                        inet6_iif(skb));
        if (req != NULL)
                return dccp_check_req(sk, skb, req, prev);

        nsk = __inet6_lookup_established(&dccp_hashinfo,
                                         &iph->saddr, dh->dccph_sport,
                                         &iph->daddr, ntohs(dh->dccph_dport),
                                         inet6_iif(skb));
        if (nsk != NULL) {
                if (nsk->sk_state != DCCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }

        return sk;
}

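/*
 * Handle a DCCP-Request on a listening socket: validate it, allocate a
 * request_sock, record the peer addresses and pktoptions, and send a
 * Response.
 */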
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct request_sock *req;
        struct dccp_request_sock *dreq;
        struct inet6_request_sock *ireq6;
        struct ipv6_pinfo *np = inet6_sk(sk);
        const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
        __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;

        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        if (dccp_bad_service_code(sk, service)) {
                reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
                goto drop;
        }
        /*
         * There are no SYN attacks on IPv6, yet...
         */
        if (inet_csk_reqsk_queue_is_full(sk))
                goto drop;

        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;

        req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
        if (req == NULL)
                goto drop;

        if (dccp_parse_options(sk, skb))
                goto drop_and_free;

        dccp_reqsk_init(req, skb);

        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;

        ireq6 = inet6_rsk(req);
        ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
        ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
        ireq6->pktopts = NULL;

        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                atomic_inc(&skb->users);
                ireq6->pktopts = skb;
        }
        ireq6->iif = sk->sk_bound_dev_if;

        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
            ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq6->iif = inet6_iif(skb);

        /*
         * Step 3: Process LISTEN state
         *
         *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
         *
         * In fact we defer setting S.GSR, S.SWL, S.SWH to
         * dccp_create_openreq_child.
         */
        dreq = dccp_rsk(req);
        dreq->dreq_isr     = dcb->dccpd_seq;
        dreq->dreq_iss     = dccp_v6_init_sequence(skb);
        dreq->dreq_service = service;

        if (dccp_v6_send_response(sk, req, NULL))
                goto drop_and_free;

        inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
        return 0;

drop_and_free:
        reqsk_free(req);
drop:
        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
        dcb->dccpd_reset_code = reset_code;
        return -1;
}

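/*
 * Create the child socket for an accepted connection, handling both the
 * native IPv6 case and v6-mapped IPv4 requests received on an IPv6 socket.
 */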
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req,
                                              struct dst_entry *dst)
{
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct inet_sock *newinet;
        struct dccp_sock *newdp;
        struct dccp6_sock *newdp6;
        struct sock *newsk;
        struct ipv6_txoptions *opt;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 * v6 mapped
                 */
                newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
                if (newsk == NULL)
                        return NULL;

                newdp6 = (struct dccp6_sock *)newsk;
                newdp = dccp_sk(newsk);
                newinet = inet_sk(newsk);
                newinet->pinet6 = &newdp6->inet6;
                newnp = inet6_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
                              newinet->daddr);

                ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
                              newinet->saddr);

                ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

                inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
                newsk->sk_backlog_rcv = dccp_v4_do_rcv;
                newnp->pktoptions = NULL;
                newnp->opt        = NULL;
                newnp->mcast_oif  = inet6_iif(skb);
                newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, dccp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* It is tricky place. Until this moment IPv4 tcp
                   worked with IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        opt = np->opt;

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (np->rxopt.bits.osrcrt == 2 && opt == NULL && ireq6->pktopts) {
                const struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);

                if (rxopt->srcrt)
                        opt = ipv6_invert_rthdr(sk,
                                (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
                                                       rxopt->srcrt));
        }

        if (dst == NULL) {
                struct in6_addr *final_p = NULL, final;
                struct flowi fl;

                memset(&fl, 0, sizeof(fl));
                fl.proto = IPPROTO_DCCP;
                ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
                if (opt != NULL && opt->srcrt != NULL) {
                        const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

                        ipv6_addr_copy(&final, &fl.fl6_dst);
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                        final_p = &final;
                }
                ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
                fl.oif = sk->sk_bound_dev_if;
                fl.fl_ip_dport = inet_rsk(req)->rmt_port;
                fl.fl_ip_sport = inet_sk(sk)->sport;
                security_sk_classify_flow(sk, &fl);

                if (ip6_dst_lookup(sk, &dst, &fl))
                        goto out;

                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);

                if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                        goto out;
        }

        newsk = dccp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, dccp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        __ip6_dst_store(newsk, dst, NULL, NULL);
        newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
                                                      NETIF_F_TSO);
        newdp6 = (struct dccp6_sock *)newsk;
        newinet = inet_sk(newsk);
        newinet->pinet6 = &newdp6->inet6;
        newdp = dccp_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
        ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
        ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
        newsk->sk_bound_dev_if = ireq6->iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->opt = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
        if (ireq6->pktopts != NULL) {
                newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
                kfree_skb(ireq6->pktopts);
                ireq6->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = inet6_iif(skb);
        newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

        /*
         * Clone native IPv6 options from listening socket (if any)
         *
         * Yes, keeping reference count would be much more clever, but we make
         * one more thing here: reattach optmem to newsk.
         */
        if (opt != NULL) {
                newnp->opt = ipv6_dup_options(newsk, opt);
                if (opt != np->opt)
                        sock_kfree_s(sk, opt, opt->tot_len);
        }

        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newnp->opt != NULL)
                inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                                                     newnp->opt->opt_flen);

        dccp_sync_mss(newsk, dst_mtu(dst));

        newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

        __inet6_hash(&dccp_hashinfo, newsk);
        inet_inherit_port(&dccp_hashinfo, sk, newsk);

        return newsk;

out_overflow:
        NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
        NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
        if (opt != NULL && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *opt_skb = NULL;

        /* Imagine: socket is IPv6. IPv4 packet arrives,
           goes to IPv4 receive handler and backlogged.
           From backlog it always goes here. Kerboom...
           Fortunately, dccp_rcv_established and rcv_established
           handle them correctly, but it is not the case with
           dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_do_rcv(sk, skb);

        if (sk_filter(sk, skb))
                goto discard;

        /*
         * socket locking is here for SMP purposes as backlog rcv is currently
         * called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code, where we
           may make it not affecting IPv4.
           The rest of the code is protocol independent,
           and I do not like the idea to uglify IPv4.

           Actually, all the idea behind IPV6_PKTOPTIONS
           looks not very well thought out. For now we latch
           options, received in the last packet, enqueued
           by tcp. Feel free to propose better solution.
                                               --ANK (980728)
         */
        if (np->rxopt.all)
                /*
                 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
                 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
                 */
                opt_skb = skb_clone(skb, GFP_ATOMIC);

745
746 if (sk->sk_state == DCCP_OPEN) { /* Fast path */
747 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
748 goto reset;
749 if (opt_skb) {
750 /* XXX This is where we would goto ipv6_pktoptions. */
751 __kfree_skb(opt_skb);
752 }
753 return 0;
754 }
755
756 /*
757 * Step 3: Process LISTEN state
758 * If S.state == LISTEN,
759 * If P.type == Request or P contains a valid Init Cookie option,
760 * (* Must scan the packet's options to check for Init
761 * Cookies. Only Init Cookies are processed here,
762 * however; other options are processed in Step 8. This
763 * scan need only be performed if the endpoint uses Init
764 * Cookies *)
765 * (* Generate a new socket and switch to that socket *)
766 * Set S := new socket for this port pair
767 * S.state = RESPOND
768 * Choose S.ISS (initial seqno) or set from Init Cookies
769 * Initialize S.GAR := S.ISS
770 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
771 * Continue with S.state == RESPOND
772 * (* A Response packet will be generated in Step 11 *)
773 * Otherwise,
774 * Generate Reset(No Connection) unless P.type == Reset
775 * Drop packet and return
776 *
777 * NOTE: the check for the packet types is done in
778 * dccp_rcv_state_process
779 */
780 if (sk->sk_state == DCCP_LISTEN) {
781 struct sock *nsk = dccp_v6_hnd_req(sk, skb);
782
783 if (nsk == NULL)
784 goto discard;
785 /*
786 * Queue it on the new socket if the new socket is active,
787 * otherwise we just shortcircuit this and continue with
788 * the new socket..
789 */
790 if (nsk != sk) {
791 if (dccp_child_process(sk, nsk, skb))
792 goto reset;
793 if (opt_skb != NULL)
794 __kfree_skb(opt_skb);
795 return 0;
796 }
797 }
798
799 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
800 goto reset;
801 if (opt_skb) {
802 /* XXX This is where we would goto ipv6_pktoptions. */
803 __kfree_skb(opt_skb);
804 }
805 return 0;
806
807 reset:
808 dccp_v6_ctl_send_reset(sk, skb);
809 discard:
810 if (opt_skb != NULL)
811 __kfree_skb(opt_skb);
812 kfree_skb(skb);
813 return 0;
814 }
815
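/*
 * Main IPv6 receive entry point: validate the packet and its checksum,
 * look up the owning socket and hand the packet to it, or answer with a
 * Reset when no connection exists.
 */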
static int dccp_v6_rcv(struct sk_buff **pskb)
{
        const struct dccp_hdr *dh;
        struct sk_buff *skb = *pskb;
        struct sock *sk;
        int min_cov;

        /*  Step 1: Check header basics */

        if (dccp_invalid_packet(skb))
                goto discard_it;

        /* Step 1: If header checksum is incorrect, drop packet and return. */
        if (dccp_v6_csum_finish(skb, &skb->nh.ipv6h->saddr,
                                &skb->nh.ipv6h->daddr)) {
                DCCP_WARN("dropped packet with invalid checksum\n");
                goto discard_it;
        }

        dh = dccp_hdr(skb);

        DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(skb);
        DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

        if (dccp_packet_without_ack(skb))
                DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
        else
                DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

        /* Step 2:
         *      Look up flow ID in table and get corresponding socket */
        sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
                            dh->dccph_sport,
                            &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
                            inet6_iif(skb));
        /*
         * Step 2:
         *      If no socket ...
         */
        if (sk == NULL) {
                dccp_pr_debug("failed to look up flow ID in table and "
                              "get corresponding socket\n");
                goto no_dccp_socket;
        }

        /*
         * Step 2:
         *      ... or S.state == TIMEWAIT,
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
        if (sk->sk_state == DCCP_TIME_WAIT) {
                dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
                inet_twsk_put(inet_twsk(sk));
                goto no_dccp_socket;
        }

        /*
         * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
         *      o if MinCsCov = 0, only packets with CsCov = 0 are accepted
         *      o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
         */
        min_cov = dccp_sk(sk)->dccps_pcrlen;
        if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
                dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
                              dh->dccph_cscov, min_cov);
                /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
                goto discard_and_relse;
        }

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

        return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
        /*
         * Step 2:
         *      If no socket ...
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
        if (dh->dccph_type != DCCP_PKT_RESET) {
                DCCP_SKB_CB(skb)->dccpd_reset_code =
                                        DCCP_RESET_CODE_NO_CONNECTION;
                dccp_v6_ctl_send_reset(sk, skb);
        }

discard_it:
        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;
}

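/*
 * Active open: resolve the destination (including v4-mapped addresses),
 * set up the route and source address, then start the DCCP handshake.
 */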
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                           int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p = NULL, final;
        struct flowi fl;
        struct dst_entry *dst;
        int addr_type;
        int err;

        dp->dccps_role = DCCP_ROLE_CLIENT;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        memset(&fl, 0, sizeof(fl));

        if (np->sndflow) {
                fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl.fl6_flowlabel);
                if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
                        fl6_sock_release(flowlabel);
                }
        }
        /*
         * connect() to INADDR_ANY means loopback (BSD'ism).
         */
        if (ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if (addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type & IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If interface is set while binding, indices
                         * must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connect to link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
        np->flow_label = fl.fl6_flowlabel;

        /*
         * DCCP over IPv4
         */
        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                icsk->icsk_af_ops = &dccp_ipv6_mapped;
                sk->sk_backlog_rcv = dccp_v4_do_rcv;

                err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
                if (err) {
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &dccp_ipv6_af_ops;
                        sk->sk_backlog_rcv = dccp_v6_do_rcv;
                        goto failure;
                } else {
                        ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->saddr);
                        ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->rcv_saddr);
                }

                return err;
        }

        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;

        fl.proto = IPPROTO_DCCP;
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
        ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
        fl.oif = sk->sk_bound_dev_if;
        fl.fl_ip_dport = usin->sin6_port;
        fl.fl_ip_sport = inet->sport;
        security_sk_classify_flow(sk, &fl);

        if (np->opt != NULL && np->opt->srcrt != NULL) {
                const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;

                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto failure;

        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        err = xfrm_lookup(&dst, &fl, sk, 0);
        if (err < 0)
                goto failure;

        if (saddr == NULL) {
                saddr = &fl.fl6_src;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }

        /* set the source address */
        ipv6_addr_copy(&np->saddr, saddr);
        inet->rcv_saddr = LOOPBACK4_IPV6;

        __ip6_dst_store(sk, dst, NULL, NULL);

        icsk->icsk_ext_hdr_len = 0;
        if (np->opt != NULL)
                icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
                                          np->opt->opt_nflen);

        inet->dport = usin->sin6_port;

        dccp_set_state(sk, DCCP_REQUESTING);
        err = inet6_hash_connect(&dccp_death_row, sk);
        if (err)
                goto late_failure;

        dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
                                                      np->daddr.s6_addr32,
                                                      inet->sport, inet->dport);
        err = dccp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        dccp_set_state(sk, DCCP_CLOSED);
        __sk_dst_reset(sk);
failure:
        inet->dport = 0;
        sk->sk_route_caps = 0;
        return err;
}

static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = dccp_v6_send_check,
        .rebuild_header    = inet6_sk_rebuild_header,
        .conn_request      = dccp_v6_conn_request,
        .syn_recv_sock     = dccp_v6_request_recv_sock,
        .net_header_len    = sizeof(struct ipv6hdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 * DCCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = dccp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .conn_request      = dccp_v6_conn_request,
        .syn_recv_sock     = dccp_v6_request_recv_sock,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
        static __u8 dccp_v6_ctl_sock_initialized;
        int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

        if (err == 0) {
                if (unlikely(!dccp_v6_ctl_sock_initialized))
                        dccp_v6_ctl_sock_initialized = 1;
                inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
        }

        return err;
}

static int dccp_v6_destroy_sock(struct sock *sk)
{
        dccp_destroy_sock(sk);
        return inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
        .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
};

static struct proto dccp_v6_prot = {
        .name              = "DCCPv6",
        .owner             = THIS_MODULE,
        .close             = dccp_close,
        .connect           = dccp_v6_connect,
        .disconnect        = dccp_disconnect,
        .ioctl             = dccp_ioctl,
        .init              = dccp_v6_init_sock,
        .setsockopt        = dccp_setsockopt,
        .getsockopt        = dccp_getsockopt,
        .sendmsg           = dccp_sendmsg,
        .recvmsg           = dccp_recvmsg,
        .backlog_rcv       = dccp_v6_do_rcv,
        .hash              = dccp_v6_hash,
        .unhash            = dccp_unhash,
        .accept            = inet_csk_accept,
        .get_port          = dccp_v6_get_port,
        .shutdown          = dccp_shutdown,
        .destroy           = dccp_v6_destroy_sock,
        .orphan_count      = &dccp_orphan_count,
        .max_header        = MAX_DCCP_HEADER,
        .obj_size          = sizeof(struct dccp6_sock),
        .rsk_prot          = &dccp6_request_sock_ops,
        .twsk_prot         = &dccp6_timewait_sock_ops,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_dccp_setsockopt,
        .compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static struct inet6_protocol dccp_v6_protocol = {
        .handler     = dccp_v6_rcv,
        .err_handler = dccp_v6_err,
        .flags       = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct proto_ops inet6_dccp_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
        .release           = inet6_release,
        .bind              = inet6_bind,
        .connect           = inet_stream_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet6_getname,
        .poll              = dccp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = inet_dccp_listen,
        .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
        .recvmsg           = sock_common_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
        .type       = SOCK_DCCP,
        .protocol   = IPPROTO_DCCP,
        .prot       = &dccp_v6_prot,
        .ops        = &inet6_dccp_ops,
        .capability = -1,
        .flags      = INET_PROTOSW_ICSK,
};

static int __init dccp_v6_init(void)
{
        int err = proto_register(&dccp_v6_prot, 1);

        if (err != 0)
                goto out;

        err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        if (err != 0)
                goto out_unregister_proto;

        inet6_register_protosw(&dccp_v6_protosw);

        err = inet_csk_ctl_sock_create(&dccp_v6_ctl_socket, PF_INET6,
                                       SOCK_DCCP, IPPROTO_DCCP);
        if (err != 0)
                goto out_unregister_protosw;
out:
        return err;
out_unregister_protosw:
        inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
        proto_unregister(&dccp_v6_prot);
        goto out;
}

static void __exit dccp_v6_exit(void)
{
        inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        inet6_unregister_protosw(&dccp_v6_protosw);
        proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly. Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");