1 /* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001-2003 Intel Corp.
6 * Copyright (c) 2001-2002 Nokia, Inc.
7 * Copyright (c) 2001 La Monte H.P. Yarroll
8 *
9 * This file is part of the SCTP kernel implementation
10 *
11 * These functions interface with the sockets layer to implement the
12 * SCTP Extensions for the Sockets API.
13 *
14 * Note that the descriptions from the specification are USER level
15 * functions--this file is the functions which populate the struct proto
16 * for SCTP which is the BOTTOM of the sockets interface.
17 *
18 * This SCTP implementation is free software;
19 * you can redistribute it and/or modify it under the terms of
20 * the GNU General Public License as published by
21 * the Free Software Foundation; either version 2, or (at your option)
22 * any later version.
23 *
24 * This SCTP implementation is distributed in the hope that it
25 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
26 * ************************
27 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
28 * See the GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with GNU CC; see the file COPYING. If not, see
32 * <http://www.gnu.org/licenses/>.
33 *
34 * Please send any bug reports or fixes you make to the
35 * email address(es):
36 * lksctp developers <linux-sctp@vger.kernel.org>
37 *
38 * Written or modified by:
39 * La Monte H.P. Yarroll <piggy@acm.org>
40 * Narasimha Budihal <narsi@refcode.org>
41 * Karl Knutson <karl@athena.chicago.il.us>
42 * Jon Grimm <jgrimm@us.ibm.com>
43 * Xingang Guo <xingang.guo@intel.com>
44 * Daisy Chang <daisyc@us.ibm.com>
45 * Sridhar Samudrala <samudrala@us.ibm.com>
46 * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
47 * Ardelle Fan <ardelle.fan@intel.com>
48 * Ryan Layer <rmlayer@us.ibm.com>
49 * Anup Pemmaiah <pemmaiah@cc.usu.edu>
50 * Kevin Gao <kevin.gao@intel.com>
51 */
52
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54
55 #include <linux/types.h>
56 #include <linux/kernel.h>
57 #include <linux/wait.h>
58 #include <linux/time.h>
59 #include <linux/ip.h>
60 #include <linux/capability.h>
61 #include <linux/fcntl.h>
62 #include <linux/poll.h>
63 #include <linux/init.h>
64 #include <linux/crypto.h>
65 #include <linux/slab.h>
66 #include <linux/file.h>
67 #include <linux/compat.h>
68
69 #include <net/ip.h>
70 #include <net/icmp.h>
71 #include <net/route.h>
72 #include <net/ipv6.h>
73 #include <net/inet_common.h>
74 #include <net/busy_poll.h>
75
76 #include <linux/socket.h> /* for sa_family_t */
77 #include <linux/export.h>
78 #include <net/sock.h>
79 #include <net/sctp/sctp.h>
80 #include <net/sctp/sm.h>
81
82 /* Forward declarations for internal helper functions. */
83 static int sctp_writeable(struct sock *sk);
84 static void sctp_wfree(struct sk_buff *skb);
85 static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
86 size_t msg_len);
87 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
88 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
89 static int sctp_wait_for_accept(struct sock *sk, long timeo);
90 static void sctp_wait_for_close(struct sock *sk, long timeo);
91 static void sctp_destruct_sock(struct sock *sk);
92 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
93 union sctp_addr *addr, int len);
94 static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
95 static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
96 static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
97 static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
98 static int sctp_send_asconf(struct sctp_association *asoc,
99 struct sctp_chunk *chunk);
100 static int sctp_do_bind(struct sock *, union sctp_addr *, int);
101 static int sctp_autobind(struct sock *sk);
102 static void sctp_sock_migrate(struct sock *, struct sock *,
103 struct sctp_association *, sctp_socket_type_t);
104
105 extern struct kmem_cache *sctp_bucket_cachep;
106 extern long sysctl_sctp_mem[3];
107 extern int sysctl_sctp_rmem[3];
108 extern int sysctl_sctp_wmem[3];
109
110 static int sctp_memory_pressure;
111 static atomic_long_t sctp_memory_allocated;
112 struct percpu_counter sctp_sockets_allocated;
113
114 static void sctp_enter_memory_pressure(struct sock *sk)
115 {
116 sctp_memory_pressure = 1;
117 }
118
119
120 /* Get the sndbuf space available at the time on the association. */
121 static inline int sctp_wspace(struct sctp_association *asoc)
122 {
123 int amt;
124
125 if (asoc->ep->sndbuf_policy)
126 amt = asoc->sndbuf_used;
127 else
128 amt = sk_wmem_alloc_get(asoc->base.sk);
129
130 if (amt >= asoc->base.sk->sk_sndbuf) {
131 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
132 amt = 0;
133 else {
134 amt = sk_stream_wspace(asoc->base.sk);
135 if (amt < 0)
136 amt = 0;
137 }
138 } else {
139 amt = asoc->base.sk->sk_sndbuf - amt;
140 }
141 return amt;
142 }
143
144 /* Increment the used sndbuf space count of the corresponding association by
145 * the size of the outgoing data chunk.
146 * Also, set the skb destructor for sndbuf accounting later.
147 *
148 * Since it is always 1-1 between chunk and skb, and also a new skb is always
149 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
150 * destructor in the data chunk skb for the purpose of the sndbuf space
151 * tracking.
152 */
153 static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
154 {
155 struct sctp_association *asoc = chunk->asoc;
156 struct sock *sk = asoc->base.sk;
157
158 /* The sndbuf space is tracked per association. */
159 sctp_association_hold(asoc);
160
161 skb_set_owner_w(chunk->skb, sk);
162
163 chunk->skb->destructor = sctp_wfree;
164 /* Save the chunk pointer in skb for sctp_wfree to use later. */
165 skb_shinfo(chunk->skb)->destructor_arg = chunk;
166
167 asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
168 sizeof(struct sk_buff) +
169 sizeof(struct sctp_chunk);
170
171 atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
172 sk->sk_wmem_queued += chunk->skb->truesize;
173 sk_mem_charge(sk, chunk->skb->truesize);
174 }
175
176 /* Verify that this is a valid address. */
177 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
178 int len)
179 {
180 struct sctp_af *af;
181
182 /* Verify basic sockaddr. */
183 af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
184 if (!af)
185 return -EINVAL;
186
187 /* Is this a valid SCTP address? */
188 if (!af->addr_valid(addr, sctp_sk(sk), NULL))
189 return -EINVAL;
190
191 if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
192 return -EINVAL;
193
194 return 0;
195 }
196
197 /* Look up the association by its id. If this is not a UDP-style
198 * socket, the ID field is always ignored.
199 */
200 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
201 {
202 struct sctp_association *asoc = NULL;
203
204 /* If this is not a UDP-style socket, assoc id should be ignored. */
205 if (!sctp_style(sk, UDP)) {
206 /* Return NULL if the socket state is not ESTABLISHED. It
207 * could be a TCP-style listening socket or a socket which
208 * hasn't yet called connect() to establish an association.
209 */
210 if (!sctp_sstate(sk, ESTABLISHED))
211 return NULL;
212
213 /* Get the first and the only association from the list. */
214 if (!list_empty(&sctp_sk(sk)->ep->asocs))
215 asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
216 struct sctp_association, asocs);
217 return asoc;
218 }
219
220 /* Otherwise this is a UDP-style socket. */
221 if (!id || (id == (sctp_assoc_t)-1))
222 return NULL;
223
224 spin_lock_bh(&sctp_assocs_id_lock);
225 asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
226 spin_unlock_bh(&sctp_assocs_id_lock);
227
228 if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
229 return NULL;
230
231 return asoc;
232 }
233
234 /* Look up the transport from an address and an assoc id. If both address and
235 * id are specified, the associations matching the address and the id should be
236 * the same.
237 */
238 static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
239 struct sockaddr_storage *addr,
240 sctp_assoc_t id)
241 {
242 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
243 struct sctp_transport *transport;
244 union sctp_addr *laddr = (union sctp_addr *)addr;
245
246 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
247 laddr,
248 &transport);
249
250 if (!addr_asoc)
251 return NULL;
252
253 id_asoc = sctp_id2assoc(sk, id);
254 if (id_asoc && (id_asoc != addr_asoc))
255 return NULL;
256
257 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
258 (union sctp_addr *)addr);
259
260 return transport;
261 }
262
263 /* API 3.1.2 bind() - UDP Style Syntax
264 * The syntax of bind() is,
265 *
266 * ret = bind(int sd, struct sockaddr *addr, int addrlen);
267 *
268 * sd - the socket descriptor returned by socket().
269 * addr - the address structure (struct sockaddr_in or struct
270 * sockaddr_in6 [RFC 2553]),
271 * addr_len - the size of the address structure.
272 */
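/* Illustrative user-space sketch (not part of this file): binding a
 * one-to-many SCTP socket to a wildcard IPv4 address, matching the
 * bind() syntax described above.  The port number is an assumption
 * made only for this example.
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
 *	};
 *
 *	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind");
 */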
273 static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
274 {
275 int retval = 0;
276
277 lock_sock(sk);
278
279 pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
280 addr, addr_len);
281
282 /* Disallow binding twice. */
283 if (!sctp_sk(sk)->ep->base.bind_addr.port)
284 retval = sctp_do_bind(sk, (union sctp_addr *)addr,
285 addr_len);
286 else
287 retval = -EINVAL;
288
289 release_sock(sk);
290
291 return retval;
292 }
293
294 static long sctp_get_port_local(struct sock *, union sctp_addr *);
295
296 /* Verify this is a valid sockaddr. */
297 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
298 union sctp_addr *addr, int len)
299 {
300 struct sctp_af *af;
301
302 /* Check minimum size. */
303 if (len < sizeof (struct sockaddr))
304 return NULL;
305
306 	/* V4 mapped addresses are really of the AF_INET family */
307 if (addr->sa.sa_family == AF_INET6 &&
308 ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
309 if (!opt->pf->af_supported(AF_INET, opt))
310 return NULL;
311 } else {
312 /* Does this PF support this AF? */
313 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
314 return NULL;
315 }
316
317 /* If we get this far, af is valid. */
318 af = sctp_get_af_specific(addr->sa.sa_family);
319
320 if (len < af->sockaddr_len)
321 return NULL;
322
323 return af;
324 }
325
326 /* Bind a local address either to an endpoint or to an association. */
327 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
328 {
329 struct net *net = sock_net(sk);
330 struct sctp_sock *sp = sctp_sk(sk);
331 struct sctp_endpoint *ep = sp->ep;
332 struct sctp_bind_addr *bp = &ep->base.bind_addr;
333 struct sctp_af *af;
334 unsigned short snum;
335 int ret = 0;
336
337 /* Common sockaddr verification. */
338 af = sctp_sockaddr_af(sp, addr, len);
339 if (!af) {
340 pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
341 __func__, sk, addr, len);
342 return -EINVAL;
343 }
344
345 snum = ntohs(addr->v4.sin_port);
346
347 pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
348 __func__, sk, &addr->sa, bp->port, snum, len);
349
350 /* PF specific bind() address verification. */
351 if (!sp->pf->bind_verify(sp, addr))
352 return -EADDRNOTAVAIL;
353
354 /* We must either be unbound, or bind to the same port.
355 * It's OK to allow 0 ports if we are already bound.
356 	 * We'll just inherit an already bound port in this case
357 */
358 if (bp->port) {
359 if (!snum)
360 snum = bp->port;
361 else if (snum != bp->port) {
362 pr_debug("%s: new port %d doesn't match existing port "
363 "%d\n", __func__, snum, bp->port);
364 return -EINVAL;
365 }
366 }
367
368 if (snum && snum < PROT_SOCK &&
369 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
370 return -EACCES;
371
372 /* See if the address matches any of the addresses we may have
373 * already bound before checking against other endpoints.
374 */
375 if (sctp_bind_addr_match(bp, addr, sp))
376 return -EINVAL;
377
378 /* Make sure we are allowed to bind here.
379 * The function sctp_get_port_local() does duplicate address
380 * detection.
381 */
382 addr->v4.sin_port = htons(snum);
383 if ((ret = sctp_get_port_local(sk, addr))) {
384 return -EADDRINUSE;
385 }
386
387 /* Refresh ephemeral port. */
388 if (!bp->port)
389 bp->port = inet_sk(sk)->inet_num;
390
391 /* Add the address to the bind address list.
392 * Use GFP_ATOMIC since BHs will be disabled.
393 */
394 ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);
395
396 /* Copy back into socket for getsockname() use. */
397 if (!ret) {
398 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
399 sp->pf->to_sk_saddr(addr, sk);
400 }
401
402 return ret;
403 }
404
405 /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
406 *
407 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
408 * at any one time. If a sender, after sending an ASCONF chunk, decides
409 * it needs to transfer another ASCONF Chunk, it MUST wait until the
410 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
411 * subsequent ASCONF. Note this restriction binds each side, so at any
412 * time two ASCONF may be in-transit on any given association (one sent
413 * from each endpoint).
414 */
415 static int sctp_send_asconf(struct sctp_association *asoc,
416 struct sctp_chunk *chunk)
417 {
418 struct net *net = sock_net(asoc->base.sk);
419 int retval = 0;
420
421 /* If there is an outstanding ASCONF chunk, queue it for later
422 * transmission.
423 */
424 if (asoc->addip_last_asconf) {
425 list_add_tail(&chunk->list, &asoc->addip_chunk_list);
426 goto out;
427 }
428
429 /* Hold the chunk until an ASCONF_ACK is received. */
430 sctp_chunk_hold(chunk);
431 retval = sctp_primitive_ASCONF(net, asoc, chunk);
432 if (retval)
433 sctp_chunk_free(chunk);
434 else
435 asoc->addip_last_asconf = chunk;
436
437 out:
438 return retval;
439 }
440
441 /* Add a list of addresses as bind addresses to local endpoint or
442 * association.
443 *
444 * Basically run through each address specified in the addrs/addrcnt
445 * array/length pair, determine if it is IPv6 or IPv4 and call
446 * sctp_do_bind() on it.
447 *
448 * If any of them fails, then the operation will be reversed and the
449 * ones that were added will be removed.
450 *
451 * Only sctp_setsockopt_bindx() is supposed to call this function.
452 */
453 static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
454 {
455 int cnt;
456 int retval = 0;
457 void *addr_buf;
458 struct sockaddr *sa_addr;
459 struct sctp_af *af;
460
461 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
462 addrs, addrcnt);
463
464 addr_buf = addrs;
465 for (cnt = 0; cnt < addrcnt; cnt++) {
466 		/* The list may contain either IPv4 or IPv6 addresses;
467 		 * determine the address length for walking through the list.
468 */
469 sa_addr = addr_buf;
470 af = sctp_get_af_specific(sa_addr->sa_family);
471 if (!af) {
472 retval = -EINVAL;
473 goto err_bindx_add;
474 }
475
476 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
477 af->sockaddr_len);
478
479 addr_buf += af->sockaddr_len;
480
481 err_bindx_add:
482 if (retval < 0) {
483 /* Failed. Cleanup the ones that have been added */
484 if (cnt > 0)
485 sctp_bindx_rem(sk, addrs, cnt);
486 return retval;
487 }
488 }
489
490 return retval;
491 }
492
493 /* Send an ASCONF chunk with Add IP address parameters to all the peers of the
494 * associations that are part of the endpoint indicating that a list of local
495 * addresses are added to the endpoint.
496 *
497 * If any of the addresses is already in the bind address list of the
498 * association, we do not send the chunk for that association. But it will not
499 * affect other associations.
500 *
501 * Only sctp_setsockopt_bindx() is supposed to call this function.
502 */
503 static int sctp_send_asconf_add_ip(struct sock *sk,
504 struct sockaddr *addrs,
505 int addrcnt)
506 {
507 struct net *net = sock_net(sk);
508 struct sctp_sock *sp;
509 struct sctp_endpoint *ep;
510 struct sctp_association *asoc;
511 struct sctp_bind_addr *bp;
512 struct sctp_chunk *chunk;
513 struct sctp_sockaddr_entry *laddr;
514 union sctp_addr *addr;
515 union sctp_addr saveaddr;
516 void *addr_buf;
517 struct sctp_af *af;
518 struct list_head *p;
519 int i;
520 int retval = 0;
521
522 if (!net->sctp.addip_enable)
523 return retval;
524
525 sp = sctp_sk(sk);
526 ep = sp->ep;
527
528 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
529 __func__, sk, addrs, addrcnt);
530
531 list_for_each_entry(asoc, &ep->asocs, asocs) {
532 if (!asoc->peer.asconf_capable)
533 continue;
534
535 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
536 continue;
537
538 if (!sctp_state(asoc, ESTABLISHED))
539 continue;
540
541 /* Check if any address in the packed array of addresses is
542 * in the bind address list of the association. If so,
543 * do not send the asconf chunk to its peer, but continue with
544 * other associations.
545 */
546 addr_buf = addrs;
547 for (i = 0; i < addrcnt; i++) {
548 addr = addr_buf;
549 af = sctp_get_af_specific(addr->v4.sin_family);
550 if (!af) {
551 retval = -EINVAL;
552 goto out;
553 }
554
555 if (sctp_assoc_lookup_laddr(asoc, addr))
556 break;
557
558 addr_buf += af->sockaddr_len;
559 }
560 if (i < addrcnt)
561 continue;
562
563 /* Use the first valid address in bind addr list of
564 * association as Address Parameter of ASCONF CHUNK.
565 */
566 bp = &asoc->base.bind_addr;
567 p = bp->address_list.next;
568 laddr = list_entry(p, struct sctp_sockaddr_entry, list);
569 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
570 addrcnt, SCTP_PARAM_ADD_IP);
571 if (!chunk) {
572 retval = -ENOMEM;
573 goto out;
574 }
575
576 /* Add the new addresses to the bind address list with
577 * use_as_src set to 0.
578 */
579 addr_buf = addrs;
580 for (i = 0; i < addrcnt; i++) {
581 addr = addr_buf;
582 af = sctp_get_af_specific(addr->v4.sin_family);
583 memcpy(&saveaddr, addr, af->sockaddr_len);
584 retval = sctp_add_bind_addr(bp, &saveaddr,
585 SCTP_ADDR_NEW, GFP_ATOMIC);
586 addr_buf += af->sockaddr_len;
587 }
588 if (asoc->src_out_of_asoc_ok) {
589 struct sctp_transport *trans;
590
591 list_for_each_entry(trans,
592 &asoc->peer.transport_addr_list, transports) {
593 /* Clear the source and route cache */
594 dst_release(trans->dst);
595 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
596 2*asoc->pathmtu, 4380));
597 trans->ssthresh = asoc->peer.i.a_rwnd;
598 trans->rto = asoc->rto_initial;
599 sctp_max_rto(asoc, trans);
600 trans->rtt = trans->srtt = trans->rttvar = 0;
601 sctp_transport_route(trans, NULL,
602 sctp_sk(asoc->base.sk));
603 }
604 }
605 retval = sctp_send_asconf(asoc, chunk);
606 }
607
608 out:
609 return retval;
610 }
611
612 /* Remove a list of addresses from bind addresses list. Do not remove the
613 * last address.
614 *
615 * Basically run through each address specified in the addrs/addrcnt
616 * array/length pair, determine if it is IPv6 or IPv4 and call
617 * sctp_del_bind() on it.
618 *
619 * If any of them fails, then the operation will be reversed and the
620 * ones that were removed will be added back.
621 *
622 * At least one address has to be left; if only one address is
623 * available, the operation will return -EBUSY.
624 *
625 * Only sctp_setsockopt_bindx() is supposed to call this function.
626 */
627 static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
628 {
629 struct sctp_sock *sp = sctp_sk(sk);
630 struct sctp_endpoint *ep = sp->ep;
631 int cnt;
632 struct sctp_bind_addr *bp = &ep->base.bind_addr;
633 int retval = 0;
634 void *addr_buf;
635 union sctp_addr *sa_addr;
636 struct sctp_af *af;
637
638 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
639 __func__, sk, addrs, addrcnt);
640
641 addr_buf = addrs;
642 for (cnt = 0; cnt < addrcnt; cnt++) {
643 /* If the bind address list is empty or if there is only one
644 * bind address, there is nothing more to be removed (we need
645 * at least one address here).
646 */
647 if (list_empty(&bp->address_list) ||
648 (sctp_list_single_entry(&bp->address_list))) {
649 retval = -EBUSY;
650 goto err_bindx_rem;
651 }
652
653 sa_addr = addr_buf;
654 af = sctp_get_af_specific(sa_addr->sa.sa_family);
655 if (!af) {
656 retval = -EINVAL;
657 goto err_bindx_rem;
658 }
659
660 if (!af->addr_valid(sa_addr, sp, NULL)) {
661 retval = -EADDRNOTAVAIL;
662 goto err_bindx_rem;
663 }
664
665 if (sa_addr->v4.sin_port &&
666 sa_addr->v4.sin_port != htons(bp->port)) {
667 retval = -EINVAL;
668 goto err_bindx_rem;
669 }
670
671 if (!sa_addr->v4.sin_port)
672 sa_addr->v4.sin_port = htons(bp->port);
673
674 /* FIXME - There is probably a need to check if sk->sk_saddr and
675 * sk->sk_rcv_addr are currently set to one of the addresses to
676 * be removed. This is something which needs to be looked into
677 * when we are fixing the outstanding issues with multi-homing
678 * socket routing and failover schemes. Refer to comments in
679 * sctp_do_bind(). -daisy
680 */
681 retval = sctp_del_bind_addr(bp, sa_addr);
682
683 addr_buf += af->sockaddr_len;
684 err_bindx_rem:
685 if (retval < 0) {
686 			/* Failed. Add the ones that have been removed back */
687 if (cnt > 0)
688 sctp_bindx_add(sk, addrs, cnt);
689 return retval;
690 }
691 }
692
693 return retval;
694 }
695
696 /* Send an ASCONF chunk with Delete IP address parameters to all the peers of
697 * the associations that are part of the endpoint indicating that a list of
698 * local addresses are removed from the endpoint.
699 *
700 * If any of the addresses is already in the bind address list of the
701 * association, we do not send the chunk for that association. But it will not
702 * affect other associations.
703 *
704 * Only sctp_setsockopt_bindx() is supposed to call this function.
705 */
706 static int sctp_send_asconf_del_ip(struct sock *sk,
707 struct sockaddr *addrs,
708 int addrcnt)
709 {
710 struct net *net = sock_net(sk);
711 struct sctp_sock *sp;
712 struct sctp_endpoint *ep;
713 struct sctp_association *asoc;
714 struct sctp_transport *transport;
715 struct sctp_bind_addr *bp;
716 struct sctp_chunk *chunk;
717 union sctp_addr *laddr;
718 void *addr_buf;
719 struct sctp_af *af;
720 struct sctp_sockaddr_entry *saddr;
721 int i;
722 int retval = 0;
723 int stored = 0;
724
725 chunk = NULL;
726 if (!net->sctp.addip_enable)
727 return retval;
728
729 sp = sctp_sk(sk);
730 ep = sp->ep;
731
732 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
733 __func__, sk, addrs, addrcnt);
734
735 list_for_each_entry(asoc, &ep->asocs, asocs) {
736
737 if (!asoc->peer.asconf_capable)
738 continue;
739
740 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
741 continue;
742
743 if (!sctp_state(asoc, ESTABLISHED))
744 continue;
745
746 /* Check if any address in the packed array of addresses is
747 * not present in the bind address list of the association.
748 * If so, do not send the asconf chunk to its peer, but
749 * continue with other associations.
750 */
751 addr_buf = addrs;
752 for (i = 0; i < addrcnt; i++) {
753 laddr = addr_buf;
754 af = sctp_get_af_specific(laddr->v4.sin_family);
755 if (!af) {
756 retval = -EINVAL;
757 goto out;
758 }
759
760 if (!sctp_assoc_lookup_laddr(asoc, laddr))
761 break;
762
763 addr_buf += af->sockaddr_len;
764 }
765 if (i < addrcnt)
766 continue;
767
768 /* Find one address in the association's bind address list
769 * that is not in the packed array of addresses. This is to
770 * make sure that we do not delete all the addresses in the
771 * association.
772 */
773 bp = &asoc->base.bind_addr;
774 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
775 addrcnt, sp);
776 if ((laddr == NULL) && (addrcnt == 1)) {
777 if (asoc->asconf_addr_del_pending)
778 continue;
779 asoc->asconf_addr_del_pending =
780 kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
781 if (asoc->asconf_addr_del_pending == NULL) {
782 retval = -ENOMEM;
783 goto out;
784 }
785 asoc->asconf_addr_del_pending->sa.sa_family =
786 addrs->sa_family;
787 asoc->asconf_addr_del_pending->v4.sin_port =
788 htons(bp->port);
789 if (addrs->sa_family == AF_INET) {
790 struct sockaddr_in *sin;
791
792 sin = (struct sockaddr_in *)addrs;
793 asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
794 } else if (addrs->sa_family == AF_INET6) {
795 struct sockaddr_in6 *sin6;
796
797 sin6 = (struct sockaddr_in6 *)addrs;
798 asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
799 }
800
801 pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
802 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
803 asoc->asconf_addr_del_pending);
804
805 asoc->src_out_of_asoc_ok = 1;
806 stored = 1;
807 goto skip_mkasconf;
808 }
809
810 if (laddr == NULL)
811 return -EINVAL;
812
813 /* We do not need RCU protection throughout this loop
814 * because this is done under a socket lock from the
815 * setsockopt call.
816 */
817 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
818 SCTP_PARAM_DEL_IP);
819 if (!chunk) {
820 retval = -ENOMEM;
821 goto out;
822 }
823
824 skip_mkasconf:
825 /* Reset use_as_src flag for the addresses in the bind address
826 * list that are to be deleted.
827 */
828 addr_buf = addrs;
829 for (i = 0; i < addrcnt; i++) {
830 laddr = addr_buf;
831 af = sctp_get_af_specific(laddr->v4.sin_family);
832 list_for_each_entry(saddr, &bp->address_list, list) {
833 if (sctp_cmp_addr_exact(&saddr->a, laddr))
834 saddr->state = SCTP_ADDR_DEL;
835 }
836 addr_buf += af->sockaddr_len;
837 }
838
839 /* Update the route and saddr entries for all the transports
840 * as some of the addresses in the bind address list are
841 * about to be deleted and cannot be used as source addresses.
842 */
843 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
844 transports) {
845 dst_release(transport->dst);
846 sctp_transport_route(transport, NULL,
847 sctp_sk(asoc->base.sk));
848 }
849
850 if (stored)
851 /* We don't need to transmit ASCONF */
852 continue;
853 retval = sctp_send_asconf(asoc, chunk);
854 }
855 out:
856 return retval;
857 }
858
859 /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
860 int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
861 {
862 struct sock *sk = sctp_opt2sk(sp);
863 union sctp_addr *addr;
864 struct sctp_af *af;
865
866 /* It is safe to write port space in caller. */
867 addr = &addrw->a;
868 addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
869 af = sctp_get_af_specific(addr->sa.sa_family);
870 if (!af)
871 return -EINVAL;
872 if (sctp_verify_addr(sk, addr, af->sockaddr_len))
873 return -EINVAL;
874
875 if (addrw->state == SCTP_ADDR_NEW)
876 return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
877 else
878 return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
879 }
880
881 /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
882 *
883 * API 8.1
884 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
885 * int flags);
886 *
887 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
888 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
889 * or IPv6 addresses.
890 *
891 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
892 * Section 3.1.2 for this usage.
893 *
894 * addrs is a pointer to an array of one or more socket addresses. Each
895 * address is contained in its appropriate structure (i.e. struct
896 * sockaddr_in or struct sockaddr_in6) the family of the address type
897 * must be used to distinguish the address length (note that this
898 * representation is termed a "packed array" of addresses). The caller
899 * specifies the number of addresses in the array with addrcnt.
900 *
901 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
902 * -1, and sets errno to the appropriate error code.
903 *
904 * For SCTP, the port given in each socket address must be the same, or
905 * sctp_bindx() will fail, setting errno to EINVAL.
906 *
907 * The flags parameter is formed from the bitwise OR of zero or more of
908 * the following currently defined flags:
909 *
910 * SCTP_BINDX_ADD_ADDR
911 *
912 * SCTP_BINDX_REM_ADDR
913 *
914 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
915 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
916 * addresses from the association. The two flags are mutually exclusive;
917 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
918 * not remove all addresses from an association; sctp_bindx() will
919 * reject such an attempt with EINVAL.
920 *
921 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
922 * additional addresses with an endpoint after calling bind(). Or use
923 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
924 * socket is associated with so that no new association accepted will be
925  * associated with those addresses. If the endpoint supports dynamic
926  * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
927  * may cause an endpoint to send the appropriate message to the peer to
928  * change the peer's address lists.
929 *
930 * Adding and removing addresses from a connected association is
931 * optional functionality. Implementations that do not support this
932 * functionality should return EOPNOTSUPP.
933 *
934 * Basically do nothing but copying the addresses from user to kernel
935 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
936 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
937 * from userspace.
938 *
939  * We don't use copy_from_user() for optimization: we first do the
940  * sanity checks (a fast buffer size check and an access_ok() check
941  * that the pointer is healthy); if both succeed, we can alloc the memory
942  * (expensive operation) needed to copy the data to kernel space. Then we
943  * do the copying without checking the user space area again
944  * (__copy_from_user()).
945 *
946 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
947 * it.
948 *
949 * sk The sk of the socket
950 * addrs The pointer to the addresses in user land
951 * addrssize Size of the addrs buffer
952 * op Operation to perform (add or remove, see the flags of
953 * sctp_bindx)
954 *
955 * Returns 0 if ok, <0 errno code on error.
956 */
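/* Illustrative user-space sketch (not part of this file): adding two
 * extra local addresses to an already bound one-to-many socket with
 * sctp_bindx() from lksctp-tools, as described in API 8.1 above.  The
 * addresses and port are assumptions made only for this example; the
 * port must match the one the socket is already bound to.
 *
 *	struct sockaddr_in pair[2] = {
 *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
 *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1", &pair[0].sin_addr);
 *	inet_pton(AF_INET, "192.0.2.2", &pair[1].sin_addr);
 *
 *	if (sctp_bindx(sd, (struct sockaddr *)pair, 2,
 *		       SCTP_BINDX_ADD_ADDR) < 0)
 *		perror("sctp_bindx");
 */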
957 static int sctp_setsockopt_bindx(struct sock *sk,
958 struct sockaddr __user *addrs,
959 int addrs_size, int op)
960 {
961 struct sockaddr *kaddrs;
962 int err;
963 int addrcnt = 0;
964 int walk_size = 0;
965 struct sockaddr *sa_addr;
966 void *addr_buf;
967 struct sctp_af *af;
968
969 pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
970 __func__, sk, addrs, addrs_size, op);
971
972 if (unlikely(addrs_size <= 0))
973 return -EINVAL;
974
975 /* Check the user passed a healthy pointer. */
976 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
977 return -EFAULT;
978
979 /* Alloc space for the address array in kernel memory. */
980 kaddrs = kmalloc(addrs_size, GFP_KERNEL);
981 if (unlikely(!kaddrs))
982 return -ENOMEM;
983
984 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
985 kfree(kaddrs);
986 return -EFAULT;
987 }
988
989 /* Walk through the addrs buffer and count the number of addresses. */
990 addr_buf = kaddrs;
991 while (walk_size < addrs_size) {
992 if (walk_size + sizeof(sa_family_t) > addrs_size) {
993 kfree(kaddrs);
994 return -EINVAL;
995 }
996
997 sa_addr = addr_buf;
998 af = sctp_get_af_specific(sa_addr->sa_family);
999
1000 /* If the address family is not supported or if this address
1001 * causes the address buffer to overflow return EINVAL.
1002 */
1003 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
1004 kfree(kaddrs);
1005 return -EINVAL;
1006 }
1007 addrcnt++;
1008 addr_buf += af->sockaddr_len;
1009 walk_size += af->sockaddr_len;
1010 }
1011
1012 /* Do the work. */
1013 switch (op) {
1014 case SCTP_BINDX_ADD_ADDR:
1015 err = sctp_bindx_add(sk, kaddrs, addrcnt);
1016 if (err)
1017 goto out;
1018 err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
1019 break;
1020
1021 case SCTP_BINDX_REM_ADDR:
1022 err = sctp_bindx_rem(sk, kaddrs, addrcnt);
1023 if (err)
1024 goto out;
1025 err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
1026 break;
1027
1028 default:
1029 err = -EINVAL;
1030 break;
1031 }
1032
1033 out:
1034 kfree(kaddrs);
1035
1036 return err;
1037 }
1038
1039 /* __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, int addrs_size, sctp_assoc_t *assoc_id)
1040 *
1041 * Common routine for handling connect() and sctp_connectx().
1042 * Connect will come in with just a single address.
1043 */
1044 static int __sctp_connect(struct sock *sk,
1045 struct sockaddr *kaddrs,
1046 int addrs_size,
1047 sctp_assoc_t *assoc_id)
1048 {
1049 struct net *net = sock_net(sk);
1050 struct sctp_sock *sp;
1051 struct sctp_endpoint *ep;
1052 struct sctp_association *asoc = NULL;
1053 struct sctp_association *asoc2;
1054 struct sctp_transport *transport;
1055 union sctp_addr to;
1056 sctp_scope_t scope;
1057 long timeo;
1058 int err = 0;
1059 int addrcnt = 0;
1060 int walk_size = 0;
1061 union sctp_addr *sa_addr = NULL;
1062 void *addr_buf;
1063 unsigned short port;
1064 unsigned int f_flags = 0;
1065
1066 sp = sctp_sk(sk);
1067 ep = sp->ep;
1068
1069 /* connect() cannot be done on a socket that is already in ESTABLISHED
1070 * state - UDP-style peeled off socket or a TCP-style socket that
1071 * is already connected.
1072 * It cannot be done even on a TCP-style listening socket.
1073 */
1074 if (sctp_sstate(sk, ESTABLISHED) ||
1075 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
1076 err = -EISCONN;
1077 goto out_free;
1078 }
1079
1080 /* Walk through the addrs buffer and count the number of addresses. */
1081 addr_buf = kaddrs;
1082 while (walk_size < addrs_size) {
1083 struct sctp_af *af;
1084
1085 if (walk_size + sizeof(sa_family_t) > addrs_size) {
1086 err = -EINVAL;
1087 goto out_free;
1088 }
1089
1090 sa_addr = addr_buf;
1091 af = sctp_get_af_specific(sa_addr->sa.sa_family);
1092
1093 /* If the address family is not supported or if this address
1094 * causes the address buffer to overflow return EINVAL.
1095 */
1096 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
1097 err = -EINVAL;
1098 goto out_free;
1099 }
1100
1101 port = ntohs(sa_addr->v4.sin_port);
1102
1103 /* Save current address so we can work with it */
1104 memcpy(&to, sa_addr, af->sockaddr_len);
1105
1106 err = sctp_verify_addr(sk, &to, af->sockaddr_len);
1107 if (err)
1108 goto out_free;
1109
1110 /* Make sure the destination port is correctly set
1111 * in all addresses.
1112 */
1113 if (asoc && asoc->peer.port && asoc->peer.port != port) {
1114 err = -EINVAL;
1115 goto out_free;
1116 }
1117
1118 /* Check if there already is a matching association on the
1119 * endpoint (other than the one created here).
1120 */
1121 asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
1122 if (asoc2 && asoc2 != asoc) {
1123 if (asoc2->state >= SCTP_STATE_ESTABLISHED)
1124 err = -EISCONN;
1125 else
1126 err = -EALREADY;
1127 goto out_free;
1128 }
1129
1130 /* If we could not find a matching association on the endpoint,
1131 * make sure that there is no peeled-off association matching
1132 * the peer address even on another socket.
1133 */
1134 if (sctp_endpoint_is_peeled_off(ep, &to)) {
1135 err = -EADDRNOTAVAIL;
1136 goto out_free;
1137 }
1138
1139 if (!asoc) {
1140 /* If a bind() or sctp_bindx() is not called prior to
1141 * an sctp_connectx() call, the system picks an
1142 * ephemeral port and will choose an address set
1143 * equivalent to binding with a wildcard address.
1144 */
1145 if (!ep->base.bind_addr.port) {
1146 if (sctp_autobind(sk)) {
1147 err = -EAGAIN;
1148 goto out_free;
1149 }
1150 } else {
1151 /*
1152 * If an unprivileged user inherits a 1-many
1153 * style socket with open associations on a
1154 * privileged port, it MAY be permitted to
1155 * accept new associations, but it SHOULD NOT
1156 * be permitted to open new associations.
1157 */
1158 if (ep->base.bind_addr.port < PROT_SOCK &&
1159 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
1160 err = -EACCES;
1161 goto out_free;
1162 }
1163 }
1164
1165 scope = sctp_scope(&to);
1166 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
1167 if (!asoc) {
1168 err = -ENOMEM;
1169 goto out_free;
1170 }
1171
1172 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
1173 GFP_KERNEL);
1174 if (err < 0) {
1175 goto out_free;
1176 }
1177
1178 }
1179
1180 /* Prime the peer's transport structures. */
1181 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
1182 SCTP_UNKNOWN);
1183 if (!transport) {
1184 err = -ENOMEM;
1185 goto out_free;
1186 }
1187
1188 addrcnt++;
1189 addr_buf += af->sockaddr_len;
1190 walk_size += af->sockaddr_len;
1191 }
1192
1193 /* In case the user of sctp_connectx() wants an association
1194 * id back, assign one now.
1195 */
1196 if (assoc_id) {
1197 err = sctp_assoc_set_id(asoc, GFP_KERNEL);
1198 if (err < 0)
1199 goto out_free;
1200 }
1201
1202 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1203 if (err < 0) {
1204 goto out_free;
1205 }
1206
1207 /* Initialize sk's dport and daddr for getpeername() */
1208 inet_sk(sk)->inet_dport = htons(asoc->peer.port);
1209 sp->pf->to_sk_daddr(sa_addr, sk);
1210 sk->sk_err = 0;
1211
1212 /* in-kernel sockets don't generally have a file allocated to them
1213 * if all they do is call sock_create_kern().
1214 */
1215 if (sk->sk_socket->file)
1216 f_flags = sk->sk_socket->file->f_flags;
1217
1218 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1219
1220 err = sctp_wait_for_connect(asoc, &timeo);
1221 if ((err == 0 || err == -EINPROGRESS) && assoc_id)
1222 *assoc_id = asoc->assoc_id;
1223
1224 /* Don't free association on exit. */
1225 asoc = NULL;
1226
1227 out_free:
1228 pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
1229 __func__, asoc, kaddrs, err);
1230
1231 if (asoc) {
1232 /* sctp_primitive_ASSOCIATE may have added this association
1233 		 * to the hash table; try to unhash it just in case. It's a noop
1234 		 * if it wasn't hashed, so we're safe.
1235 */
1236 sctp_unhash_established(asoc);
1237 sctp_association_free(asoc);
1238 }
1239 return err;
1240 }
1241
1242 /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
1243 *
1244 * API 8.9
1245 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
1246 * sctp_assoc_t *asoc);
1247 *
1248 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
1249 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
1250 * or IPv6 addresses.
1251 *
1252 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
1253 * Section 3.1.2 for this usage.
1254 *
1255 * addrs is a pointer to an array of one or more socket addresses. Each
1256 * address is contained in its appropriate structure (i.e. struct
1257 * sockaddr_in or struct sockaddr_in6) the family of the address type
1258  * must be used to distinguish the address length (note that this
1259 * representation is termed a "packed array" of addresses). The caller
1260 * specifies the number of addresses in the array with addrcnt.
1261 *
1262 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
1263 * the association id of the new association. On failure, sctp_connectx()
1264 * returns -1, and sets errno to the appropriate error code. The assoc_id
1265 * is not touched by the kernel.
1266 *
1267 * For SCTP, the port given in each socket address must be the same, or
1268 * sctp_connectx() will fail, setting errno to EINVAL.
1269 *
1270 * An application can use sctp_connectx to initiate an association with
1271 * an endpoint that is multi-homed. Much like sctp_bindx() this call
1272 * allows a caller to specify multiple addresses at which a peer can be
1273 * reached. The way the SCTP stack uses the list of addresses to set up
1274 * the association is implementation dependent. This function only
1275 * specifies that the stack will try to make use of all the addresses in
1276 * the list when needed.
1277 *
1278 * Note that the list of addresses passed in is only used for setting up
1279 * the association. It does not necessarily equal the set of addresses
1280 * the peer uses for the resulting association. If the caller wants to
1281 * find out the set of peer addresses, it must use sctp_getpaddrs() to
1282 * retrieve them after the association has been set up.
1283 *
1284  * Basically do nothing but copy the addresses from user to kernel
1285  * land and invoke __sctp_connect(). This is used for tunneling
1286 * the sctp_connectx() request through sctp_setsockopt() from userspace.
1287 *
1288  * We don't use copy_from_user() for optimization: we first do the
1289  * sanity checks (a fast buffer size check and an access_ok() check
1290  * that the pointer is healthy); if both succeed, we can alloc the memory
1291  * (expensive operation) needed to copy the data to kernel space. Then we
1292  * do the copying without checking the user space area again
1293  * (__copy_from_user()).
1294 *
1295 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
1296 * it.
1297 *
1298 * sk The sk of the socket
1299 * addrs The pointer to the addresses in user land
1300 * addrssize Size of the addrs buffer
1301 *
1302 * Returns >=0 if ok, <0 errno code on error.
1303 */
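/* Illustrative user-space sketch (not part of this file): initiating an
 * association with a multi-homed peer using sctp_connectx() from
 * lksctp-tools, as described in API 8.9 above.  The peer addresses and
 * port are assumptions made only for this example.
 *
 *	struct sockaddr_in peers[2] = {
 *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
 *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
 *	};
 *	sctp_assoc_t assoc_id;
 *
 *	inet_pton(AF_INET, "198.51.100.1", &peers[0].sin_addr);
 *	inet_pton(AF_INET, "198.51.100.2", &peers[1].sin_addr);
 *
 *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &assoc_id) < 0)
 *		perror("sctp_connectx");
 */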
1304 static int __sctp_setsockopt_connectx(struct sock *sk,
1305 struct sockaddr __user *addrs,
1306 int addrs_size,
1307 sctp_assoc_t *assoc_id)
1308 {
1309 int err = 0;
1310 struct sockaddr *kaddrs;
1311
1312 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
1313 __func__, sk, addrs, addrs_size);
1314
1315 if (unlikely(addrs_size <= 0))
1316 return -EINVAL;
1317
1318 /* Check the user passed a healthy pointer. */
1319 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
1320 return -EFAULT;
1321
1322 /* Alloc space for the address array in kernel memory. */
1323 kaddrs = kmalloc(addrs_size, GFP_KERNEL);
1324 if (unlikely(!kaddrs))
1325 return -ENOMEM;
1326
1327 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
1328 err = -EFAULT;
1329 } else {
1330 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
1331 }
1332
1333 kfree(kaddrs);
1334
1335 return err;
1336 }
1337
1338 /*
1339 * This is an older interface. It's kept for backward compatibility
1340 * to the option that doesn't provide association id.
1341 */
1342 static int sctp_setsockopt_connectx_old(struct sock *sk,
1343 struct sockaddr __user *addrs,
1344 int addrs_size)
1345 {
1346 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
1347 }
1348
1349 /*
1350  * New interface for the API. Since the API is done with a socket
1351  * option, to make it simple we feed back the association id as a return
1352  * indication to the call. Error is always negative and association id is
1353 * always positive.
1354 */
1355 static int sctp_setsockopt_connectx(struct sock *sk,
1356 struct sockaddr __user *addrs,
1357 int addrs_size)
1358 {
1359 sctp_assoc_t assoc_id = 0;
1360 int err = 0;
1361
1362 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
1363
1364 if (err)
1365 return err;
1366 else
1367 return assoc_id;
1368 }
1369
1370 /*
1371 * New (hopefully final) interface for the API.
1372  * We use the sctp_getaddrs_old structure so that the user-space library
1373  * can avoid any unnecessary allocations. The only difference
1374 * is that we store the actual length of the address buffer into the
1375 * addrs_num structure member. That way we can re-use the existing
1376 * code.
1377 */
1378 #ifdef CONFIG_COMPAT
1379 struct compat_sctp_getaddrs_old {
1380 sctp_assoc_t assoc_id;
1381 s32 addr_num;
1382 compat_uptr_t addrs; /* struct sockaddr * */
1383 };
1384 #endif
1385
1386 static int sctp_getsockopt_connectx3(struct sock *sk, int len,
1387 char __user *optval,
1388 int __user *optlen)
1389 {
1390 struct sctp_getaddrs_old param;
1391 sctp_assoc_t assoc_id = 0;
1392 int err = 0;
1393
1394 #ifdef CONFIG_COMPAT
1395 if (is_compat_task()) {
1396 struct compat_sctp_getaddrs_old param32;
1397
1398 if (len < sizeof(param32))
1399 return -EINVAL;
1400 if (copy_from_user(&param32, optval, sizeof(param32)))
1401 return -EFAULT;
1402
1403 param.assoc_id = param32.assoc_id;
1404 param.addr_num = param32.addr_num;
1405 param.addrs = compat_ptr(param32.addrs);
1406 } else
1407 #endif
1408 {
1409 if (len < sizeof(param))
1410 return -EINVAL;
1411 if (copy_from_user(&param, optval, sizeof(param)))
1412 return -EFAULT;
1413 }
1414
1415 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
1416 param.addrs, param.addr_num,
1417 &assoc_id);
1418 if (err == 0 || err == -EINPROGRESS) {
1419 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
1420 return -EFAULT;
1421 if (put_user(sizeof(assoc_id), optlen))
1422 return -EFAULT;
1423 }
1424
1425 return err;
1426 }
1427
1428 /* API 3.1.4 close() - UDP Style Syntax
1429 * Applications use close() to perform graceful shutdown (as described in
1430 * Section 10.1 of [SCTP]) on ALL the associations currently represented
1431 * by a UDP-style socket.
1432 *
1433 * The syntax is
1434 *
1435 * ret = close(int sd);
1436 *
1437 * sd - the socket descriptor of the associations to be closed.
1438 *
1439 * To gracefully shutdown a specific association represented by the
1440 * UDP-style socket, an application should use the sendmsg() call,
1441 * passing no user data, but including the appropriate flag in the
1442 * ancillary data (see Section xxxx).
1443 *
1444 * If sd in the close() call is a branched-off socket representing only
1445 * one association, the shutdown is performed on that association only.
1446 *
1447 * 4.1.6 close() - TCP Style Syntax
1448 *
1449 * Applications use close() to gracefully close down an association.
1450 *
1451 * The syntax is:
1452 *
1453 * int close(int sd);
1454 *
1455 * sd - the socket descriptor of the association to be closed.
1456 *
1457 * After an application calls close() on a socket descriptor, no further
1458 * socket operations will succeed on that descriptor.
1459 *
1460 * API 7.1.4 SO_LINGER
1461 *
1462 * An application using the TCP-style socket can use this option to
1463 * perform the SCTP ABORT primitive. The linger option structure is:
1464 *
1465 * struct linger {
1466 * int l_onoff; // option on/off
1467 * int l_linger; // linger time
1468 * };
1469 *
1470 * To enable the option, set l_onoff to 1. If the l_linger value is set
1471 * to 0, calling close() is the same as the ABORT primitive. If the
1472 * value is set to a negative value, the setsockopt() call will return
1473 * an error. If the value is set to a positive value linger_time, the
1474 * close() can be blocked for at most linger_time ms. If the graceful
1475 * shutdown phase does not finish during this period, close() will
1476 * return but the graceful shutdown phase continues in the system.
1477 */
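/* Illustrative user-space sketch (not part of this file): enabling
 * SO_LINGER with l_linger set to 0 so that a subsequent close() behaves
 * like the ABORT primitive, as described in API 7.1.4 above.
 *
 *	struct linger lng = { .l_onoff = 1, .l_linger = 0 };
 *
 *	if (setsockopt(sd, SOL_SOCKET, SO_LINGER, &lng, sizeof(lng)) < 0)
 *		perror("setsockopt");
 *	close(sd);
 */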
1478 static void sctp_close(struct sock *sk, long timeout)
1479 {
1480 struct net *net = sock_net(sk);
1481 struct sctp_endpoint *ep;
1482 struct sctp_association *asoc;
1483 struct list_head *pos, *temp;
1484 unsigned int data_was_unread;
1485
1486 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
1487
1488 lock_sock(sk);
1489 sk->sk_shutdown = SHUTDOWN_MASK;
1490 sk->sk_state = SCTP_SS_CLOSING;
1491
1492 ep = sctp_sk(sk)->ep;
1493
1494 /* Clean up any skbs sitting on the receive queue. */
1495 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
1496 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
1497
1498 /* Walk all associations on an endpoint. */
1499 list_for_each_safe(pos, temp, &ep->asocs) {
1500 asoc = list_entry(pos, struct sctp_association, asocs);
1501
1502 if (sctp_style(sk, TCP)) {
1503 /* A closed association can still be in the list if
1504 * it belongs to a TCP-style listening socket that is
1505 * not yet accepted. If so, free it. If not, send an
1506 * ABORT or SHUTDOWN based on the linger options.
1507 */
1508 if (sctp_state(asoc, CLOSED)) {
1509 sctp_unhash_established(asoc);
1510 sctp_association_free(asoc);
1511 continue;
1512 }
1513 }
1514
1515 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
1516 !skb_queue_empty(&asoc->ulpq.reasm) ||
1517 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
1518 struct sctp_chunk *chunk;
1519
1520 chunk = sctp_make_abort_user(asoc, NULL, 0);
1521 if (chunk)
1522 sctp_primitive_ABORT(net, asoc, chunk);
1523 } else
1524 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1525 }
1526
1527 /* On a TCP-style socket, block for at most linger_time if set. */
1528 if (sctp_style(sk, TCP) && timeout)
1529 sctp_wait_for_close(sk, timeout);
1530
1531 /* This will run the backlog queue. */
1532 release_sock(sk);
1533
1534 /* Supposedly, no process has access to the socket, but
1535 * the net layers still may.
1536 */
1537 local_bh_disable();
1538 bh_lock_sock(sk);
1539
1540 	/* Hold the sock, since sk_common_release() will call sock_put()
1541 * and we have just a little more cleanup.
1542 */
1543 sock_hold(sk);
1544 sk_common_release(sk);
1545
1546 bh_unlock_sock(sk);
1547 local_bh_enable();
1548
1549 sock_put(sk);
1550
1551 SCTP_DBG_OBJCNT_DEC(sock);
1552 }
1553
1554 /* Handle EPIPE error. */
1555 static int sctp_error(struct sock *sk, int flags, int err)
1556 {
1557 if (err == -EPIPE)
1558 err = sock_error(sk) ? : -EPIPE;
1559 if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
1560 send_sig(SIGPIPE, current, 0);
1561 return err;
1562 }
1563
1564 /* API 3.1.3 sendmsg() - UDP Style Syntax
1565 *
1566 * An application uses sendmsg() and recvmsg() calls to transmit data to
1567 * and receive data from its peer.
1568 *
1569 * ssize_t sendmsg(int socket, const struct msghdr *message,
1570 * int flags);
1571 *
1572 * socket - the socket descriptor of the endpoint.
1573 * message - pointer to the msghdr structure which contains a single
1574 * user message and possibly some ancillary data.
1575 *
1576 * See Section 5 for complete description of the data
1577 * structures.
1578 *
1579 * flags - flags sent or received with the user message, see Section
1580 * 5 for complete description of the flags.
1581 *
1582 * Note: This function could use a rewrite especially when explicit
1583 * connect support comes in.
1584 */
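/* Illustrative user-space sketch (not part of this file): sending one
 * message with a struct sctp_sndrcvinfo passed as SCTP_SNDRCV ancillary
 * data to select outgoing stream 1.  The peer address, port and payload
 * are assumptions made only for this example.
 *
 *	union { char buf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *		struct cmsghdr align; } cbuf;
 *	struct sockaddr_in peer = { .sin_family = AF_INET,
 *				    .sin_port = htons(5000) };
 *	char data[] = "hello";
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *	struct sctp_sndrcvinfo *sinfo;
 *
 *	inet_pton(AF_INET, "203.0.113.1", &peer.sin_addr);
 *	memset(&cbuf, 0, sizeof(cbuf));
 *
 *	msg.msg_name       = &peer;
 *	msg.msg_namelen    = sizeof(peer);
 *	msg.msg_iov        = &iov;
 *	msg.msg_iovlen     = 1;
 *	msg.msg_control    = cbuf.buf;
 *	msg.msg_controllen = sizeof(cbuf.buf);
 *
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type  = SCTP_SNDRCV;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(*sinfo));
 *	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *	sinfo->sinfo_stream = 1;
 *
 *	if (sendmsg(sd, &msg, 0) < 0)
 *		perror("sendmsg");
 */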
1585 /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
1586
1587 static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
1588
1589 static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1590 struct msghdr *msg, size_t msg_len)
1591 {
1592 struct net *net = sock_net(sk);
1593 struct sctp_sock *sp;
1594 struct sctp_endpoint *ep;
1595 struct sctp_association *new_asoc = NULL, *asoc = NULL;
1596 struct sctp_transport *transport, *chunk_tp;
1597 struct sctp_chunk *chunk;
1598 union sctp_addr to;
1599 struct sockaddr *msg_name = NULL;
1600 struct sctp_sndrcvinfo default_sinfo;
1601 struct sctp_sndrcvinfo *sinfo;
1602 struct sctp_initmsg *sinit;
1603 sctp_assoc_t associd = 0;
1604 sctp_cmsgs_t cmsgs = { NULL };
1605 sctp_scope_t scope;
1606 bool fill_sinfo_ttl = false, wait_connect = false;
1607 struct sctp_datamsg *datamsg;
1608 int msg_flags = msg->msg_flags;
1609 __u16 sinfo_flags = 0;
1610 long timeo;
1611 int err;
1612
1613 err = 0;
1614 sp = sctp_sk(sk);
1615 ep = sp->ep;
1616
1617 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
1618 msg, msg_len, ep);
1619
1620 /* We cannot send a message over a TCP-style listening socket. */
1621 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
1622 err = -EPIPE;
1623 goto out_nounlock;
1624 }
1625
1626 /* Parse out the SCTP CMSGs. */
1627 err = sctp_msghdr_parse(msg, &cmsgs);
1628 if (err) {
1629 pr_debug("%s: msghdr parse err:%x\n", __func__, err);
1630 goto out_nounlock;
1631 }
1632
1633 /* Fetch the destination address for this packet. This
1634 * address only selects the association--it is not necessarily
1635 * the address we will send to.
1636 * For a peeled-off socket, msg_name is ignored.
1637 */
1638 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
1639 int msg_namelen = msg->msg_namelen;
1640
1641 err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
1642 msg_namelen);
1643 if (err)
1644 return err;
1645
1646 if (msg_namelen > sizeof(to))
1647 msg_namelen = sizeof(to);
1648 memcpy(&to, msg->msg_name, msg_namelen);
1649 msg_name = msg->msg_name;
1650 }
1651
1652 sinit = cmsgs.init;
1653 if (cmsgs.sinfo != NULL) {
1654 memset(&default_sinfo, 0, sizeof(default_sinfo));
1655 default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
1656 default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
1657 default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
1658 default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
1659 default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;
1660
1661 sinfo = &default_sinfo;
1662 fill_sinfo_ttl = true;
1663 } else {
1664 sinfo = cmsgs.srinfo;
1665 }
1666 /* Did the user specify SNDINFO/SNDRCVINFO? */
1667 if (sinfo) {
1668 sinfo_flags = sinfo->sinfo_flags;
1669 associd = sinfo->sinfo_assoc_id;
1670 }
1671
1672 pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
1673 msg_len, sinfo_flags);
1674
1675 /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
1676 if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
1677 err = -EINVAL;
1678 goto out_nounlock;
1679 }
1680
1681 /* If SCTP_EOF is set, no data can be sent. Disallow sending zero
1682 * length messages when SCTP_EOF|SCTP_ABORT is not set.
1683 * If SCTP_ABORT is set, the message length could be non zero with
1684 * the msg_iov set to the user abort reason.
1685 */
1686 if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
1687 (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
1688 err = -EINVAL;
1689 goto out_nounlock;
1690 }
1691
1692 /* If SCTP_ADDR_OVER is set, there must be an address
1693 * specified in msg_name.
1694 */
1695 if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
1696 err = -EINVAL;
1697 goto out_nounlock;
1698 }
1699
1700 transport = NULL;
1701
1702 pr_debug("%s: about to look up association\n", __func__);
1703
1704 lock_sock(sk);
1705
1706 /* If a msg_name has been specified, assume this is to be used. */
1707 if (msg_name) {
1708 /* Look for a matching association on the endpoint. */
1709 asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
1710 if (!asoc) {
1711 /* If we could not find a matching association on the
1712 * endpoint, make sure that it is not a TCP-style
1713 * socket that already has an association or there is
1714 * no peeled-off association on another socket.
1715 */
1716 if ((sctp_style(sk, TCP) &&
1717 sctp_sstate(sk, ESTABLISHED)) ||
1718 sctp_endpoint_is_peeled_off(ep, &to)) {
1719 err = -EADDRNOTAVAIL;
1720 goto out_unlock;
1721 }
1722 }
1723 } else {
1724 asoc = sctp_id2assoc(sk, associd);
1725 if (!asoc) {
1726 err = -EPIPE;
1727 goto out_unlock;
1728 }
1729 }
1730
1731 if (asoc) {
1732 pr_debug("%s: just looked up association:%p\n", __func__, asoc);
1733
1734 /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
1735 * socket that has an association in CLOSED state. This can
1736 * happen when an accepted socket has an association that is
1737 * already CLOSED.
1738 */
1739 if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
1740 err = -EPIPE;
1741 goto out_unlock;
1742 }
1743
1744 if (sinfo_flags & SCTP_EOF) {
1745 pr_debug("%s: shutting down association:%p\n",
1746 __func__, asoc);
1747
1748 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1749 err = 0;
1750 goto out_unlock;
1751 }
1752 if (sinfo_flags & SCTP_ABORT) {
1753
1754 chunk = sctp_make_abort_user(asoc, msg, msg_len);
1755 if (!chunk) {
1756 err = -ENOMEM;
1757 goto out_unlock;
1758 }
1759
1760 pr_debug("%s: aborting association:%p\n",
1761 __func__, asoc);
1762
1763 sctp_primitive_ABORT(net, asoc, chunk);
1764 err = 0;
1765 goto out_unlock;
1766 }
1767 }
1768
1769 /* Do we need to create the association? */
1770 if (!asoc) {
1771 pr_debug("%s: there is no association yet\n", __func__);
1772
1773 if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
1774 err = -EINVAL;
1775 goto out_unlock;
1776 }
1777
1778 /* Check for invalid stream against the stream counts,
1779 * either the default or the user specified stream counts.
1780 */
1781 if (sinfo) {
1782 if (!sinit || !sinit->sinit_num_ostreams) {
1783 /* Check against the defaults. */
1784 if (sinfo->sinfo_stream >=
1785 sp->initmsg.sinit_num_ostreams) {
1786 err = -EINVAL;
1787 goto out_unlock;
1788 }
1789 } else {
1790 /* Check against the requested. */
1791 if (sinfo->sinfo_stream >=
1792 sinit->sinit_num_ostreams) {
1793 err = -EINVAL;
1794 goto out_unlock;
1795 }
1796 }
1797 }
1798
1799 /*
1800 * API 3.1.2 bind() - UDP Style Syntax
1801 * If a bind() or sctp_bindx() is not called prior to a
1802 * sendmsg() call that initiates a new association, the
1803 * system picks an ephemeral port and will choose an address
1804 * set equivalent to binding with a wildcard address.
1805 */
1806 if (!ep->base.bind_addr.port) {
1807 if (sctp_autobind(sk)) {
1808 err = -EAGAIN;
1809 goto out_unlock;
1810 }
1811 } else {
1812 /*
1813 * If an unprivileged user inherits a one-to-many
1814 * style socket with open associations on a privileged
1815 * port, it MAY be permitted to accept new associations,
1816 * but it SHOULD NOT be permitted to open new
1817 * associations.
1818 */
1819 if (ep->base.bind_addr.port < PROT_SOCK &&
1820 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
1821 err = -EACCES;
1822 goto out_unlock;
1823 }
1824 }
1825
1826 scope = sctp_scope(&to);
1827 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
1828 if (!new_asoc) {
1829 err = -ENOMEM;
1830 goto out_unlock;
1831 }
1832 asoc = new_asoc;
1833 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
1834 if (err < 0) {
1835 err = -ENOMEM;
1836 goto out_free;
1837 }
1838
1839 /* If the SCTP_INIT ancillary data is specified, set all
1840 * the association init values accordingly.
1841 */
1842 if (sinit) {
1843 if (sinit->sinit_num_ostreams) {
1844 asoc->c.sinit_num_ostreams =
1845 sinit->sinit_num_ostreams;
1846 }
1847 if (sinit->sinit_max_instreams) {
1848 asoc->c.sinit_max_instreams =
1849 sinit->sinit_max_instreams;
1850 }
1851 if (sinit->sinit_max_attempts) {
1852 asoc->max_init_attempts
1853 = sinit->sinit_max_attempts;
1854 }
1855 if (sinit->sinit_max_init_timeo) {
1856 asoc->max_init_timeo =
1857 msecs_to_jiffies(sinit->sinit_max_init_timeo);
1858 }
1859 }
1860
1861 /* Prime the peer's transport structures. */
1862 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
1863 if (!transport) {
1864 err = -ENOMEM;
1865 goto out_free;
1866 }
1867 }
1868
1869 /* ASSERT: we have a valid association at this point. */
1870 pr_debug("%s: we have a valid association\n", __func__);
1871
1872 if (!sinfo) {
1873 /* If the user didn't specify SNDINFO/SNDRCVINFO, make up
1874 * one with some defaults.
1875 */
1876 memset(&default_sinfo, 0, sizeof(default_sinfo));
1877 default_sinfo.sinfo_stream = asoc->default_stream;
1878 default_sinfo.sinfo_flags = asoc->default_flags;
1879 default_sinfo.sinfo_ppid = asoc->default_ppid;
1880 default_sinfo.sinfo_context = asoc->default_context;
1881 default_sinfo.sinfo_timetolive = asoc->default_timetolive;
1882 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
1883
1884 sinfo = &default_sinfo;
1885 } else if (fill_sinfo_ttl) {
1886 /* In case SNDINFO was specified, we still need to fill
1887 * it with a default ttl from the assoc here.
1888 */
1889 sinfo->sinfo_timetolive = asoc->default_timetolive;
1890 }
1891
1892 /* API 7.1.7, the sndbuf size per association bounds the
1893 * maximum size of data that can be sent in a single send call.
1894 */
1895 if (msg_len > sk->sk_sndbuf) {
1896 err = -EMSGSIZE;
1897 goto out_free;
1898 }
1899
1900 if (asoc->pmtu_pending)
1901 sctp_assoc_pending_pmtu(sk, asoc);
1902
1903 /* If fragmentation is disabled and the message length exceeds the
1904 * association fragmentation point, return EMSGSIZE. The I-D
1905 * does not specify what this error is, but this looks like
1906 * a great fit.
1907 */
1908 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
1909 err = -EMSGSIZE;
1910 goto out_free;
1911 }
1912
1913 /* Check for invalid stream. */
1914 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
1915 err = -EINVAL;
1916 goto out_free;
1917 }
1918
1919 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1920 if (!sctp_wspace(asoc)) {
1921 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
1922 if (err)
1923 goto out_free;
1924 }
1925
1926 /* If an address is passed with the sendto/sendmsg call, it is used
1927 * to override the primary destination address in the TCP model, or
1928 * when SCTP_ADDR_OVER flag is set in the UDP model.
1929 */
1930 if ((sctp_style(sk, TCP) && msg_name) ||
1931 (sinfo_flags & SCTP_ADDR_OVER)) {
1932 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
1933 if (!chunk_tp) {
1934 err = -EINVAL;
1935 goto out_free;
1936 }
1937 } else
1938 chunk_tp = NULL;
1939
1940 /* Auto-connect, if we aren't connected already. */
1941 if (sctp_state(asoc, CLOSED)) {
1942 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1943 if (err < 0)
1944 goto out_free;
1945
1946 wait_connect = true;
1947 pr_debug("%s: we associated primitively\n", __func__);
1948 }
1949
1950 /* Break the message into multiple chunks of maximum size. */
1951 datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
1952 if (IS_ERR(datamsg)) {
1953 err = PTR_ERR(datamsg);
1954 goto out_free;
1955 }
1956
1957 /* Now send the (possibly) fragmented message. */
1958 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
1959 sctp_chunk_hold(chunk);
1960
1961 /* Do accounting for the write space. */
1962 sctp_set_owner_w(chunk);
1963
1964 chunk->transport = chunk_tp;
1965 }
1966
1967 /* Send it to the lower layers. Note: all chunks
1968 * must either fail or succeed. The lower layer
1969 * works that way today. Keep it that way or this
1970 * breaks.
1971 */
1972 err = sctp_primitive_SEND(net, asoc, datamsg);
1973 /* Did the lower layer accept the chunk? */
1974 if (err) {
1975 sctp_datamsg_free(datamsg);
1976 goto out_free;
1977 }
1978
1979 pr_debug("%s: we sent primitively\n", __func__);
1980
1981 sctp_datamsg_put(datamsg);
1982 err = msg_len;
1983
1984 if (unlikely(wait_connect)) {
1985 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
1986 sctp_wait_for_connect(asoc, &timeo);
1987 }
1988
1989 /* If we are already past ASSOCIATE, the lower
1990 * layers are responsible for association cleanup.
1991 */
1992 goto out_unlock;
1993
1994 out_free:
1995 if (new_asoc) {
1996 sctp_unhash_established(asoc);
1997 sctp_association_free(asoc);
1998 }
1999 out_unlock:
2000 release_sock(sk);
2001
2002 out_nounlock:
2003 return sctp_error(sk, msg_flags, err);
2004
2005 #if 0
2006 do_sock_err:
2007 if (msg_len)
2008 err = msg_len;
2009 else
2010 err = sock_error(sk);
2011 goto out;
2012
2013 do_interrupted:
2014 if (msg_len)
2015 err = msg_len;
2016 goto out;
2017 #endif /* 0 */
2018 }
2019
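/* Illustrative user-space sketch of how an application typically drives the
 * sendmsg path above on a one-to-many socket, using the sctp_sendmsg()
 * helper from lksctp-tools (assumed available via <netinet/sctp.h>); the
 * peer address and payload are placeholders.
 *
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int send_on_stream_zero(int sd, struct sockaddr_in *peer)
 *	{
 *		const char payload[] = "hello";
 *
 *		// ppid 0, flags 0, stream 0, unlimited ttl, context 0
 *		return sctp_sendmsg(sd, payload, sizeof(payload),
 *				    (struct sockaddr *)peer, sizeof(*peer),
 *				    0, 0, 0, 0, 0);
 *	}
 */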
2020 /* This is an extended version of skb_pull() that removes the data from the
2021 * start of an skb even when data is spread across the list of skb's in the
2022 * frag_list. len specifies the total amount of data that needs to be removed.
2023 * When 'len' bytes could be removed from the skb, it returns 0.
2024 * If 'len' exceeds the total skb length, it returns the number of bytes that
2025 * could not be removed.
2026 */
2027 static int sctp_skb_pull(struct sk_buff *skb, int len)
2028 {
2029 struct sk_buff *list;
2030 int skb_len = skb_headlen(skb);
2031 int rlen;
2032
2033 if (len <= skb_len) {
2034 __skb_pull(skb, len);
2035 return 0;
2036 }
2037 len -= skb_len;
2038 __skb_pull(skb, skb_len);
2039
2040 skb_walk_frags(skb, list) {
2041 rlen = sctp_skb_pull(list, len);
2042 skb->len -= (len-rlen);
2043 skb->data_len -= (len-rlen);
2044
2045 if (!rlen)
2046 return 0;
2047
2048 len = rlen;
2049 }
2050
2051 return len;
2052 }
2053
2054 /* API 3.1.3 recvmsg() - UDP Style Syntax
2055 *
2056 * ssize_t recvmsg(int socket, struct msghdr *message,
2057 * int flags);
2058 *
2059 * socket - the socket descriptor of the endpoint.
2060 * message - pointer to the msghdr structure which contains a single
2061 * user message and possibly some ancillary data.
2062 *
2063 * See Section 5 for complete description of the data
2064 * structures.
2065 *
2066 * flags - flags sent or received with the user message, see Section
2067 * 5 for complete description of the flags.
2068 */
2069 static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
2070 struct msghdr *msg, size_t len, int noblock,
2071 int flags, int *addr_len)
2072 {
2073 struct sctp_ulpevent *event = NULL;
2074 struct sctp_sock *sp = sctp_sk(sk);
2075 struct sk_buff *skb;
2076 int copied;
2077 int err = 0;
2078 int skb_len;
2079
2080 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
2081 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
2082 addr_len);
2083
2084 lock_sock(sk);
2085
2086 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) {
2087 err = -ENOTCONN;
2088 goto out;
2089 }
2090
2091 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
2092 if (!skb)
2093 goto out;
2094
2095 /* Get the total length of the skb including any skb's in the
2096 * frag_list.
2097 */
2098 skb_len = skb->len;
2099
2100 copied = skb_len;
2101 if (copied > len)
2102 copied = len;
2103
2104 err = skb_copy_datagram_msg(skb, 0, msg, copied);
2105
2106 event = sctp_skb2event(skb);
2107
2108 if (err)
2109 goto out_free;
2110
2111 sock_recv_ts_and_drops(msg, sk, skb);
2112 if (sctp_ulpevent_is_notification(event)) {
2113 msg->msg_flags |= MSG_NOTIFICATION;
2114 sp->pf->event_msgname(event, msg->msg_name, addr_len);
2115 } else {
2116 sp->pf->skb_msgname(skb, msg->msg_name, addr_len);
2117 }
2118
2119 /* Check if we allow SCTP_NXTINFO. */
2120 if (sp->recvnxtinfo)
2121 sctp_ulpevent_read_nxtinfo(event, msg, sk);
2122 /* Check if we allow SCTP_RCVINFO. */
2123 if (sp->recvrcvinfo)
2124 sctp_ulpevent_read_rcvinfo(event, msg);
2125 /* Check if we allow SCTP_SNDRCVINFO. */
2126 if (sp->subscribe.sctp_data_io_event)
2127 sctp_ulpevent_read_sndrcvinfo(event, msg);
2128
2129 #if 0
2130 /* FIXME: we should be calling IP/IPv6 layers. */
2131 if (sk->sk_protinfo.af_inet.cmsg_flags)
2132 ip_cmsg_recv(msg, skb);
2133 #endif
2134
2135 err = copied;
2136
2137 /* If skb's length exceeds the user's buffer, update the skb and
2138 * push it back to the receive_queue so that the next call to
2139 * recvmsg() will return the remaining data. Don't set MSG_EOR.
2140 */
2141 if (skb_len > copied) {
2142 msg->msg_flags &= ~MSG_EOR;
2143 if (flags & MSG_PEEK)
2144 goto out_free;
2145 sctp_skb_pull(skb, copied);
2146 skb_queue_head(&sk->sk_receive_queue, skb);
2147
2148 /* When only partial message is copied to the user, increase
2149 * rwnd by that amount. If all the data in the skb is read,
2150 * rwnd is updated when the event is freed.
2151 */
2152 if (!sctp_ulpevent_is_notification(event))
2153 sctp_assoc_rwnd_increase(event->asoc, copied);
2154 goto out;
2155 } else if ((event->msg_flags & MSG_NOTIFICATION) ||
2156 (event->msg_flags & MSG_EOR))
2157 msg->msg_flags |= MSG_EOR;
2158 else
2159 msg->msg_flags &= ~MSG_EOR;
2160
2161 out_free:
2162 if (flags & MSG_PEEK) {
2163 /* Release the skb reference acquired after peeking the skb in
2164 * sctp_skb_recv_datagram().
2165 */
2166 kfree_skb(skb);
2167 } else {
2168 /* Free the event which includes releasing the reference to
2169 * the owner of the skb, freeing the skb and updating the
2170 * rwnd.
2171 */
2172 sctp_ulpevent_free(event);
2173 }
2174 out:
2175 release_sock(sk);
2176 return err;
2177 }
2178
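/* Illustrative user-space sketch of a receive loop matching the recvmsg path
 * above, using sctp_recvmsg() from lksctp-tools (assumed available via
 * <netinet/sctp.h>). MSG_NOTIFICATION in msg_flags marks an event rather
 * than user data; a cleared MSG_EOR means only part of the message fit in
 * the buffer and the remainder arrives on the next call.
 *
 *	#include <sys/types.h>
 *	#include <netinet/sctp.h>
 *
 *	ssize_t read_one(int sd, char *buf, size_t len)
 *	{
 *		struct sctp_sndrcvinfo sinfo;
 *		int msg_flags = 0;
 *		ssize_t n;
 *
 *		n = sctp_recvmsg(sd, buf, len, NULL, NULL, &sinfo, &msg_flags);
 *		if (n > 0 && (msg_flags & MSG_NOTIFICATION))
 *			return 0;	// a notification, not user data
 *		return n;
 *	}
 */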
2179 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
2180 *
2181 * This option is an on/off flag. If enabled, no SCTP message
2182 * fragmentation will be performed. Instead, if a message being sent
2183 * exceeds the current PMTU size, the message will NOT be sent and
2184 * an error will be indicated to the user.
2185 */
2186 static int sctp_setsockopt_disable_fragments(struct sock *sk,
2187 char __user *optval,
2188 unsigned int optlen)
2189 {
2190 int val;
2191
2192 if (optlen < sizeof(int))
2193 return -EINVAL;
2194
2195 if (get_user(val, (int __user *)optval))
2196 return -EFAULT;
2197
2198 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;
2199
2200 return 0;
2201 }
2202
2203 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2204 unsigned int optlen)
2205 {
2206 struct sctp_association *asoc;
2207 struct sctp_ulpevent *event;
2208
2209 if (optlen > sizeof(struct sctp_event_subscribe))
2210 return -EINVAL;
2211 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
2212 return -EFAULT;
2213
2214 if (sctp_sk(sk)->subscribe.sctp_data_io_event)
2215 pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
2216 "Requested SCTP_SNDRCVINFO event.\n"
2217 "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n",
2218 current->comm, task_pid_nr(current));
2219
2220 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2221 * if there is no data to be sent or retransmit, the stack will
2222 * immediately send up this notification.
2223 */
2224 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
2225 &sctp_sk(sk)->subscribe)) {
2226 asoc = sctp_id2assoc(sk, 0);
2227
2228 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2229 event = sctp_ulpevent_make_sender_dry_event(asoc,
2230 GFP_ATOMIC);
2231 if (!event)
2232 return -ENOMEM;
2233
2234 sctp_ulpq_tail_event(&asoc->ulpq, event);
2235 }
2236 }
2237
2238 return 0;
2239 }
2240
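/* Illustrative user-space sketch of subscribing to notifications with
 * SCTP_EVENTS, which lands in the handler above (assumes the usual
 * <netinet/sctp.h> declarations). Zeroing the structure first and enabling
 * only the wanted events is the common pattern.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int enable_assoc_events(int sd)
 *	{
 *		struct sctp_event_subscribe ev;
 *
 *		memset(&ev, 0, sizeof(ev));
 *		ev.sctp_association_event = 1;	// SCTP_ASSOC_CHANGE
 *		ev.sctp_shutdown_event = 1;	// SCTP_SHUTDOWN_EVENT
 *		return setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS,
 *				  &ev, sizeof(ev));
 *	}
 */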
2241 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
2242 *
2243 * This socket option is applicable to the UDP-style socket only. When
2244 * set it will cause associations that are idle for more than the
2245 * specified number of seconds to automatically close. An association
2246 * being idle is defined as an association that has NOT sent or received
2247 * user data. The special value of '0' indicates that no automatic
2248 * close of any associations should be performed. The option expects an
2249 * integer defining the number of seconds of idle time before an
2250 * association is closed.
2251 */
2252 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2253 unsigned int optlen)
2254 {
2255 struct sctp_sock *sp = sctp_sk(sk);
2256 struct net *net = sock_net(sk);
2257
2258 /* Applicable to UDP-style socket only */
2259 if (sctp_style(sk, TCP))
2260 return -EOPNOTSUPP;
2261 if (optlen != sizeof(int))
2262 return -EINVAL;
2263 if (copy_from_user(&sp->autoclose, optval, optlen))
2264 return -EFAULT;
2265
2266 if (sp->autoclose > net->sctp.max_autoclose)
2267 sp->autoclose = net->sctp.max_autoclose;
2268
2269 return 0;
2270 }
2271
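/* Illustrative user-space sketch of enabling automatic close of idle
 * associations on a one-to-many socket, as handled above (assumes the usual
 * <netinet/sctp.h> declarations). The value is in seconds; 0 disables
 * auto-close.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int set_autoclose(int sd, int idle_seconds)
 *	{
 *		return setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
 *				  &idle_seconds, sizeof(idle_seconds));
 *	}
 */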
2272 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
2273 *
2274 * Applications can enable or disable heartbeats for any peer address of
2275 * an association, modify an address's heartbeat interval, force a
2276 * heartbeat to be sent immediately, and adjust the address's maximum
2277 * number of retransmissions sent before an address is considered
2278 * unreachable. The following structure is used to access and modify an
2279 * address's parameters:
2280 *
2281 * struct sctp_paddrparams {
2282 * sctp_assoc_t spp_assoc_id;
2283 * struct sockaddr_storage spp_address;
2284 * uint32_t spp_hbinterval;
2285 * uint16_t spp_pathmaxrxt;
2286 * uint32_t spp_pathmtu;
2287 * uint32_t spp_sackdelay;
2288 * uint32_t spp_flags;
2289 * };
2290 *
2291 * spp_assoc_id - (one-to-many style socket) This is filled in by the
2292 * application, and identifies the association for
2293 * this query.
2294 * spp_address - This specifies which address is of interest.
2295 * spp_hbinterval - This contains the value of the heartbeat interval,
2296 * in milliseconds. If a value of zero
2297 * is present in this field then no changes are to
2298 * be made to this parameter.
2299 * spp_pathmaxrxt - This contains the maximum number of
2300 * retransmissions before this address shall be
2301 * considered unreachable. If a value of zero
2302 * is present in this field then no changes are to
2303 * be made to this parameter.
2304 * spp_pathmtu - When Path MTU discovery is disabled the value
2305 * specified here will be the "fixed" path mtu.
2306 * Note that if the spp_address field is empty
2307 * then all associations on this address will
2308 * have this fixed path mtu set upon them.
2309 *
2310 * spp_sackdelay - When delayed sack is enabled, this value specifies
2311 * the number of milliseconds that sacks will be delayed
2312 * for. This value will apply to all addresses of an
2313 * association if the spp_address field is empty. Note
2314 * also, that if delayed sack is enabled and this
2315 * value is set to 0, no change is made to the last
2316 * recorded delayed sack timer value.
2317 *
2318 * spp_flags - These flags are used to control various features
2319 * on an association. The flag field may contain
2320 * zero or more of the following options.
2321 *
2322 * SPP_HB_ENABLE - Enable heartbeats on the
2323 * specified address. Note that if the address
2324 * field is empty all addresses for the association
2325 * have heartbeats enabled upon them.
2326 *
2327 * SPP_HB_DISABLE - Disable heartbeats on the
2328 * specified address. Note that if the address
2329 * field is empty all addresses for the association
2330 * will have their heartbeats disabled. Note also
2331 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
2332 * mutually exclusive, only one of these two should
2333 * be specified. Enabling both fields will have
2334 * undetermined results.
2335 *
2336 * SPP_HB_DEMAND - Request a user initiated heartbeat
2337 * to be made immediately.
2338 *
2339 * SPP_HB_TIME_IS_ZERO - Specifies that the time for
2340 * heartbeat delay is to be set to the value of 0
2341 * milliseconds.
2342 *
2343 * SPP_PMTUD_ENABLE - This field will enable PMTU
2344 * discovery upon the specified address. Note that
2345 * if the address field is empty then all addresses
2346 * on the association are affected.
2347 *
2348 * SPP_PMTUD_DISABLE - This field will disable PMTU
2349 * discovery upon the specified address. Note that
2350 * if the address field is empty then all addresses
2351 * on the association are affected. Note also that
2352 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
2353 * exclusive. Enabling both will have undetermined
2354 * results.
2355 *
2356 * SPP_SACKDELAY_ENABLE - Setting this flag turns
2357 * on delayed sack. The time specified in spp_sackdelay
2358 * is used to specify the sack delay for this address. Note
2359 * that if spp_address is empty then all addresses will
2360 * enable delayed sack and take on the sack delay
2361 * value specified in spp_sackdelay.
2362 * SPP_SACKDELAY_DISABLE - Setting this flag turns
2363 * off delayed sack. If the spp_address field is blank then
2364 * delayed sack is disabled for the entire association. Note
2365 * also that this field is mutually exclusive with
2366 * SPP_SACKDELAY_ENABLE, setting both will have undefined
2367 * results.
2368 */
2369 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2370 struct sctp_transport *trans,
2371 struct sctp_association *asoc,
2372 struct sctp_sock *sp,
2373 int hb_change,
2374 int pmtud_change,
2375 int sackdelay_change)
2376 {
2377 int error;
2378
2379 if (params->spp_flags & SPP_HB_DEMAND && trans) {
2380 struct net *net = sock_net(trans->asoc->base.sk);
2381
2382 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
2383 if (error)
2384 return error;
2385 }
2386
2387 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
2388 * this field is ignored. Note also that a value of zero indicates
2389 * the current setting should be left unchanged.
2390 */
2391 if (params->spp_flags & SPP_HB_ENABLE) {
2392
2393 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
2394 * set. This lets us use 0 value when this flag
2395 * is set.
2396 */
2397 if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
2398 params->spp_hbinterval = 0;
2399
2400 if (params->spp_hbinterval ||
2401 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
2402 if (trans) {
2403 trans->hbinterval =
2404 msecs_to_jiffies(params->spp_hbinterval);
2405 } else if (asoc) {
2406 asoc->hbinterval =
2407 msecs_to_jiffies(params->spp_hbinterval);
2408 } else {
2409 sp->hbinterval = params->spp_hbinterval;
2410 }
2411 }
2412 }
2413
2414 if (hb_change) {
2415 if (trans) {
2416 trans->param_flags =
2417 (trans->param_flags & ~SPP_HB) | hb_change;
2418 } else if (asoc) {
2419 asoc->param_flags =
2420 (asoc->param_flags & ~SPP_HB) | hb_change;
2421 } else {
2422 sp->param_flags =
2423 (sp->param_flags & ~SPP_HB) | hb_change;
2424 }
2425 }
2426
2427 /* When Path MTU discovery is disabled the value specified here will
2428 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
2429 * include the flag SPP_PMTUD_DISABLE for this field to have any
2430 * effect).
2431 */
2432 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
2433 if (trans) {
2434 trans->pathmtu = params->spp_pathmtu;
2435 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
2436 } else if (asoc) {
2437 asoc->pathmtu = params->spp_pathmtu;
2438 sctp_frag_point(asoc, params->spp_pathmtu);
2439 } else {
2440 sp->pathmtu = params->spp_pathmtu;
2441 }
2442 }
2443
2444 if (pmtud_change) {
2445 if (trans) {
2446 int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
2447 (params->spp_flags & SPP_PMTUD_ENABLE);
2448 trans->param_flags =
2449 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2450 if (update) {
2451 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2452 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
2453 }
2454 } else if (asoc) {
2455 asoc->param_flags =
2456 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
2457 } else {
2458 sp->param_flags =
2459 (sp->param_flags & ~SPP_PMTUD) | pmtud_change;
2460 }
2461 }
2462
2463 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
2464 * value of this field is ignored. Note also that a value of zero
2465 * indicates the current setting should be left unchanged.
2466 */
2467 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
2468 if (trans) {
2469 trans->sackdelay =
2470 msecs_to_jiffies(params->spp_sackdelay);
2471 } else if (asoc) {
2472 asoc->sackdelay =
2473 msecs_to_jiffies(params->spp_sackdelay);
2474 } else {
2475 sp->sackdelay = params->spp_sackdelay;
2476 }
2477 }
2478
2479 if (sackdelay_change) {
2480 if (trans) {
2481 trans->param_flags =
2482 (trans->param_flags & ~SPP_SACKDELAY) |
2483 sackdelay_change;
2484 } else if (asoc) {
2485 asoc->param_flags =
2486 (asoc->param_flags & ~SPP_SACKDELAY) |
2487 sackdelay_change;
2488 } else {
2489 sp->param_flags =
2490 (sp->param_flags & ~SPP_SACKDELAY) |
2491 sackdelay_change;
2492 }
2493 }
2494
2495 /* Note that a value of zero indicates the current setting should be
2496 * left unchanged.
2497 */
2498 if (params->spp_pathmaxrxt) {
2499 if (trans) {
2500 trans->pathmaxrxt = params->spp_pathmaxrxt;
2501 } else if (asoc) {
2502 asoc->pathmaxrxt = params->spp_pathmaxrxt;
2503 } else {
2504 sp->pathmaxrxt = params->spp_pathmaxrxt;
2505 }
2506 }
2507
2508 return 0;
2509 }
2510
2511 static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2512 char __user *optval,
2513 unsigned int optlen)
2514 {
2515 struct sctp_paddrparams params;
2516 struct sctp_transport *trans = NULL;
2517 struct sctp_association *asoc = NULL;
2518 struct sctp_sock *sp = sctp_sk(sk);
2519 int error;
2520 int hb_change, pmtud_change, sackdelay_change;
2521
2522 if (optlen != sizeof(struct sctp_paddrparams))
2523 return -EINVAL;
2524
2525 if (copy_from_user(&params, optval, optlen))
2526 return -EFAULT;
2527
2528 /* Validate flags and value parameters. */
2529 hb_change = params.spp_flags & SPP_HB;
2530 pmtud_change = params.spp_flags & SPP_PMTUD;
2531 sackdelay_change = params.spp_flags & SPP_SACKDELAY;
2532
2533 if (hb_change == SPP_HB ||
2534 pmtud_change == SPP_PMTUD ||
2535 sackdelay_change == SPP_SACKDELAY ||
2536 params.spp_sackdelay > 500 ||
2537 (params.spp_pathmtu &&
2538 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
2539 return -EINVAL;
2540
2541 /* If an address other than INADDR_ANY is specified, and
2542 * no transport is found, then the request is invalid.
2543 */
2544 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
2545 trans = sctp_addr_id2transport(sk, &params.spp_address,
2546 params.spp_assoc_id);
2547 if (!trans)
2548 return -EINVAL;
2549 }
2550
2551 /* Get association, if assoc_id != 0 and the socket is a one
2552 * to many style socket, and an association was not found, then
2553 * the id was invalid.
2554 */
2555 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
2556 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
2557 return -EINVAL;
2558
2559 /* Heartbeat demand can only be sent on a transport or
2560 * association, but not a socket.
2561 */
2562 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
2563 return -EINVAL;
2564
2565 /* Process parameters. */
2566 error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2567 hb_change, pmtud_change,
2568 sackdelay_change);
2569
2570 if (error)
2571 return error;
2572
2573 /* If changes are for association, also apply parameters to each
2574 * transport.
2575 */
2576 if (!trans && asoc) {
2577 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2578 transports) {
2579 sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2580 hb_change, pmtud_change,
2581 sackdelay_change);
2582 }
2583 }
2584
2585 return 0;
2586 }
2587
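/* Illustrative user-space sketch of enabling a 5 second heartbeat interval
 * on every path of an association via SCTP_PEER_ADDR_PARAMS, which the two
 * functions above validate and apply (assumes the usual <netinet/sctp.h>
 * declarations). Leaving spp_address zeroed selects all peer addresses.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int set_hb_interval(int sd, sctp_assoc_t id)
 *	{
 *		struct sctp_paddrparams p;
 *
 *		memset(&p, 0, sizeof(p));
 *		p.spp_assoc_id = id;
 *		p.spp_flags = SPP_HB_ENABLE;
 *		p.spp_hbinterval = 5000;	// milliseconds
 *		return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *				  &p, sizeof(p));
 *	}
 */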
2588 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
2589 {
2590 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
2591 }
2592
2593 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
2594 {
2595 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
2596 }
2597
2598 /*
2599 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
2600 *
2601 * This option will affect the way delayed acks are performed. This
2602 * option allows you to get or set the delayed ack time, in
2603 * milliseconds. It also allows changing the delayed ack frequency.
2604 * Changing the frequency to 1 disables the delayed sack algorithm. If
2605 * the assoc_id is 0, then this sets or gets the endpoint's default
2606 * values. If the assoc_id field is non-zero, then the set or get
2607 * affects the specified association for the one to many model (the
2608 * assoc_id field is ignored by the one to one model). Note that if
2609 * sack_delay or sack_freq are 0 when setting this option, then the
2610 * current values will remain unchanged.
2611 *
2612 * struct sctp_sack_info {
2613 * sctp_assoc_t sack_assoc_id;
2614 * uint32_t sack_delay;
2615 * uint32_t sack_freq;
2616 * };
2617 *
2618 * sack_assoc_id - This parameter indicates which association the user
2619 * is performing an action upon. Note that if this field's value is
2620 * zero then the endpoint's default value is changed (affecting future
2621 * associations only).
2622 *
2623 * sack_delay - This parameter contains the number of milliseconds that
2624 * the user is requesting the delayed ACK timer be set to. Note that
2625 * this value is defined in the standard to be between 200 and 500
2626 * milliseconds.
2627 *
2628 * sack_freq - This parameter contains the number of packets that must
2629 * be received before a sack is sent without waiting for the delay
2630 * timer to expire. The default value for this is 2, setting this
2631 * value to 1 will disable the delayed sack algorithm.
2632 */
2633
2634 static int sctp_setsockopt_delayed_ack(struct sock *sk,
2635 char __user *optval, unsigned int optlen)
2636 {
2637 struct sctp_sack_info params;
2638 struct sctp_transport *trans = NULL;
2639 struct sctp_association *asoc = NULL;
2640 struct sctp_sock *sp = sctp_sk(sk);
2641
2642 if (optlen == sizeof(struct sctp_sack_info)) {
2643 if (copy_from_user(&params, optval, optlen))
2644 return -EFAULT;
2645
2646 if (params.sack_delay == 0 && params.sack_freq == 0)
2647 return 0;
2648 } else if (optlen == sizeof(struct sctp_assoc_value)) {
2649 pr_warn_ratelimited(DEPRECATED
2650 "%s (pid %d) "
2651 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
2652 "Use struct sctp_sack_info instead\n",
2653 current->comm, task_pid_nr(current));
2654 if (copy_from_user(&params, optval, optlen))
2655 return -EFAULT;
2656
2657 if (params.sack_delay == 0)
2658 params.sack_freq = 1;
2659 else
2660 params.sack_freq = 0;
2661 } else
2662 return -EINVAL;
2663
2664 /* Validate value parameter. */
2665 if (params.sack_delay > 500)
2666 return -EINVAL;
2667
2668 /* Get association, if sack_assoc_id != 0 and the socket is a one
2669 * to many style socket, and an association was not found, then
2670 * the id was invalid.
2671 */
2672 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
2673 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
2674 return -EINVAL;
2675
2676 if (params.sack_delay) {
2677 if (asoc) {
2678 asoc->sackdelay =
2679 msecs_to_jiffies(params.sack_delay);
2680 asoc->param_flags =
2681 sctp_spp_sackdelay_enable(asoc->param_flags);
2682 } else {
2683 sp->sackdelay = params.sack_delay;
2684 sp->param_flags =
2685 sctp_spp_sackdelay_enable(sp->param_flags);
2686 }
2687 }
2688
2689 if (params.sack_freq == 1) {
2690 if (asoc) {
2691 asoc->param_flags =
2692 sctp_spp_sackdelay_disable(asoc->param_flags);
2693 } else {
2694 sp->param_flags =
2695 sctp_spp_sackdelay_disable(sp->param_flags);
2696 }
2697 } else if (params.sack_freq > 1) {
2698 if (asoc) {
2699 asoc->sackfreq = params.sack_freq;
2700 asoc->param_flags =
2701 sctp_spp_sackdelay_enable(asoc->param_flags);
2702 } else {
2703 sp->sackfreq = params.sack_freq;
2704 sp->param_flags =
2705 sctp_spp_sackdelay_enable(sp->param_flags);
2706 }
2707 }
2708
2709 /* If change is for association, also apply to each transport. */
2710 if (asoc) {
2711 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2712 transports) {
2713 if (params.sack_delay) {
2714 trans->sackdelay =
2715 msecs_to_jiffies(params.sack_delay);
2716 trans->param_flags =
2717 sctp_spp_sackdelay_enable(trans->param_flags);
2718 }
2719 if (params.sack_freq == 1) {
2720 trans->param_flags =
2721 sctp_spp_sackdelay_disable(trans->param_flags);
2722 } else if (params.sack_freq > 1) {
2723 trans->sackfreq = params.sack_freq;
2724 trans->param_flags =
2725 sctp_spp_sackdelay_enable(trans->param_flags);
2726 }
2727 }
2728 }
2729
2730 return 0;
2731 }
2732
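/* Illustrative user-space sketch of setting a 200 ms delayed-SACK timer and
 * a SACK frequency of two packets through SCTP_DELAYED_SACK, matching the
 * handler above (assumes the usual <netinet/sctp.h> declarations). Either
 * field may be left 0 to keep its current value; sack_freq == 1 disables
 * delayed SACKs entirely.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int set_sack_timing(int sd, sctp_assoc_t id)
 *	{
 *		struct sctp_sack_info si;
 *
 *		memset(&si, 0, sizeof(si));
 *		si.sack_assoc_id = id;
 *		si.sack_delay = 200;	// milliseconds
 *		si.sack_freq = 2;	// packets
 *		return setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
 *				  &si, sizeof(si));
 *	}
 */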
2733 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
2734 *
2735 * Applications can specify protocol parameters for the default association
2736 * initialization. The option name argument to setsockopt() and getsockopt()
2737 * is SCTP_INITMSG.
2738 *
2739 * Setting initialization parameters is effective only on an unconnected
2740 * socket (for UDP-style sockets only future associations are affected
2741 * by the change). With TCP-style sockets, this option is inherited by
2742 * sockets derived from a listener socket.
2743 */
2744 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
2745 {
2746 struct sctp_initmsg sinit;
2747 struct sctp_sock *sp = sctp_sk(sk);
2748
2749 if (optlen != sizeof(struct sctp_initmsg))
2750 return -EINVAL;
2751 if (copy_from_user(&sinit, optval, optlen))
2752 return -EFAULT;
2753
2754 if (sinit.sinit_num_ostreams)
2755 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
2756 if (sinit.sinit_max_instreams)
2757 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
2758 if (sinit.sinit_max_attempts)
2759 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
2760 if (sinit.sinit_max_init_timeo)
2761 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
2762
2763 return 0;
2764 }
2765
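/* Illustrative user-space sketch of requesting stream counts for future
 * associations with SCTP_INITMSG, as stored by the handler above (assumes
 * the usual <netinet/sctp.h> declarations). Zero fields keep the current
 * defaults.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int request_streams(int sd)
 *	{
 *		struct sctp_initmsg im;
 *
 *		memset(&im, 0, sizeof(im));
 *		im.sinit_num_ostreams = 10;
 *		im.sinit_max_instreams = 10;
 *		return setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG,
 *				  &im, sizeof(im));
 *	}
 */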
2766 /*
2767 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
2768 *
2769 * Applications that wish to use the sendto() system call may wish to
2770 * specify a default set of parameters that would normally be supplied
2771 * through the inclusion of ancillary data. This socket option allows
2772 * such an application to set the default sctp_sndrcvinfo structure.
2773 * The application that wishes to use this socket option simply passes
2774 * in to this call the sctp_sndrcvinfo structure defined in Section
2775 * 5.2.2. The input parameters accepted by this call include
2776 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
2777 * sinfo_timetolive. The user must provide the sinfo_assoc_id field
2778 * in this call if the caller is using the UDP model.
2779 */
2780 static int sctp_setsockopt_default_send_param(struct sock *sk,
2781 char __user *optval,
2782 unsigned int optlen)
2783 {
2784 struct sctp_sock *sp = sctp_sk(sk);
2785 struct sctp_association *asoc;
2786 struct sctp_sndrcvinfo info;
2787
2788 if (optlen != sizeof(info))
2789 return -EINVAL;
2790 if (copy_from_user(&info, optval, optlen))
2791 return -EFAULT;
2792 if (info.sinfo_flags &
2793 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
2794 SCTP_ABORT | SCTP_EOF))
2795 return -EINVAL;
2796
2797 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
2798 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
2799 return -EINVAL;
2800 if (asoc) {
2801 asoc->default_stream = info.sinfo_stream;
2802 asoc->default_flags = info.sinfo_flags;
2803 asoc->default_ppid = info.sinfo_ppid;
2804 asoc->default_context = info.sinfo_context;
2805 asoc->default_timetolive = info.sinfo_timetolive;
2806 } else {
2807 sp->default_stream = info.sinfo_stream;
2808 sp->default_flags = info.sinfo_flags;
2809 sp->default_ppid = info.sinfo_ppid;
2810 sp->default_context = info.sinfo_context;
2811 sp->default_timetolive = info.sinfo_timetolive;
2812 }
2813
2814 return 0;
2815 }
2816
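/* Illustrative user-space sketch of installing default send parameters with
 * SCTP_DEFAULT_SEND_PARAM so that plain send()/sendto() calls inherit a
 * stream and PPID without ancillary data, as applied by the handler above
 * (assumes the usual <netinet/sctp.h> declarations; the PPID value is a
 * placeholder).
 *
 *	#include <string.h>
 *	#include <stdint.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <netinet/sctp.h>
 *
 *	int set_default_stream(int sd, sctp_assoc_t id, uint16_t stream)
 *	{
 *		struct sctp_sndrcvinfo info;
 *
 *		memset(&info, 0, sizeof(info));
 *		info.sinfo_assoc_id = id;
 *		info.sinfo_stream = stream;
 *		info.sinfo_ppid = htonl(42);	// placeholder payload protocol id
 *		return setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *				  &info, sizeof(info));
 *	}
 */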
2817 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters
2818 * (SCTP_DEFAULT_SNDINFO)
2819 */
2820 static int sctp_setsockopt_default_sndinfo(struct sock *sk,
2821 char __user *optval,
2822 unsigned int optlen)
2823 {
2824 struct sctp_sock *sp = sctp_sk(sk);
2825 struct sctp_association *asoc;
2826 struct sctp_sndinfo info;
2827
2828 if (optlen != sizeof(info))
2829 return -EINVAL;
2830 if (copy_from_user(&info, optval, optlen))
2831 return -EFAULT;
2832 if (info.snd_flags &
2833 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
2834 SCTP_ABORT | SCTP_EOF))
2835 return -EINVAL;
2836
2837 asoc = sctp_id2assoc(sk, info.snd_assoc_id);
2838 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
2839 return -EINVAL;
2840 if (asoc) {
2841 asoc->default_stream = info.snd_sid;
2842 asoc->default_flags = info.snd_flags;
2843 asoc->default_ppid = info.snd_ppid;
2844 asoc->default_context = info.snd_context;
2845 } else {
2846 sp->default_stream = info.snd_sid;
2847 sp->default_flags = info.snd_flags;
2848 sp->default_ppid = info.snd_ppid;
2849 sp->default_context = info.snd_context;
2850 }
2851
2852 return 0;
2853 }
2854
2855 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
2856 *
2857 * Requests that the local SCTP stack use the enclosed peer address as
2858 * the association primary. The enclosed address must be one of the
2859 * association peer's addresses.
2860 */
2861 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
2862 unsigned int optlen)
2863 {
2864 struct sctp_prim prim;
2865 struct sctp_transport *trans;
2866
2867 if (optlen != sizeof(struct sctp_prim))
2868 return -EINVAL;
2869
2870 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
2871 return -EFAULT;
2872
2873 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
2874 if (!trans)
2875 return -EINVAL;
2876
2877 sctp_assoc_set_primary(trans->asoc, trans);
2878
2879 return 0;
2880 }
2881
2882 /*
2883 * 7.1.5 SCTP_NODELAY
2884 *
2885 * Turn on/off any Nagle-like algorithm. This means that packets are
2886 * generally sent as soon as possible and no unnecessary delays are
2887 * introduced, at the cost of more packets in the network. Expects an
2888 * integer boolean flag.
2889 */
2890 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
2891 unsigned int optlen)
2892 {
2893 int val;
2894
2895 if (optlen < sizeof(int))
2896 return -EINVAL;
2897 if (get_user(val, (int __user *)optval))
2898 return -EFAULT;
2899
2900 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
2901 return 0;
2902 }
2903
2904 /*
2905 *
2906 * 7.1.1 SCTP_RTOINFO
2907 *
2908 * The protocol parameters used to initialize and bound retransmission
2909 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
2910 * and modify these parameters.
2911 * All parameters are time values, in milliseconds. A value of 0, when
2912 * modifying the parameters, indicates that the current value should not
2913 * be changed.
2914 *
2915 */
2916 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
2917 {
2918 struct sctp_rtoinfo rtoinfo;
2919 struct sctp_association *asoc;
2920 unsigned long rto_min, rto_max;
2921 struct sctp_sock *sp = sctp_sk(sk);
2922
2923 if (optlen != sizeof(struct sctp_rtoinfo))
2924 return -EINVAL;
2925
2926 if (copy_from_user(&rtoinfo, optval, optlen))
2927 return -EFAULT;
2928
2929 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
2930
2931 /* Set the values to the specific association */
2932 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
2933 return -EINVAL;
2934
2935 rto_max = rtoinfo.srto_max;
2936 rto_min = rtoinfo.srto_min;
2937
2938 if (rto_max)
2939 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
2940 else
2941 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
2942
2943 if (rto_min)
2944 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
2945 else
2946 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
2947
2948 if (rto_min > rto_max)
2949 return -EINVAL;
2950
2951 if (asoc) {
2952 if (rtoinfo.srto_initial != 0)
2953 asoc->rto_initial =
2954 msecs_to_jiffies(rtoinfo.srto_initial);
2955 asoc->rto_max = rto_max;
2956 asoc->rto_min = rto_min;
2957 } else {
2958 /* If there is no association or the association-id = 0
2959 * set the values to the endpoint.
2960 */
2961 if (rtoinfo.srto_initial != 0)
2962 sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
2963 sp->rtoinfo.srto_max = rto_max;
2964 sp->rtoinfo.srto_min = rto_min;
2965 }
2966
2967 return 0;
2968 }
2969
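/* Illustrative user-space sketch of bounding the retransmission timeout with
 * SCTP_RTOINFO, as handled above (assumes the usual <netinet/sctp.h>
 * declarations). All values are in milliseconds; a 0 leaves the
 * corresponding parameter unchanged.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int set_rto_bounds(int sd, sctp_assoc_t id)
 *	{
 *		struct sctp_rtoinfo rto;
 *
 *		memset(&rto, 0, sizeof(rto));
 *		rto.srto_assoc_id = id;
 *		rto.srto_initial = 300;
 *		rto.srto_min = 100;
 *		rto.srto_max = 2000;
 *		return setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO,
 *				  &rto, sizeof(rto));
 *	}
 */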
2970 /*
2971 *
2972 * 7.1.2 SCTP_ASSOCINFO
2973 *
2974 * This option is used to tune the maximum retransmission attempts
2975 * of the association.
2976 * Returns an error if the new association retransmission value is
2977 * greater than the sum of the retransmission values of the peer's paths.
2978 * See [SCTP] for more information.
2979 *
2980 */
2981 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
2982 {
2983
2984 struct sctp_assocparams assocparams;
2985 struct sctp_association *asoc;
2986
2987 if (optlen != sizeof(struct sctp_assocparams))
2988 return -EINVAL;
2989 if (copy_from_user(&assocparams, optval, optlen))
2990 return -EFAULT;
2991
2992 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
2993
2994 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
2995 return -EINVAL;
2996
2997 /* Set the values to the specific association */
2998 if (asoc) {
2999 if (assocparams.sasoc_asocmaxrxt != 0) {
3000 __u32 path_sum = 0;
3001 int paths = 0;
3002 struct sctp_transport *peer_addr;
3003
3004 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
3005 transports) {
3006 path_sum += peer_addr->pathmaxrxt;
3007 paths++;
3008 }
3009
3010 /* Only validate asocmaxrxt if we have more than
3011 * one path/transport. We do this because path
3012 * retransmissions are only counted when we have more
3013 * then one path.
3014 */
3015 if (paths > 1 &&
3016 assocparams.sasoc_asocmaxrxt > path_sum)
3017 return -EINVAL;
3018
3019 asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
3020 }
3021
3022 if (assocparams.sasoc_cookie_life != 0)
3023 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life);
3024 } else {
3025 /* Set the values to the endpoint */
3026 struct sctp_sock *sp = sctp_sk(sk);
3027
3028 if (assocparams.sasoc_asocmaxrxt != 0)
3029 sp->assocparams.sasoc_asocmaxrxt =
3030 assocparams.sasoc_asocmaxrxt;
3031 if (assocparams.sasoc_cookie_life != 0)
3032 sp->assocparams.sasoc_cookie_life =
3033 assocparams.sasoc_cookie_life;
3034 }
3035 return 0;
3036 }
3037
3038 /*
3039 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
3040 *
3041 * This socket option is a boolean flag which turns on or off mapped V4
3042 * addresses. If this option is turned on and the socket is type
3043 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
3044 * If this option is turned off, then no mapping will be done of V4
3045 * addresses and a user will receive both PF_INET6 and PF_INET type
3046 * addresses on the socket.
3047 */
3048 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
3049 {
3050 int val;
3051 struct sctp_sock *sp = sctp_sk(sk);
3052
3053 if (optlen < sizeof(int))
3054 return -EINVAL;
3055 if (get_user(val, (int __user *)optval))
3056 return -EFAULT;
3057 if (val)
3058 sp->v4mapped = 1;
3059 else
3060 sp->v4mapped = 0;
3061
3062 return 0;
3063 }
3064
3065 /*
3066 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
3067 * This option will get or set the maximum size to put in any outgoing
3068 * SCTP DATA chunk. If a message is larger than this size it will be
3069 * fragmented by SCTP into the specified size. Note that the underlying
3070 * SCTP implementation may fragment into smaller sized chunks when the
3071 * PMTU of the underlying association is smaller than the value set by
3072 * the user. The default value for this option is '0' which indicates
3073 * the user is NOT limiting fragmentation and only the PMTU will affect
3074 * SCTP's choice of DATA chunk size. Note also that values set larger
3075 * than the maximum size of an IP datagram will effectively let SCTP
3076 * control fragmentation (i.e. the same as setting this option to 0).
3077 *
3078 * The following structure is used to access and modify this parameter:
3079 *
3080 * struct sctp_assoc_value {
3081 * sctp_assoc_t assoc_id;
3082 * uint32_t assoc_value;
3083 * };
3084 *
3085 * assoc_id: This parameter is ignored for one-to-one style sockets.
3086 * For one-to-many style sockets this parameter indicates which
3087 * association the user is performing an action upon. Note that if
3088 * this field's value is zero then the endpoint's default value is
3089 * changed (affecting future associations only).
3090 * assoc_value: This parameter specifies the maximum size in bytes.
3091 */
3092 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
3093 {
3094 struct sctp_assoc_value params;
3095 struct sctp_association *asoc;
3096 struct sctp_sock *sp = sctp_sk(sk);
3097 int val;
3098
3099 if (optlen == sizeof(int)) {
3100 pr_warn_ratelimited(DEPRECATED
3101 "%s (pid %d) "
3102 "Use of int in maxseg socket option.\n"
3103 "Use struct sctp_assoc_value instead\n",
3104 current->comm, task_pid_nr(current));
3105 if (copy_from_user(&val, optval, optlen))
3106 return -EFAULT;
3107 params.assoc_id = 0;
3108 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3109 if (copy_from_user(&params, optval, optlen))
3110 return -EFAULT;
3111 val = params.assoc_value;
3112 } else
3113 return -EINVAL;
3114
3115 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
3116 return -EINVAL;
3117
3118 asoc = sctp_id2assoc(sk, params.assoc_id);
3119 if (!asoc && params.assoc_id && sctp_style(sk, UDP))
3120 return -EINVAL;
3121
3122 if (asoc) {
3123 if (val == 0) {
3124 val = asoc->pathmtu;
3125 val -= sp->pf->af->net_header_len;
3126 val -= sizeof(struct sctphdr) +
3127 sizeof(struct sctp_data_chunk);
3128 }
3129 asoc->user_frag = val;
3130 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
3131 } else {
3132 sp->user_frag = val;
3133 }
3134
3135 return 0;
3136 }
3137
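/* Illustrative user-space sketch of capping the size of outgoing DATA chunks
 * with SCTP_MAXSEG, using the struct sctp_assoc_value form accepted by the
 * handler above (assumes the usual <netinet/sctp.h> declarations). A value
 * of 0 removes the user limit and lets the PMTU govern fragmentation.
 *
 *	#include <string.h>
 *	#include <stdint.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int cap_chunk_size(int sd, sctp_assoc_t id, uint32_t bytes)
 *	{
 *		struct sctp_assoc_value av;
 *
 *		memset(&av, 0, sizeof(av));
 *		av.assoc_id = id;
 *		av.assoc_value = bytes;
 *		return setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG,
 *				  &av, sizeof(av));
 *	}
 */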
3138
3139 /*
3140 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
3141 *
3142 * Requests that the peer mark the enclosed address as the association
3143 * primary. The enclosed address must be one of the association's
3144 * locally bound addresses. The following structure is used to make a
3145 * set primary request:
3146 */
3147 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
3148 unsigned int optlen)
3149 {
3150 struct net *net = sock_net(sk);
3151 struct sctp_sock *sp;
3152 struct sctp_association *asoc = NULL;
3153 struct sctp_setpeerprim prim;
3154 struct sctp_chunk *chunk;
3155 struct sctp_af *af;
3156 int err;
3157
3158 sp = sctp_sk(sk);
3159
3160 if (!net->sctp.addip_enable)
3161 return -EPERM;
3162
3163 if (optlen != sizeof(struct sctp_setpeerprim))
3164 return -EINVAL;
3165
3166 if (copy_from_user(&prim, optval, optlen))
3167 return -EFAULT;
3168
3169 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
3170 if (!asoc)
3171 return -EINVAL;
3172
3173 if (!asoc->peer.asconf_capable)
3174 return -EPERM;
3175
3176 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
3177 return -EPERM;
3178
3179 if (!sctp_state(asoc, ESTABLISHED))
3180 return -ENOTCONN;
3181
3182 af = sctp_get_af_specific(prim.sspp_addr.ss_family);
3183 if (!af)
3184 return -EINVAL;
3185
3186 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
3187 return -EADDRNOTAVAIL;
3188
3189 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
3190 return -EADDRNOTAVAIL;
3191
3192 /* Create an ASCONF chunk with SET_PRIMARY parameter */
3193 chunk = sctp_make_asconf_set_prim(asoc,
3194 (union sctp_addr *)&prim.sspp_addr);
3195 if (!chunk)
3196 return -ENOMEM;
3197
3198 err = sctp_send_asconf(asoc, chunk);
3199
3200 pr_debug("%s: we set peer primary addr primitively\n", __func__);
3201
3202 return err;
3203 }
3204
3205 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
3206 unsigned int optlen)
3207 {
3208 struct sctp_setadaptation adaptation;
3209
3210 if (optlen != sizeof(struct sctp_setadaptation))
3211 return -EINVAL;
3212 if (copy_from_user(&adaptation, optval, optlen))
3213 return -EFAULT;
3214
3215 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
3216
3217 return 0;
3218 }
3219
3220 /*
3221 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
3222 *
3223 * The context field in the sctp_sndrcvinfo structure is normally only
3224 * used when a failed message is retrieved holding the value that was
3225 * sent down on the actual send call. This option allows the setting of
3226 * a default context on an association basis that will be received on
3227 * reading messages from the peer. This is especially helpful in the
3228 * one-2-many model for an application to keep some reference to an
3229 * internal state machine that is processing messages on the
3230 * association. Note that the setting of this value only effects
3231 * received messages from the peer and does not effect the value that is
3232 * saved with outbound messages.
3233 */
3234 static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
3235 unsigned int optlen)
3236 {
3237 struct sctp_assoc_value params;
3238 struct sctp_sock *sp;
3239 struct sctp_association *asoc;
3240
3241 if (optlen != sizeof(struct sctp_assoc_value))
3242 return -EINVAL;
3243 if (copy_from_user(&params, optval, optlen))
3244 return -EFAULT;
3245
3246 sp = sctp_sk(sk);
3247
3248 if (params.assoc_id != 0) {
3249 asoc = sctp_id2assoc(sk, params.assoc_id);
3250 if (!asoc)
3251 return -EINVAL;
3252 asoc->default_rcv_context = params.assoc_value;
3253 } else {
3254 sp->default_rcv_context = params.assoc_value;
3255 }
3256
3257 return 0;
3258 }
3259
3260 /*
3261 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
3262 *
3263 * This option will at a minimum specify if the implementation is doing
3264 * fragmented interleave. Fragmented interleave, for a one to many
3265 * socket, is when subsequent calls to receive a message may return
3266 * parts of messages from different associations. Some implementations
3267 * may allow you to turn this value on or off. If so, when turned off,
3268 * no fragment interleave will occur (which will cause a head of line
3269 * blocking amongst multiple associations sharing the same one to many
3270 * socket). When this option is turned on, then each receive call may
3271 * come from a different association (thus the user must receive data
3272 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
3273 * association each receive belongs to).
3274 *
3275 * This option takes a boolean value. A non-zero value indicates that
3276 * fragmented interleave is on. A value of zero indicates that
3277 * fragmented interleave is off.
3278 *
3279 * Note that it is important that an implementation that allows this
3280 * option to be turned on, have it off by default. Otherwise an unaware
3281 * application using the one to many model may become confused and act
3282 * incorrectly.
3283 */
3284 static int sctp_setsockopt_fragment_interleave(struct sock *sk,
3285 char __user *optval,
3286 unsigned int optlen)
3287 {
3288 int val;
3289
3290 if (optlen != sizeof(int))
3291 return -EINVAL;
3292 if (get_user(val, (int __user *)optval))
3293 return -EFAULT;
3294
3295 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
3296
3297 return 0;
3298 }
3299
3300 /*
3301 * 8.1.21. Set or Get the SCTP Partial Delivery Point
3302 * (SCTP_PARTIAL_DELIVERY_POINT)
3303 *
3304 * This option will set or get the SCTP partial delivery point. This
3305 * point is the size of a message where the partial delivery API will be
3306 * invoked to help free up rwnd space for the peer. Setting this to a
3307 * lower value will cause partial deliveries to happen more often. The
3308 * call's argument is an integer that sets or gets the partial delivery
3309 * point. Note also that the call will fail if the user attempts to set
3310 * this value larger than the socket receive buffer size.
3311 *
3312 * Note that any single message having a length smaller than or equal to
3313 * the SCTP partial delivery point will be delivered in one single read
3314 * call as long as the user provided buffer is large enough to hold the
3315 * message.
3316 */
3317 static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
3318 char __user *optval,
3319 unsigned int optlen)
3320 {
3321 u32 val;
3322
3323 if (optlen != sizeof(u32))
3324 return -EINVAL;
3325 if (get_user(val, (int __user *)optval))
3326 return -EFAULT;
3327
3328 /* Note: We double the receive buffer from what the user sets
3329 * it to be, also initial rwnd is based on rcvbuf/2.
3330 */
3331 if (val > (sk->sk_rcvbuf >> 1))
3332 return -EINVAL;
3333
3334 sctp_sk(sk)->pd_point = val;
3335
3336 return 0; /* is this the right error code? */
3337 }
3338
3339 /*
3340 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
3341 *
3342 * This option will allow a user to change the maximum burst of packets
3343 * that can be emitted by this association. Note that the default value
3344 * is 4, and some implementations may restrict this setting so that it
3345 * can only be lowered.
3346 *
3347 * NOTE: This text doesn't seem right. Do this on a socket basis with
3348 * future associations inheriting the socket value.
3349 */
3350 static int sctp_setsockopt_maxburst(struct sock *sk,
3351 char __user *optval,
3352 unsigned int optlen)
3353 {
3354 struct sctp_assoc_value params;
3355 struct sctp_sock *sp;
3356 struct sctp_association *asoc;
3357 int val;
3358 int assoc_id = 0;
3359
3360 if (optlen == sizeof(int)) {
3361 pr_warn_ratelimited(DEPRECATED
3362 "%s (pid %d) "
3363 "Use of int in max_burst socket option deprecated.\n"
3364 "Use struct sctp_assoc_value instead\n",
3365 current->comm, task_pid_nr(current));
3366 if (copy_from_user(&val, optval, optlen))
3367 return -EFAULT;
3368 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3369 if (copy_from_user(&params, optval, optlen))
3370 return -EFAULT;
3371 val = params.assoc_value;
3372 assoc_id = params.assoc_id;
3373 } else
3374 return -EINVAL;
3375
3376 sp = sctp_sk(sk);
3377
3378 if (assoc_id != 0) {
3379 asoc = sctp_id2assoc(sk, assoc_id);
3380 if (!asoc)
3381 return -EINVAL;
3382 asoc->max_burst = val;
3383 } else
3384 sp->max_burst = val;
3385
3386 return 0;
3387 }
3388
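/* Illustrative user-space sketch of lowering the maximum packet burst with
 * SCTP_MAX_BURST, using the struct sctp_assoc_value form accepted by the
 * handler above (assumes the usual <netinet/sctp.h> declarations).
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int limit_burst(int sd, sctp_assoc_t id)
 *	{
 *		struct sctp_assoc_value av;
 *
 *		memset(&av, 0, sizeof(av));
 *		av.assoc_id = id;
 *		av.assoc_value = 2;	// packets per burst
 *		return setsockopt(sd, IPPROTO_SCTP, SCTP_MAX_BURST,
 *				  &av, sizeof(av));
 *	}
 */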
3389 /*
3390 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
3391 *
3392 * This set option adds a chunk type that the user is requesting to be
3393 * received only in an authenticated way. Changes to the list of chunks
3394 * will only affect future associations on the socket.
3395 */
3396 static int sctp_setsockopt_auth_chunk(struct sock *sk,
3397 char __user *optval,
3398 unsigned int optlen)
3399 {
3400 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3401 struct sctp_authchunk val;
3402
3403 if (!ep->auth_enable)
3404 return -EACCES;
3405
3406 if (optlen != sizeof(struct sctp_authchunk))
3407 return -EINVAL;
3408 if (copy_from_user(&val, optval, optlen))
3409 return -EFAULT;
3410
3411 switch (val.sauth_chunk) {
3412 case SCTP_CID_INIT:
3413 case SCTP_CID_INIT_ACK:
3414 case SCTP_CID_SHUTDOWN_COMPLETE:
3415 case SCTP_CID_AUTH:
3416 return -EINVAL;
3417 }
3418
3419 /* add this chunk id to the endpoint */
3420 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
3421 }
3422
3423 /*
3424 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
3425 *
3426 * This option gets or sets the list of HMAC algorithms that the local
3427 * endpoint requires the peer to use.
3428 */
3429 static int sctp_setsockopt_hmac_ident(struct sock *sk,
3430 char __user *optval,
3431 unsigned int optlen)
3432 {
3433 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3434 struct sctp_hmacalgo *hmacs;
3435 u32 idents;
3436 int err;
3437
3438 if (!ep->auth_enable)
3439 return -EACCES;
3440
3441 if (optlen < sizeof(struct sctp_hmacalgo))
3442 return -EINVAL;
3443
3444 hmacs = memdup_user(optval, optlen);
3445 if (IS_ERR(hmacs))
3446 return PTR_ERR(hmacs);
3447
3448 idents = hmacs->shmac_num_idents;
3449 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
3450 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
3451 err = -EINVAL;
3452 goto out;
3453 }
3454
3455 err = sctp_auth_ep_set_hmacs(ep, hmacs);
3456 out:
3457 kfree(hmacs);
3458 return err;
3459 }
3460
3461 /*
3462 * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
3463 *
3464 * This option will set a shared secret key which is used to build an
3465 * association shared key.
3466 */
3467 static int sctp_setsockopt_auth_key(struct sock *sk,
3468 char __user *optval,
3469 unsigned int optlen)
3470 {
3471 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3472 struct sctp_authkey *authkey;
3473 struct sctp_association *asoc;
3474 int ret;
3475
3476 if (!ep->auth_enable)
3477 return -EACCES;
3478
3479 if (optlen <= sizeof(struct sctp_authkey))
3480 return -EINVAL;
3481
3482 authkey = memdup_user(optval, optlen);
3483 if (IS_ERR(authkey))
3484 return PTR_ERR(authkey);
3485
3486 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
3487 ret = -EINVAL;
3488 goto out;
3489 }
3490
3491 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
3492 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
3493 ret = -EINVAL;
3494 goto out;
3495 }
3496
3497 ret = sctp_auth_set_key(ep, asoc, authkey);
3498 out:
3499 kzfree(authkey);
3500 return ret;
3501 }
3502
3503 /*
3504 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
3505 *
3506 * This option will get or set the active shared key to be used to build
3507 * the association shared key.
3508 */
3509 static int sctp_setsockopt_active_key(struct sock *sk,
3510 char __user *optval,
3511 unsigned int optlen)
3512 {
3513 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3514 struct sctp_authkeyid val;
3515 struct sctp_association *asoc;
3516
3517 if (!ep->auth_enable)
3518 return -EACCES;
3519
3520 if (optlen != sizeof(struct sctp_authkeyid))
3521 return -EINVAL;
3522 if (copy_from_user(&val, optval, optlen))
3523 return -EFAULT;
3524
3525 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
3526 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3527 return -EINVAL;
3528
3529 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
3530 }
3531
3532 /*
3533 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
3534 *
3535 * This set option will delete a shared secret key from use.
3536 */
3537 static int sctp_setsockopt_del_key(struct sock *sk,
3538 char __user *optval,
3539 unsigned int optlen)
3540 {
3541 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3542 struct sctp_authkeyid val;
3543 struct sctp_association *asoc;
3544
3545 if (!ep->auth_enable)
3546 return -EACCES;
3547
3548 if (optlen != sizeof(struct sctp_authkeyid))
3549 return -EINVAL;
3550 if (copy_from_user(&val, optval, optlen))
3551 return -EFAULT;
3552
3553 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
3554 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3555 return -EINVAL;
3556
3557 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
3558
3559 }
3560
3561 /*
3562 * 8.1.23 SCTP_AUTO_ASCONF
3563 *
3564 * This option will enable or disable the use of the automatic generation of
3565 * ASCONF chunks to add and delete addresses to an existing association. Note
3566 * that this option has two caveats namely: a) it only affects sockets that
3567 * are bound to all addresses available to the SCTP stack, and b) the system
3568 * administrator may have an overriding control that turns the ASCONF feature
3569 * off no matter what setting the socket option may have.
3570 * This option expects an integer boolean flag, where a non-zero value turns on
3571 * the option, and a zero value turns off the option.
3572  * Note: in this implementation the socket option overrides the default set
3573  * via sysctl, as the FreeBSD implementation also does.
3574 */
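/* Illustrative user-space sketch (the descriptor 'sd' is hypothetical): the
 * option takes a plain int flag, so enabling automatic ASCONF is simply
 *
 *	int on = 1;
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &on, sizeof(on));
 *
 * which, as enforced below, only succeeds while the socket is bound to the
 * wildcard address.
 */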
3575 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
3576 unsigned int optlen)
3577 {
3578 int val;
3579 struct sctp_sock *sp = sctp_sk(sk);
3580
3581 if (optlen < sizeof(int))
3582 return -EINVAL;
3583 if (get_user(val, (int __user *)optval))
3584 return -EFAULT;
3585 if (!sctp_is_ep_boundall(sk) && val)
3586 return -EINVAL;
3587 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
3588 return 0;
3589
3590 if (val == 0 && sp->do_auto_asconf) {
3591 list_del(&sp->auto_asconf_list);
3592 sp->do_auto_asconf = 0;
3593 } else if (val && !sp->do_auto_asconf) {
3594 list_add_tail(&sp->auto_asconf_list,
3595 &sock_net(sk)->sctp.auto_asconf_splist);
3596 sp->do_auto_asconf = 1;
3597 }
3598 return 0;
3599 }
3600
3601 /*
3602 * SCTP_PEER_ADDR_THLDS
3603 *
3604 * This option allows us to alter the partially failed threshold for one or all
3605 * transports in an association. See Section 6.1 of:
3606 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
3607 */
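/* Illustrative user-space sketch ('sd' and 'assoc_id' are assumed, and the
 * socket is assumed to be AF_INET): lowering the partially failed threshold
 * for every transport of one association by passing the IPv4 wildcard in
 * spt_address.  A spt_pathmaxrxt of 0 leaves the Path.Max.Retrans setting
 * untouched, matching the code below.
 *
 *	struct sctp_paddrthlds thlds;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&thlds.spt_address;
 *
 *	memset(&thlds, 0, sizeof(thlds));
 *	sin->sin_family      = AF_INET;
 *	thlds.spt_assoc_id   = assoc_id;
 *	thlds.spt_pathpfthld = 2;
 *	thlds.spt_pathmaxrxt = 0;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
 *		   &thlds, sizeof(thlds));
 */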
3608 static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
3609 char __user *optval,
3610 unsigned int optlen)
3611 {
3612 struct sctp_paddrthlds val;
3613 struct sctp_transport *trans;
3614 struct sctp_association *asoc;
3615
3616 if (optlen < sizeof(struct sctp_paddrthlds))
3617 return -EINVAL;
3618 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
3619 sizeof(struct sctp_paddrthlds)))
3620 return -EFAULT;
3621
3622
3623 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
3624 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
3625 if (!asoc)
3626 return -ENOENT;
3627 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
3628 transports) {
3629 if (val.spt_pathmaxrxt)
3630 trans->pathmaxrxt = val.spt_pathmaxrxt;
3631 trans->pf_retrans = val.spt_pathpfthld;
3632 }
3633
3634 if (val.spt_pathmaxrxt)
3635 asoc->pathmaxrxt = val.spt_pathmaxrxt;
3636 asoc->pf_retrans = val.spt_pathpfthld;
3637 } else {
3638 trans = sctp_addr_id2transport(sk, &val.spt_address,
3639 val.spt_assoc_id);
3640 if (!trans)
3641 return -ENOENT;
3642
3643 if (val.spt_pathmaxrxt)
3644 trans->pathmaxrxt = val.spt_pathmaxrxt;
3645 trans->pf_retrans = val.spt_pathpfthld;
3646 }
3647
3648 return 0;
3649 }
3650
3651 static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
3652 char __user *optval,
3653 unsigned int optlen)
3654 {
3655 int val;
3656
3657 if (optlen < sizeof(int))
3658 return -EINVAL;
3659 if (get_user(val, (int __user *) optval))
3660 return -EFAULT;
3661
3662 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;
3663
3664 return 0;
3665 }
3666
3667 static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
3668 char __user *optval,
3669 unsigned int optlen)
3670 {
3671 int val;
3672
3673 if (optlen < sizeof(int))
3674 return -EINVAL;
3675 if (get_user(val, (int __user *) optval))
3676 return -EFAULT;
3677
3678 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;
3679
3680 return 0;
3681 }
3682
3683 /* API 6.2 setsockopt(), getsockopt()
3684 *
3685 * Applications use setsockopt() and getsockopt() to set or retrieve
3686 * socket options. Socket options are used to change the default
3687  * behavior of socket calls. They are described in Section 7.
3688 *
3689 * The syntax is:
3690 *
3691 * ret = getsockopt(int sd, int level, int optname, void __user *optval,
3692 * int __user *optlen);
3693 * ret = setsockopt(int sd, int level, int optname, const void __user *optval,
3694 * int optlen);
3695 *
3696  *   sd      - the socket descriptor.
3697 * level - set to IPPROTO_SCTP for all SCTP options.
3698 * optname - the option name.
3699 * optval - the buffer to store the value of the option.
3700 * optlen - the size of the buffer.
3701 */
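/* A minimal user-space sketch of the calling convention described above,
 * using SCTP_NODELAY as an arbitrary example; 'sd' is assumed to be an SCTP
 * socket descriptor.
 *
 *	int on = 1, val;
 *	socklen_t len = sizeof(val);
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
 *	getsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &val, &len);
 */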
3702 static int sctp_setsockopt(struct sock *sk, int level, int optname,
3703 char __user *optval, unsigned int optlen)
3704 {
3705 int retval = 0;
3706
3707 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
3708
3709 /* I can hardly begin to describe how wrong this is. This is
3710 * so broken as to be worse than useless. The API draft
3711 * REALLY is NOT helpful here... I am not convinced that the
3712 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
3713 * are at all well-founded.
3714 */
3715 if (level != SOL_SCTP) {
3716 struct sctp_af *af = sctp_sk(sk)->pf->af;
3717 retval = af->setsockopt(sk, level, optname, optval, optlen);
3718 goto out_nounlock;
3719 }
3720
3721 lock_sock(sk);
3722
3723 switch (optname) {
3724 case SCTP_SOCKOPT_BINDX_ADD:
3725 /* 'optlen' is the size of the addresses buffer. */
3726 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
3727 optlen, SCTP_BINDX_ADD_ADDR);
3728 break;
3729
3730 case SCTP_SOCKOPT_BINDX_REM:
3731 /* 'optlen' is the size of the addresses buffer. */
3732 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
3733 optlen, SCTP_BINDX_REM_ADDR);
3734 break;
3735
3736 case SCTP_SOCKOPT_CONNECTX_OLD:
3737 /* 'optlen' is the size of the addresses buffer. */
3738 retval = sctp_setsockopt_connectx_old(sk,
3739 (struct sockaddr __user *)optval,
3740 optlen);
3741 break;
3742
3743 case SCTP_SOCKOPT_CONNECTX:
3744 /* 'optlen' is the size of the addresses buffer. */
3745 retval = sctp_setsockopt_connectx(sk,
3746 (struct sockaddr __user *)optval,
3747 optlen);
3748 break;
3749
3750 case SCTP_DISABLE_FRAGMENTS:
3751 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
3752 break;
3753
3754 case SCTP_EVENTS:
3755 retval = sctp_setsockopt_events(sk, optval, optlen);
3756 break;
3757
3758 case SCTP_AUTOCLOSE:
3759 retval = sctp_setsockopt_autoclose(sk, optval, optlen);
3760 break;
3761
3762 case SCTP_PEER_ADDR_PARAMS:
3763 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
3764 break;
3765
3766 case SCTP_DELAYED_SACK:
3767 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
3768 break;
3769 case SCTP_PARTIAL_DELIVERY_POINT:
3770 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
3771 break;
3772
3773 case SCTP_INITMSG:
3774 retval = sctp_setsockopt_initmsg(sk, optval, optlen);
3775 break;
3776 case SCTP_DEFAULT_SEND_PARAM:
3777 retval = sctp_setsockopt_default_send_param(sk, optval,
3778 optlen);
3779 break;
3780 case SCTP_DEFAULT_SNDINFO:
3781 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
3782 break;
3783 case SCTP_PRIMARY_ADDR:
3784 retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
3785 break;
3786 case SCTP_SET_PEER_PRIMARY_ADDR:
3787 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
3788 break;
3789 case SCTP_NODELAY:
3790 retval = sctp_setsockopt_nodelay(sk, optval, optlen);
3791 break;
3792 case SCTP_RTOINFO:
3793 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
3794 break;
3795 case SCTP_ASSOCINFO:
3796 retval = sctp_setsockopt_associnfo(sk, optval, optlen);
3797 break;
3798 case SCTP_I_WANT_MAPPED_V4_ADDR:
3799 retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
3800 break;
3801 case SCTP_MAXSEG:
3802 retval = sctp_setsockopt_maxseg(sk, optval, optlen);
3803 break;
3804 case SCTP_ADAPTATION_LAYER:
3805 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
3806 break;
3807 case SCTP_CONTEXT:
3808 retval = sctp_setsockopt_context(sk, optval, optlen);
3809 break;
3810 case SCTP_FRAGMENT_INTERLEAVE:
3811 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
3812 break;
3813 case SCTP_MAX_BURST:
3814 retval = sctp_setsockopt_maxburst(sk, optval, optlen);
3815 break;
3816 case SCTP_AUTH_CHUNK:
3817 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
3818 break;
3819 case SCTP_HMAC_IDENT:
3820 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
3821 break;
3822 case SCTP_AUTH_KEY:
3823 retval = sctp_setsockopt_auth_key(sk, optval, optlen);
3824 break;
3825 case SCTP_AUTH_ACTIVE_KEY:
3826 retval = sctp_setsockopt_active_key(sk, optval, optlen);
3827 break;
3828 case SCTP_AUTH_DELETE_KEY:
3829 retval = sctp_setsockopt_del_key(sk, optval, optlen);
3830 break;
3831 case SCTP_AUTO_ASCONF:
3832 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
3833 break;
3834 case SCTP_PEER_ADDR_THLDS:
3835 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
3836 break;
3837 case SCTP_RECVRCVINFO:
3838 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
3839 break;
3840 case SCTP_RECVNXTINFO:
3841 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
3842 break;
3843 default:
3844 retval = -ENOPROTOOPT;
3845 break;
3846 }
3847
3848 release_sock(sk);
3849
3850 out_nounlock:
3851 return retval;
3852 }
3853
3854 /* API 3.1.6 connect() - UDP Style Syntax
3855 *
3856 * An application may use the connect() call in the UDP model to initiate an
3857 * association without sending data.
3858 *
3859 * The syntax is:
3860 *
3861 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
3862 *
3863 * sd: the socket descriptor to have a new association added to.
3864 *
3865 * nam: the address structure (either struct sockaddr_in or struct
3866 * sockaddr_in6 defined in RFC2553 [7]).
3867 *
3868 * len: the size of the address.
3869 */
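/* Illustrative user-space sketch (the peer address, port and descriptor are
 * hypothetical): initiating an association on a one-to-many socket without
 * sending any data.
 *
 *	struct sockaddr_in peer;
 *
 *	memset(&peer, 0, sizeof(peer));
 *	peer.sin_family = AF_INET;
 *	peer.sin_port   = htons(5000);
 *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *	connect(sd, (struct sockaddr *)&peer, sizeof(peer));
 */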
3870 static int sctp_connect(struct sock *sk, struct sockaddr *addr,
3871 int addr_len)
3872 {
3873 int err = 0;
3874 struct sctp_af *af;
3875
3876 lock_sock(sk);
3877
3878 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
3879 addr, addr_len);
3880
3881 /* Validate addr_len before calling common connect/connectx routine. */
3882 af = sctp_get_af_specific(addr->sa_family);
3883 if (!af || addr_len < af->sockaddr_len) {
3884 err = -EINVAL;
3885 } else {
3886 		/* Pass correct addr len to common routine (so it knows there
3887 		 * is only one address being passed).
3888 */
3889 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
3890 }
3891
3892 release_sock(sk);
3893 return err;
3894 }
3895
3896 /* FIXME: Write comments. */
3897 static int sctp_disconnect(struct sock *sk, int flags)
3898 {
3899 return -EOPNOTSUPP; /* STUB */
3900 }
3901
3902 /* 4.1.4 accept() - TCP Style Syntax
3903 *
3904  * Applications use the accept() call to remove an established SCTP
3905 * association from the accept queue of the endpoint. A new socket
3906 * descriptor will be returned from accept() to represent the newly
3907 * formed association.
3908 */
3909 static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
3910 {
3911 struct sctp_sock *sp;
3912 struct sctp_endpoint *ep;
3913 struct sock *newsk = NULL;
3914 struct sctp_association *asoc;
3915 long timeo;
3916 int error = 0;
3917
3918 lock_sock(sk);
3919
3920 sp = sctp_sk(sk);
3921 ep = sp->ep;
3922
3923 if (!sctp_style(sk, TCP)) {
3924 error = -EOPNOTSUPP;
3925 goto out;
3926 }
3927
3928 if (!sctp_sstate(sk, LISTENING)) {
3929 error = -EINVAL;
3930 goto out;
3931 }
3932
3933 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
3934
3935 error = sctp_wait_for_accept(sk, timeo);
3936 if (error)
3937 goto out;
3938
3939 /* We treat the list of associations on the endpoint as the accept
3940 * queue and pick the first association on the list.
3941 */
3942 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
3943
3944 newsk = sp->pf->create_accept_sk(sk, asoc);
3945 if (!newsk) {
3946 error = -ENOMEM;
3947 goto out;
3948 }
3949
3950 /* Populate the fields of the newsk from the oldsk and migrate the
3951 * asoc to the newsk.
3952 */
3953 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
3954
3955 out:
3956 release_sock(sk);
3957 *err = error;
3958 return newsk;
3959 }
3960
3961 /* The SCTP ioctl handler. */
3962 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
3963 {
3964 int rc = -ENOTCONN;
3965
3966 lock_sock(sk);
3967
3968 /*
3969 * SEQPACKET-style sockets in LISTENING state are valid, for
3970 * SCTP, so only discard TCP-style sockets in LISTENING state.
3971 */
3972 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
3973 goto out;
3974
3975 switch (cmd) {
3976 case SIOCINQ: {
3977 struct sk_buff *skb;
3978 unsigned int amount = 0;
3979
3980 skb = skb_peek(&sk->sk_receive_queue);
3981 if (skb != NULL) {
3982 /*
3983 * We will only return the amount of this packet since
3984 * that is all that will be read.
3985 */
3986 amount = skb->len;
3987 }
3988 rc = put_user(amount, (int __user *)arg);
3989 break;
3990 }
3991 default:
3992 rc = -ENOIOCTLCMD;
3993 break;
3994 }
3995 out:
3996 release_sock(sk);
3997 return rc;
3998 }
3999
4000 /* This is the function which gets called during socket creation to
4001  * initialize the SCTP-specific portion of the sock.
4002 * The sock structure should already be zero-filled memory.
4003 */
4004 static int sctp_init_sock(struct sock *sk)
4005 {
4006 struct net *net = sock_net(sk);
4007 struct sctp_sock *sp;
4008
4009 pr_debug("%s: sk:%p\n", __func__, sk);
4010
4011 sp = sctp_sk(sk);
4012
4013 /* Initialize the SCTP per socket area. */
4014 switch (sk->sk_type) {
4015 case SOCK_SEQPACKET:
4016 sp->type = SCTP_SOCKET_UDP;
4017 break;
4018 case SOCK_STREAM:
4019 sp->type = SCTP_SOCKET_TCP;
4020 break;
4021 default:
4022 return -ESOCKTNOSUPPORT;
4023 }
4024
4025 /* Initialize default send parameters. These parameters can be
4026 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
4027 */
4028 sp->default_stream = 0;
4029 sp->default_ppid = 0;
4030 sp->default_flags = 0;
4031 sp->default_context = 0;
4032 sp->default_timetolive = 0;
4033
4034 sp->default_rcv_context = 0;
4035 sp->max_burst = net->sctp.max_burst;
4036
4037 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
4038
4039 /* Initialize default setup parameters. These parameters
4040 * can be modified with the SCTP_INITMSG socket option or
4041 * overridden by the SCTP_INIT CMSG.
4042 */
4043 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
4044 sp->initmsg.sinit_max_instreams = sctp_max_instreams;
4045 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
4046 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
4047
4048 /* Initialize default RTO related parameters. These parameters can
4049  * be modified with the SCTP_RTOINFO socket option.
4050 */
4051 sp->rtoinfo.srto_initial = net->sctp.rto_initial;
4052 sp->rtoinfo.srto_max = net->sctp.rto_max;
4053 sp->rtoinfo.srto_min = net->sctp.rto_min;
4054
4055 /* Initialize default association related parameters. These parameters
4056 * can be modified with the SCTP_ASSOCINFO socket option.
4057 */
4058 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
4059 sp->assocparams.sasoc_number_peer_destinations = 0;
4060 sp->assocparams.sasoc_peer_rwnd = 0;
4061 sp->assocparams.sasoc_local_rwnd = 0;
4062 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
4063
4064 /* Initialize default event subscriptions. By default, all the
4065 * options are off.
4066 */
4067 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
4068
4069 /* Default Peer Address Parameters. These defaults can
4070 * be modified via SCTP_PEER_ADDR_PARAMS
4071 */
4072 sp->hbinterval = net->sctp.hb_interval;
4073 sp->pathmaxrxt = net->sctp.max_retrans_path;
4074 sp->pathmtu = 0; /* allow default discovery */
4075 sp->sackdelay = net->sctp.sack_timeout;
4076 sp->sackfreq = 2;
4077 sp->param_flags = SPP_HB_ENABLE |
4078 SPP_PMTUD_ENABLE |
4079 SPP_SACKDELAY_ENABLE;
4080
4081 /* If enabled no SCTP message fragmentation will be performed.
4082 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
4083 */
4084 sp->disable_fragments = 0;
4085
4086 /* Enable Nagle algorithm by default. */
4087 sp->nodelay = 0;
4088
4089 sp->recvrcvinfo = 0;
4090 sp->recvnxtinfo = 0;
4091
4092 /* Enable by default. */
4093 sp->v4mapped = 1;
4094
4095 /* Auto-close idle associations after the configured
4096 * number of seconds. A value of 0 disables this
4097 * feature. Configure through the SCTP_AUTOCLOSE socket option,
4098 * for UDP-style sockets only.
4099 */
4100 sp->autoclose = 0;
4101
4102 /* User specified fragmentation limit. */
4103 sp->user_frag = 0;
4104
4105 sp->adaptation_ind = 0;
4106
4107 sp->pf = sctp_get_pf_specific(sk->sk_family);
4108
4109 /* Control variables for partial data delivery. */
4110 atomic_set(&sp->pd_mode, 0);
4111 skb_queue_head_init(&sp->pd_lobby);
4112 sp->frag_interleave = 0;
4113
4114 /* Create a per socket endpoint structure. Even if we
4115 * change the data structure relationships, this may still
4116 * be useful for storing pre-connect address information.
4117 */
4118 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
4119 if (!sp->ep)
4120 return -ENOMEM;
4121
4122 sp->hmac = NULL;
4123
4124 sk->sk_destruct = sctp_destruct_sock;
4125
4126 SCTP_DBG_OBJCNT_INC(sock);
4127
4128 local_bh_disable();
4129 percpu_counter_inc(&sctp_sockets_allocated);
4130 sock_prot_inuse_add(net, sk->sk_prot, 1);
4131 if (net->sctp.default_auto_asconf) {
4132 list_add_tail(&sp->auto_asconf_list,
4133 &net->sctp.auto_asconf_splist);
4134 sp->do_auto_asconf = 1;
4135 } else
4136 sp->do_auto_asconf = 0;
4137 local_bh_enable();
4138
4139 return 0;
4140 }
4141
4142 /* Cleanup any SCTP per socket resources. */
4143 static void sctp_destroy_sock(struct sock *sk)
4144 {
4145 struct sctp_sock *sp;
4146
4147 pr_debug("%s: sk:%p\n", __func__, sk);
4148
4149 /* Release our hold on the endpoint. */
4150 sp = sctp_sk(sk);
4151 /* This could happen during socket init, thus we bail out
4152 * early, since the rest of the below is not setup either.
4153 */
4154 if (sp->ep == NULL)
4155 return;
4156
4157 if (sp->do_auto_asconf) {
4158 sp->do_auto_asconf = 0;
4159 list_del(&sp->auto_asconf_list);
4160 }
4161 sctp_endpoint_free(sp->ep);
4162 local_bh_disable();
4163 percpu_counter_dec(&sctp_sockets_allocated);
4164 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
4165 local_bh_enable();
4166 }
4167
4168 /* Triggered when there are no references on the socket anymore */
4169 static void sctp_destruct_sock(struct sock *sk)
4170 {
4171 struct sctp_sock *sp = sctp_sk(sk);
4172
4173 /* Free up the HMAC transform. */
4174 crypto_free_hash(sp->hmac);
4175
4176 inet_sock_destruct(sk);
4177 }
4178
4179 /* API 4.1.7 shutdown() - TCP Style Syntax
4180 * int shutdown(int socket, int how);
4181 *
4182 * sd - the socket descriptor of the association to be closed.
4183 * how - Specifies the type of shutdown. The values are
4184 * as follows:
4185 * SHUT_RD
4186 * Disables further receive operations. No SCTP
4187 * protocol action is taken.
4188 * SHUT_WR
4189 * Disables further send operations, and initiates
4190 * the SCTP shutdown sequence.
4191 * SHUT_RDWR
4192 * Disables further send and receive operations
4193 * and initiates the SCTP shutdown sequence.
4194 */
4195 static void sctp_shutdown(struct sock *sk, int how)
4196 {
4197 struct net *net = sock_net(sk);
4198 struct sctp_endpoint *ep;
4199 struct sctp_association *asoc;
4200
4201 if (!sctp_style(sk, TCP))
4202 return;
4203
4204 if (how & SEND_SHUTDOWN) {
4205 ep = sctp_sk(sk)->ep;
4206 if (!list_empty(&ep->asocs)) {
4207 asoc = list_entry(ep->asocs.next,
4208 struct sctp_association, asocs);
4209 sctp_primitive_SHUTDOWN(net, asoc, NULL);
4210 }
4211 }
4212 }
4213
4214 /* 7.2.1 Association Status (SCTP_STATUS)
4215  *
4216 * Applications can retrieve current status information about an
4217 * association, including association state, peer receiver window size,
4218 * number of unacked data chunks, and number of data chunks pending
4219 * receipt. This information is read-only.
4220 */
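/* Illustrative user-space sketch ('sd' and 'assoc_id' are assumed): the
 * caller fills in sstat_assoc_id and the kernel overwrites the rest of the
 * structure on return.
 *
 *	struct sctp_status status;
 *	socklen_t len = sizeof(status);
 *
 *	memset(&status, 0, sizeof(status));
 *	status.sstat_assoc_id = assoc_id;
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &status, &len) == 0)
 *		printf("rwnd %u, unacked %u\n",
 *		       status.sstat_rwnd, status.sstat_unackdata);
 */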
4221 static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
4222 char __user *optval,
4223 int __user *optlen)
4224 {
4225 struct sctp_status status;
4226 struct sctp_association *asoc = NULL;
4227 struct sctp_transport *transport;
4228 sctp_assoc_t associd;
4229 int retval = 0;
4230
4231 if (len < sizeof(status)) {
4232 retval = -EINVAL;
4233 goto out;
4234 }
4235
4236 len = sizeof(status);
4237 if (copy_from_user(&status, optval, len)) {
4238 retval = -EFAULT;
4239 goto out;
4240 }
4241
4242 associd = status.sstat_assoc_id;
4243 asoc = sctp_id2assoc(sk, associd);
4244 if (!asoc) {
4245 retval = -EINVAL;
4246 goto out;
4247 }
4248
4249 transport = asoc->peer.primary_path;
4250
4251 status.sstat_assoc_id = sctp_assoc2id(asoc);
4252 status.sstat_state = sctp_assoc_to_state(asoc);
4253 status.sstat_rwnd = asoc->peer.rwnd;
4254 status.sstat_unackdata = asoc->unack_data;
4255
4256 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4257 status.sstat_instrms = asoc->c.sinit_max_instreams;
4258 status.sstat_outstrms = asoc->c.sinit_num_ostreams;
4259 status.sstat_fragmentation_point = asoc->frag_point;
4260 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4261 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
4262 transport->af_specific->sockaddr_len);
4263 /* Map ipv4 address into v4-mapped-on-v6 address. */
4264 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
4265 (union sctp_addr *)&status.sstat_primary.spinfo_address);
4266 status.sstat_primary.spinfo_state = transport->state;
4267 status.sstat_primary.spinfo_cwnd = transport->cwnd;
4268 status.sstat_primary.spinfo_srtt = transport->srtt;
4269 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
4270 status.sstat_primary.spinfo_mtu = transport->pathmtu;
4271
4272 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
4273 status.sstat_primary.spinfo_state = SCTP_ACTIVE;
4274
4275 if (put_user(len, optlen)) {
4276 retval = -EFAULT;
4277 goto out;
4278 }
4279
4280 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
4281 __func__, len, status.sstat_state, status.sstat_rwnd,
4282 status.sstat_assoc_id);
4283
4284 if (copy_to_user(optval, &status, len)) {
4285 retval = -EFAULT;
4286 goto out;
4287 }
4288
4289 out:
4290 return retval;
4291 }
4292
4293
4294 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
4295 *
4296 * Applications can retrieve information about a specific peer address
4297 * of an association, including its reachability state, congestion
4298 * window, and retransmission timer values. This information is
4299 * read-only.
4300 */
4301 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
4302 char __user *optval,
4303 int __user *optlen)
4304 {
4305 struct sctp_paddrinfo pinfo;
4306 struct sctp_transport *transport;
4307 int retval = 0;
4308
4309 if (len < sizeof(pinfo)) {
4310 retval = -EINVAL;
4311 goto out;
4312 }
4313
4314 len = sizeof(pinfo);
4315 if (copy_from_user(&pinfo, optval, len)) {
4316 retval = -EFAULT;
4317 goto out;
4318 }
4319
4320 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
4321 pinfo.spinfo_assoc_id);
4322 if (!transport)
4323 return -EINVAL;
4324
4325 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4326 pinfo.spinfo_state = transport->state;
4327 pinfo.spinfo_cwnd = transport->cwnd;
4328 pinfo.spinfo_srtt = transport->srtt;
4329 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
4330 pinfo.spinfo_mtu = transport->pathmtu;
4331
4332 if (pinfo.spinfo_state == SCTP_UNKNOWN)
4333 pinfo.spinfo_state = SCTP_ACTIVE;
4334
4335 if (put_user(len, optlen)) {
4336 retval = -EFAULT;
4337 goto out;
4338 }
4339
4340 if (copy_to_user(optval, &pinfo, len)) {
4341 retval = -EFAULT;
4342 goto out;
4343 }
4344
4345 out:
4346 return retval;
4347 }
4348
4349 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
4350 *
4351  * This option is an on/off flag. If enabled no SCTP message
4352  * fragmentation will be performed. Instead if a message being sent
4353  * exceeds the current PMTU size, the message will NOT be sent and
4354  * instead an error will be indicated to the user.
4355 */
4356 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
4357 char __user *optval, int __user *optlen)
4358 {
4359 int val;
4360
4361 if (len < sizeof(int))
4362 return -EINVAL;
4363
4364 len = sizeof(int);
4365 val = (sctp_sk(sk)->disable_fragments == 1);
4366 if (put_user(len, optlen))
4367 return -EFAULT;
4368 if (copy_to_user(optval, &val, len))
4369 return -EFAULT;
4370 return 0;
4371 }
4372
4373 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
4374 *
4375 * This socket option is used to specify various notifications and
4376 * ancillary data the user wishes to receive.
4377 */
4378 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
4379 int __user *optlen)
4380 {
4381 if (len <= 0)
4382 return -EINVAL;
4383 if (len > sizeof(struct sctp_event_subscribe))
4384 len = sizeof(struct sctp_event_subscribe);
4385 if (put_user(len, optlen))
4386 return -EFAULT;
4387 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
4388 return -EFAULT;
4389 return 0;
4390 }
4391
4392 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
4393 *
4394 * This socket option is applicable to the UDP-style socket only. When
4395 * set it will cause associations that are idle for more than the
4396 * specified number of seconds to automatically close. An association
4397  * being idle is defined as an association that has NOT sent or received
4398 * user data. The special value of '0' indicates that no automatic
4399 * close of any associations should be performed. The option expects an
4400 * integer defining the number of seconds of idle time before an
4401 * association is closed.
4402 */
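/* Illustrative user-space sketch ('sd' is assumed to be a one-to-many style
 * socket): auto-close idle associations after two minutes; a value of 0
 * would disable the feature again.
 *
 *	int secs = 120;
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 */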
4403 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
4404 {
4405 /* Applicable to UDP-style socket only */
4406 if (sctp_style(sk, TCP))
4407 return -EOPNOTSUPP;
4408 if (len < sizeof(int))
4409 return -EINVAL;
4410 len = sizeof(int);
4411 if (put_user(len, optlen))
4412 return -EFAULT;
4413 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
4414 return -EFAULT;
4415 return 0;
4416 }
4417
4418 /* Helper routine to branch off an association to a new socket. */
4419 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
4420 {
4421 struct sctp_association *asoc = sctp_id2assoc(sk, id);
4422 struct sctp_sock *sp = sctp_sk(sk);
4423 struct socket *sock;
4424 int err = 0;
4425
4426 if (!asoc)
4427 return -EINVAL;
4428
4429 /* An association cannot be branched off from an already peeled-off
4430 * socket, nor is this supported for tcp style sockets.
4431 */
4432 if (!sctp_style(sk, UDP))
4433 return -EINVAL;
4434
4435 /* Create a new socket. */
4436 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
4437 if (err < 0)
4438 return err;
4439
4440 sctp_copy_sock(sock->sk, sk, asoc);
4441
4442 /* Make peeled-off sockets more like 1-1 accepted sockets.
4443 * Set the daddr and initialize id to something more random
4444 */
4445 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
4446
4447 /* Populate the fields of the newsk from the oldsk and migrate the
4448 * asoc to the newsk.
4449 */
4450 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
4451
4452 *sockp = sock;
4453
4454 return err;
4455 }
4456 EXPORT_SYMBOL(sctp_do_peeloff);
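/* From user space the same operation is reached through the
 * SCTP_SOCKOPT_PEELOFF getsockopt handled below.  A minimal sketch, with
 * 'sd' and 'assoc_id' assumed; on success peeloff.sd holds the descriptor
 * of the new one-to-one style socket.
 *
 *	sctp_peeloff_arg_t peeloff = { .associd = assoc_id };
 *	socklen_t len = sizeof(peeloff);
 *	int new_sd = -1;
 *
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_PEELOFF,
 *		       &peeloff, &len) == 0)
 *		new_sd = peeloff.sd;
 */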
4457
4458 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
4459 {
4460 sctp_peeloff_arg_t peeloff;
4461 struct socket *newsock;
4462 struct file *newfile;
4463 int retval = 0;
4464
4465 if (len < sizeof(sctp_peeloff_arg_t))
4466 return -EINVAL;
4467 len = sizeof(sctp_peeloff_arg_t);
4468 if (copy_from_user(&peeloff, optval, len))
4469 return -EFAULT;
4470
4471 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock);
4472 if (retval < 0)
4473 goto out;
4474
4475 /* Map the socket to an unused fd that can be returned to the user. */
4476 retval = get_unused_fd_flags(0);
4477 if (retval < 0) {
4478 sock_release(newsock);
4479 goto out;
4480 }
4481
4482 newfile = sock_alloc_file(newsock, 0, NULL);
4483 if (unlikely(IS_ERR(newfile))) {
4484 put_unused_fd(retval);
4485 sock_release(newsock);
4486 return PTR_ERR(newfile);
4487 }
4488
4489 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
4490 retval);
4491
4492 /* Return the fd mapped to the new socket. */
4493 if (put_user(len, optlen)) {
4494 fput(newfile);
4495 put_unused_fd(retval);
4496 return -EFAULT;
4497 }
4498 peeloff.sd = retval;
4499 if (copy_to_user(optval, &peeloff, len)) {
4500 fput(newfile);
4501 put_unused_fd(retval);
4502 return -EFAULT;
4503 }
4504 fd_install(retval, newfile);
4505 out:
4506 return retval;
4507 }
4508
4509 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
4510 *
4511 * Applications can enable or disable heartbeats for any peer address of
4512 * an association, modify an address's heartbeat interval, force a
4513 * heartbeat to be sent immediately, and adjust the address's maximum
4514 * number of retransmissions sent before an address is considered
4515 * unreachable. The following structure is used to access and modify an
4516 * address's parameters:
4517 *
4518 * struct sctp_paddrparams {
4519 * sctp_assoc_t spp_assoc_id;
4520 * struct sockaddr_storage spp_address;
4521 * uint32_t spp_hbinterval;
4522 * uint16_t spp_pathmaxrxt;
4523 * uint32_t spp_pathmtu;
4524 * uint32_t spp_sackdelay;
4525 * uint32_t spp_flags;
4526 * };
4527 *
4528  * spp_assoc_id    - (one-to-many style socket) This is filled in by the
4529 * application, and identifies the association for
4530 * this query.
4531 * spp_address - This specifies which address is of interest.
4532 * spp_hbinterval - This contains the value of the heartbeat interval,
4533 * in milliseconds. If a value of zero
4534 * is present in this field then no changes are to
4535 * be made to this parameter.
4536 * spp_pathmaxrxt - This contains the maximum number of
4537 * retransmissions before this address shall be
4538 * considered unreachable. If a value of zero
4539 * is present in this field then no changes are to
4540 * be made to this parameter.
4541 * spp_pathmtu - When Path MTU discovery is disabled the value
4542 * specified here will be the "fixed" path mtu.
4543 * Note that if the spp_address field is empty
4544 * then all associations on this address will
4545 * have this fixed path mtu set upon them.
4546 *
4547 * spp_sackdelay - When delayed sack is enabled, this value specifies
4548 * the number of milliseconds that sacks will be delayed
4549 * for. This value will apply to all addresses of an
4550 * association if the spp_address field is empty. Note
4551 * also, that if delayed sack is enabled and this
4552 * value is set to 0, no change is made to the last
4553 * recorded delayed sack timer value.
4554 *
4555 * spp_flags - These flags are used to control various features
4556 * on an association. The flag field may contain
4557 * zero or more of the following options.
4558 *
4559 * SPP_HB_ENABLE - Enable heartbeats on the
4560 * specified address. Note that if the address
4561 * field is empty all addresses for the association
4562 * have heartbeats enabled upon them.
4563 *
4564 * SPP_HB_DISABLE - Disable heartbeats on the
4565  * specified address. Note that if the address
4566 * field is empty all addresses for the association
4567 * will have their heartbeats disabled. Note also
4568 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
4569 * mutually exclusive, only one of these two should
4570 * be specified. Enabling both fields will have
4571 * undetermined results.
4572 *
4573 * SPP_HB_DEMAND - Request a user initiated heartbeat
4574 * to be made immediately.
4575 *
4576 * SPP_PMTUD_ENABLE - This field will enable PMTU
4577 * discovery upon the specified address. Note that
4578  * if the address field is empty then all addresses
4579  * on the association are affected.
4580 *
4581 * SPP_PMTUD_DISABLE - This field will disable PMTU
4582 * discovery upon the specified address. Note that
4583  * if the address field is empty then all addresses
4584  * on the association are affected. Note also that
4585 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
4586 * exclusive. Enabling both will have undetermined
4587 * results.
4588 *
4589 * SPP_SACKDELAY_ENABLE - Setting this flag turns
4590 * on delayed sack. The time specified in spp_sackdelay
4591 * is used to specify the sack delay for this address. Note
4592 * that if spp_address is empty then all addresses will
4593 * enable delayed sack and take on the sack delay
4594 * value specified in spp_sackdelay.
4595 * SPP_SACKDELAY_DISABLE - Setting this flag turns
4596 * off delayed sack. If the spp_address field is blank then
4597 * delayed sack is disabled for the entire association. Note
4598 * also that this field is mutually exclusive to
4599 * SPP_SACKDELAY_ENABLE, setting both will have undefined
4600 * results.
4601 */
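/* Illustrative user-space sketch ('sd' and 'assoc_id' are assumed, socket
 * family AF_INET): reading the association-wide values by passing the IPv4
 * wildcard in spp_address, which the code below treats as "no specific
 * transport".
 *
 *	struct sctp_paddrparams params;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&params.spp_address;
 *	socklen_t len = sizeof(params);
 *
 *	memset(&params, 0, sizeof(params));
 *	sin->sin_family     = AF_INET;
 *	params.spp_assoc_id = assoc_id;
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *		       &params, &len) == 0)
 *		printf("hb interval %u ms\n", params.spp_hbinterval);
 */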
4602 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
4603 char __user *optval, int __user *optlen)
4604 {
4605 struct sctp_paddrparams params;
4606 struct sctp_transport *trans = NULL;
4607 struct sctp_association *asoc = NULL;
4608 struct sctp_sock *sp = sctp_sk(sk);
4609
4610 if (len < sizeof(struct sctp_paddrparams))
4611 return -EINVAL;
4612 len = sizeof(struct sctp_paddrparams);
4613 if (copy_from_user(&params, optval, len))
4614 return -EFAULT;
4615
4616 /* If an address other than INADDR_ANY is specified, and
4617 * no transport is found, then the request is invalid.
4618 */
4619 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
4620 trans = sctp_addr_id2transport(sk, &params.spp_address,
4621 params.spp_assoc_id);
4622 if (!trans) {
4623 pr_debug("%s: failed no transport\n", __func__);
4624 return -EINVAL;
4625 }
4626 }
4627
4628 /* Get association, if assoc_id != 0 and the socket is a one
4629 * to many style socket, and an association was not found, then
4630 * the id was invalid.
4631 */
4632 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
4633 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
4634 pr_debug("%s: failed no association\n", __func__);
4635 return -EINVAL;
4636 }
4637
4638 if (trans) {
4639 /* Fetch transport values. */
4640 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
4641 params.spp_pathmtu = trans->pathmtu;
4642 params.spp_pathmaxrxt = trans->pathmaxrxt;
4643 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);
4644
4645 /*draft-11 doesn't say what to return in spp_flags*/
4646 params.spp_flags = trans->param_flags;
4647 } else if (asoc) {
4648 /* Fetch association values. */
4649 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
4650 params.spp_pathmtu = asoc->pathmtu;
4651 params.spp_pathmaxrxt = asoc->pathmaxrxt;
4652 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);
4653
4654 /*draft-11 doesn't say what to return in spp_flags*/
4655 params.spp_flags = asoc->param_flags;
4656 } else {
4657 /* Fetch socket values. */
4658 params.spp_hbinterval = sp->hbinterval;
4659 params.spp_pathmtu = sp->pathmtu;
4660 params.spp_sackdelay = sp->sackdelay;
4661 params.spp_pathmaxrxt = sp->pathmaxrxt;
4662
4663 /*draft-11 doesn't say what to return in spp_flags*/
4664 params.spp_flags = sp->param_flags;
4665 }
4666
4667 if (copy_to_user(optval, &params, len))
4668 return -EFAULT;
4669
4670 if (put_user(len, optlen))
4671 return -EFAULT;
4672
4673 return 0;
4674 }
4675
4676 /*
4677 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
4678 *
4679  * This option will affect the way delayed acks are performed. This
4680  * option allows you to get or set the delayed ack time, in
4681  * milliseconds. It also allows changing the delayed ack frequency.
4682  * Changing the frequency to 1 disables the delayed sack algorithm. If
4683  * the assoc_id is 0, then this sets or gets the endpoint's default
4684  * values. If the assoc_id field is non-zero, then the set or get
4685  * affects the specified association for the one to many model (the
4686 * assoc_id field is ignored by the one to one model). Note that if
4687 * sack_delay or sack_freq are 0 when setting this option, then the
4688 * current values will remain unchanged.
4689 *
4690 * struct sctp_sack_info {
4691 * sctp_assoc_t sack_assoc_id;
4692 * uint32_t sack_delay;
4693 * uint32_t sack_freq;
4694 * };
4695 *
4696  * sack_assoc_id - This parameter indicates which association the user
4697  * is performing an action upon. Note that if this field's value is
4698  * zero then the endpoint's default value is changed (affecting future
4699 * associations only).
4700 *
4701 * sack_delay - This parameter contains the number of milliseconds that
4702 * the user is requesting the delayed ACK timer be set to. Note that
4703 * this value is defined in the standard to be between 200 and 500
4704 * milliseconds.
4705 *
4706 * sack_freq - This parameter contains the number of packets that must
4707 * be received before a sack is sent without waiting for the delay
4708 * timer to expire. The default value for this is 2, setting this
4709 * value to 1 will disable the delayed sack algorithm.
4710 */
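/* Illustrative user-space sketch ('sd' and 'assoc_id' are assumed): reading
 * the delayed-SACK settings currently in force for one association.
 *
 *	struct sctp_sack_info sack;
 *	socklen_t len = sizeof(sack);
 *
 *	memset(&sack, 0, sizeof(sack));
 *	sack.sack_assoc_id = assoc_id;
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &sack, &len) == 0)
 *		printf("delay %u ms, freq %u\n",
 *		       sack.sack_delay, sack.sack_freq);
 */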
4711 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
4712 char __user *optval,
4713 int __user *optlen)
4714 {
4715 struct sctp_sack_info params;
4716 struct sctp_association *asoc = NULL;
4717 struct sctp_sock *sp = sctp_sk(sk);
4718
4719 if (len >= sizeof(struct sctp_sack_info)) {
4720 len = sizeof(struct sctp_sack_info);
4721
4722 if (copy_from_user(&params, optval, len))
4723 return -EFAULT;
4724 } else if (len == sizeof(struct sctp_assoc_value)) {
4725 pr_warn_ratelimited(DEPRECATED
4726 "%s (pid %d) "
4727 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
4728 "Use struct sctp_sack_info instead\n",
4729 current->comm, task_pid_nr(current));
4730 if (copy_from_user(&params, optval, len))
4731 return -EFAULT;
4732 } else
4733 return -EINVAL;
4734
4735 /* Get association, if sack_assoc_id != 0 and the socket is a one
4736 * to many style socket, and an association was not found, then
4737 * the id was invalid.
4738 */
4739 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
4740 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
4741 return -EINVAL;
4742
4743 if (asoc) {
4744 /* Fetch association values. */
4745 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
4746 params.sack_delay = jiffies_to_msecs(
4747 asoc->sackdelay);
4748 params.sack_freq = asoc->sackfreq;
4749
4750 } else {
4751 params.sack_delay = 0;
4752 params.sack_freq = 1;
4753 }
4754 } else {
4755 /* Fetch socket values. */
4756 if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
4757 params.sack_delay = sp->sackdelay;
4758 params.sack_freq = sp->sackfreq;
4759 } else {
4760 params.sack_delay = 0;
4761 params.sack_freq = 1;
4762 }
4763 }
4764
4765 if (copy_to_user(optval, &params, len))
4766 return -EFAULT;
4767
4768 if (put_user(len, optlen))
4769 return -EFAULT;
4770
4771 return 0;
4772 }
4773
4774 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
4775 *
4776 * Applications can specify protocol parameters for the default association
4777 * initialization. The option name argument to setsockopt() and getsockopt()
4778 * is SCTP_INITMSG.
4779 *
4780 * Setting initialization parameters is effective only on an unconnected
4781  * socket (for UDP-style sockets only future associations are affected
4782 * by the change). With TCP-style sockets, this option is inherited by
4783 * sockets derived from a listener socket.
4784 */
4785 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
4786 {
4787 if (len < sizeof(struct sctp_initmsg))
4788 return -EINVAL;
4789 len = sizeof(struct sctp_initmsg);
4790 if (put_user(len, optlen))
4791 return -EFAULT;
4792 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
4793 return -EFAULT;
4794 return 0;
4795 }
4796
4797
4798 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
4799 char __user *optval, int __user *optlen)
4800 {
4801 struct sctp_association *asoc;
4802 int cnt = 0;
4803 struct sctp_getaddrs getaddrs;
4804 struct sctp_transport *from;
4805 void __user *to;
4806 union sctp_addr temp;
4807 struct sctp_sock *sp = sctp_sk(sk);
4808 int addrlen;
4809 size_t space_left;
4810 int bytes_copied;
4811
4812 if (len < sizeof(struct sctp_getaddrs))
4813 return -EINVAL;
4814
4815 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
4816 return -EFAULT;
4817
4818 /* For UDP-style sockets, id specifies the association to query. */
4819 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4820 if (!asoc)
4821 return -EINVAL;
4822
4823 to = optval + offsetof(struct sctp_getaddrs, addrs);
4824 space_left = len - offsetof(struct sctp_getaddrs, addrs);
4825
4826 list_for_each_entry(from, &asoc->peer.transport_addr_list,
4827 transports) {
4828 memcpy(&temp, &from->ipaddr, sizeof(temp));
4829 addrlen = sctp_get_pf_specific(sk->sk_family)
4830 ->addr_to_user(sp, &temp);
4831 if (space_left < addrlen)
4832 return -ENOMEM;
4833 if (copy_to_user(to, &temp, addrlen))
4834 return -EFAULT;
4835 to += addrlen;
4836 cnt++;
4837 space_left -= addrlen;
4838 }
4839
4840 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
4841 return -EFAULT;
4842 bytes_copied = ((char __user *)to) - optval;
4843 if (put_user(bytes_copied, optlen))
4844 return -EFAULT;
4845
4846 return 0;
4847 }
4848
4849 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4850 size_t space_left, int *bytes_copied)
4851 {
4852 struct sctp_sockaddr_entry *addr;
4853 union sctp_addr temp;
4854 int cnt = 0;
4855 int addrlen;
4856 struct net *net = sock_net(sk);
4857
4858 rcu_read_lock();
4859 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
4860 if (!addr->valid)
4861 continue;
4862
4863 if ((PF_INET == sk->sk_family) &&
4864 (AF_INET6 == addr->a.sa.sa_family))
4865 continue;
4866 if ((PF_INET6 == sk->sk_family) &&
4867 inet_v6_ipv6only(sk) &&
4868 (AF_INET == addr->a.sa.sa_family))
4869 continue;
4870 memcpy(&temp, &addr->a, sizeof(temp));
4871 if (!temp.v4.sin_port)
4872 temp.v4.sin_port = htons(port);
4873
4874 addrlen = sctp_get_pf_specific(sk->sk_family)
4875 ->addr_to_user(sctp_sk(sk), &temp);
4876
4877 if (space_left < addrlen) {
4878 cnt = -ENOMEM;
4879 break;
4880 }
4881 memcpy(to, &temp, addrlen);
4882
4883 to += addrlen;
4884 cnt++;
4885 space_left -= addrlen;
4886 *bytes_copied += addrlen;
4887 }
4888 rcu_read_unlock();
4889
4890 return cnt;
4891 }
4892
4893
4894 static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4895 char __user *optval, int __user *optlen)
4896 {
4897 struct sctp_bind_addr *bp;
4898 struct sctp_association *asoc;
4899 int cnt = 0;
4900 struct sctp_getaddrs getaddrs;
4901 struct sctp_sockaddr_entry *addr;
4902 void __user *to;
4903 union sctp_addr temp;
4904 struct sctp_sock *sp = sctp_sk(sk);
4905 int addrlen;
4906 int err = 0;
4907 size_t space_left;
4908 int bytes_copied = 0;
4909 void *addrs;
4910 void *buf;
4911
4912 if (len < sizeof(struct sctp_getaddrs))
4913 return -EINVAL;
4914
4915 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
4916 return -EFAULT;
4917
4918 /*
4919 * For UDP-style sockets, id specifies the association to query.
4920 * If the id field is set to the value '0' then the locally bound
4921 * addresses are returned without regard to any particular
4922 * association.
4923 */
4924 if (0 == getaddrs.assoc_id) {
4925 bp = &sctp_sk(sk)->ep->base.bind_addr;
4926 } else {
4927 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4928 if (!asoc)
4929 return -EINVAL;
4930 bp = &asoc->base.bind_addr;
4931 }
4932
4933 to = optval + offsetof(struct sctp_getaddrs, addrs);
4934 space_left = len - offsetof(struct sctp_getaddrs, addrs);
4935
4936 addrs = kmalloc(space_left, GFP_KERNEL);
4937 if (!addrs)
4938 return -ENOMEM;
4939
4940 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
4941 * addresses from the global local address list.
4942 */
4943 if (sctp_list_single_entry(&bp->address_list)) {
4944 addr = list_entry(bp->address_list.next,
4945 struct sctp_sockaddr_entry, list);
4946 if (sctp_is_any(sk, &addr->a)) {
4947 cnt = sctp_copy_laddrs(sk, bp->port, addrs,
4948 space_left, &bytes_copied);
4949 if (cnt < 0) {
4950 err = cnt;
4951 goto out;
4952 }
4953 goto copy_getaddrs;
4954 }
4955 }
4956
4957 buf = addrs;
4958 /* Protection on the bound address list is not needed since
4959 * in the socket option context we hold a socket lock and
4960 * thus the bound address list can't change.
4961 */
4962 list_for_each_entry(addr, &bp->address_list, list) {
4963 memcpy(&temp, &addr->a, sizeof(temp));
4964 addrlen = sctp_get_pf_specific(sk->sk_family)
4965 ->addr_to_user(sp, &temp);
4966 if (space_left < addrlen) {
4967 err = -ENOMEM; /*fixme: right error?*/
4968 goto out;
4969 }
4970 memcpy(buf, &temp, addrlen);
4971 buf += addrlen;
4972 bytes_copied += addrlen;
4973 cnt++;
4974 space_left -= addrlen;
4975 }
4976
4977 copy_getaddrs:
4978 if (copy_to_user(to, addrs, bytes_copied)) {
4979 err = -EFAULT;
4980 goto out;
4981 }
4982 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
4983 err = -EFAULT;
4984 goto out;
4985 }
4986 if (put_user(bytes_copied, optlen))
4987 err = -EFAULT;
4988 out:
4989 kfree(addrs);
4990 return err;
4991 }
4992
4993 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
4994 *
4995 * Requests that the local SCTP stack use the enclosed peer address as
4996 * the association primary. The enclosed address must be one of the
4997 * association peer's addresses.
4998 */
4999 static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
5000 char __user *optval, int __user *optlen)
5001 {
5002 struct sctp_prim prim;
5003 struct sctp_association *asoc;
5004 struct sctp_sock *sp = sctp_sk(sk);
5005
5006 if (len < sizeof(struct sctp_prim))
5007 return -EINVAL;
5008
5009 len = sizeof(struct sctp_prim);
5010
5011 if (copy_from_user(&prim, optval, len))
5012 return -EFAULT;
5013
5014 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
5015 if (!asoc)
5016 return -EINVAL;
5017
5018 if (!asoc->peer.primary_path)
5019 return -ENOTCONN;
5020
5021 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
5022 asoc->peer.primary_path->af_specific->sockaddr_len);
5023
5024 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
5025 (union sctp_addr *)&prim.ssp_addr);
5026
5027 if (put_user(len, optlen))
5028 return -EFAULT;
5029 if (copy_to_user(optval, &prim, len))
5030 return -EFAULT;
5031
5032 return 0;
5033 }
5034
5035 /*
5036 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
5037 *
5038 * Requests that the local endpoint set the specified Adaptation Layer
5039 * Indication parameter for all future INIT and INIT-ACK exchanges.
5040 */
5041 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
5042 char __user *optval, int __user *optlen)
5043 {
5044 struct sctp_setadaptation adaptation;
5045
5046 if (len < sizeof(struct sctp_setadaptation))
5047 return -EINVAL;
5048
5049 len = sizeof(struct sctp_setadaptation);
5050
5051 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
5052
5053 if (put_user(len, optlen))
5054 return -EFAULT;
5055 if (copy_to_user(optval, &adaptation, len))
5056 return -EFAULT;
5057
5058 return 0;
5059 }
5060
5061 /*
5062 *
5063 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
5064 *
5065 * Applications that wish to use the sendto() system call may wish to
5066 * specify a default set of parameters that would normally be supplied
5067 * through the inclusion of ancillary data. This socket option allows
5068 * such an application to set the default sctp_sndrcvinfo structure.
5069
5070
5071 * The application that wishes to use this socket option simply passes
5072 * in to this call the sctp_sndrcvinfo structure defined in Section
5073 * 5.2.2) The input parameters accepted by this call include
5074 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
5075 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
5076 * to this call if the caller is using the UDP model.
5077 *
5078  * For getsockopt, it gets the default sctp_sndrcvinfo structure.
5079 */
5080 static int sctp_getsockopt_default_send_param(struct sock *sk,
5081 int len, char __user *optval,
5082 int __user *optlen)
5083 {
5084 struct sctp_sock *sp = sctp_sk(sk);
5085 struct sctp_association *asoc;
5086 struct sctp_sndrcvinfo info;
5087
5088 if (len < sizeof(info))
5089 return -EINVAL;
5090
5091 len = sizeof(info);
5092
5093 if (copy_from_user(&info, optval, len))
5094 return -EFAULT;
5095
5096 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
5097 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
5098 return -EINVAL;
5099 if (asoc) {
5100 info.sinfo_stream = asoc->default_stream;
5101 info.sinfo_flags = asoc->default_flags;
5102 info.sinfo_ppid = asoc->default_ppid;
5103 info.sinfo_context = asoc->default_context;
5104 info.sinfo_timetolive = asoc->default_timetolive;
5105 } else {
5106 info.sinfo_stream = sp->default_stream;
5107 info.sinfo_flags = sp->default_flags;
5108 info.sinfo_ppid = sp->default_ppid;
5109 info.sinfo_context = sp->default_context;
5110 info.sinfo_timetolive = sp->default_timetolive;
5111 }
5112
5113 if (put_user(len, optlen))
5114 return -EFAULT;
5115 if (copy_to_user(optval, &info, len))
5116 return -EFAULT;
5117
5118 return 0;
5119 }
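/* Illustrative user-space sketch for SCTP_DEFAULT_SEND_PARAM above ('sd' is
 * assumed; an assoc id of 0 queries the endpoint defaults on a one-to-many
 * socket): reading the default sctp_sndrcvinfo.
 *
 *	struct sctp_sndrcvinfo info;
 *	socklen_t len = sizeof(info);
 *
 *	memset(&info, 0, sizeof(info));
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *		       &info, &len) == 0)
 *		printf("default stream %u\n", info.sinfo_stream);
 */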
5120
5121 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters
5122 * (SCTP_DEFAULT_SNDINFO)
5123 */
5124 static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
5125 char __user *optval,
5126 int __user *optlen)
5127 {
5128 struct sctp_sock *sp = sctp_sk(sk);
5129 struct sctp_association *asoc;
5130 struct sctp_sndinfo info;
5131
5132 if (len < sizeof(info))
5133 return -EINVAL;
5134
5135 len = sizeof(info);
5136
5137 if (copy_from_user(&info, optval, len))
5138 return -EFAULT;
5139
5140 asoc = sctp_id2assoc(sk, info.snd_assoc_id);
5141 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
5142 return -EINVAL;
5143 if (asoc) {
5144 info.snd_sid = asoc->default_stream;
5145 info.snd_flags = asoc->default_flags;
5146 info.snd_ppid = asoc->default_ppid;
5147 info.snd_context = asoc->default_context;
5148 } else {
5149 info.snd_sid = sp->default_stream;
5150 info.snd_flags = sp->default_flags;
5151 info.snd_ppid = sp->default_ppid;
5152 info.snd_context = sp->default_context;
5153 }
5154
5155 if (put_user(len, optlen))
5156 return -EFAULT;
5157 if (copy_to_user(optval, &info, len))
5158 return -EFAULT;
5159
5160 return 0;
5161 }
5162
5163 /*
5164 *
5165 * 7.1.5 SCTP_NODELAY
5166 *
5167 * Turn on/off any Nagle-like algorithm. This means that packets are
5168 * generally sent as soon as possible and no unnecessary delays are
5169 * introduced, at the cost of more packets in the network. Expects an
5170 * integer boolean flag.
5171 */
5172
5173 static int sctp_getsockopt_nodelay(struct sock *sk, int len,
5174 char __user *optval, int __user *optlen)
5175 {
5176 int val;
5177
5178 if (len < sizeof(int))
5179 return -EINVAL;
5180
5181 len = sizeof(int);
5182 val = (sctp_sk(sk)->nodelay == 1);
5183 if (put_user(len, optlen))
5184 return -EFAULT;
5185 if (copy_to_user(optval, &val, len))
5186 return -EFAULT;
5187 return 0;
5188 }
5189
5190 /*
5191 *
5192 * 7.1.1 SCTP_RTOINFO
5193 *
5194 * The protocol parameters used to initialize and bound retransmission
5195 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
5196 * and modify these parameters.
5197 * All parameters are time values, in milliseconds. A value of 0, when
5198 * modifying the parameters, indicates that the current value should not
5199 * be changed.
5200 *
5201 */
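/* Illustrative user-space sketch ('sd' is assumed; an assoc id of 0 queries
 * the endpoint defaults): all returned values are in milliseconds.
 *
 *	struct sctp_rtoinfo rto;
 *	socklen_t len = sizeof(rto);
 *
 *	memset(&rto, 0, sizeof(rto));
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, &len) == 0)
 *		printf("rto: init %u min %u max %u\n",
 *		       rto.srto_initial, rto.srto_min, rto.srto_max);
 */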
5202 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
5203 char __user *optval,
5204 int __user *optlen) {
5205 struct sctp_rtoinfo rtoinfo;
5206 struct sctp_association *asoc;
5207
5208 if (len < sizeof (struct sctp_rtoinfo))
5209 return -EINVAL;
5210
5211 len = sizeof(struct sctp_rtoinfo);
5212
5213 if (copy_from_user(&rtoinfo, optval, len))
5214 return -EFAULT;
5215
5216 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
5217
5218 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
5219 return -EINVAL;
5220
5221 /* Values corresponding to the specific association. */
5222 if (asoc) {
5223 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
5224 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
5225 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
5226 } else {
5227 /* Values corresponding to the endpoint. */
5228 struct sctp_sock *sp = sctp_sk(sk);
5229
5230 rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
5231 rtoinfo.srto_max = sp->rtoinfo.srto_max;
5232 rtoinfo.srto_min = sp->rtoinfo.srto_min;
5233 }
5234
5235 if (put_user(len, optlen))
5236 return -EFAULT;
5237
5238 if (copy_to_user(optval, &rtoinfo, len))
5239 return -EFAULT;
5240
5241 return 0;
5242 }
5243
5244 /*
5245 *
5246 * 7.1.2 SCTP_ASSOCINFO
5247 *
5248 * This option is used to tune the maximum retransmission attempts
5249 * of the association.
5250 * Returns an error if the new association retransmission value is
5251 * greater than the sum of the retransmission value of the peer.
5252 * See [SCTP] for more information.
5253 *
5254 */
5255 static int sctp_getsockopt_associnfo(struct sock *sk, int len,
5256 char __user *optval,
5257 int __user *optlen)
5258 {
5259
5260 struct sctp_assocparams assocparams;
5261 struct sctp_association *asoc;
5262 struct list_head *pos;
5263 int cnt = 0;
5264
5265 if (len < sizeof (struct sctp_assocparams))
5266 return -EINVAL;
5267
5268 len = sizeof(struct sctp_assocparams);
5269
5270 if (copy_from_user(&assocparams, optval, len))
5271 return -EFAULT;
5272
5273 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
5274
5275 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
5276 return -EINVAL;
5277
5278 	/* Values corresponding to the specific association */
5279 if (asoc) {
5280 assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
5281 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
5282 assocparams.sasoc_local_rwnd = asoc->a_rwnd;
5283 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
5284
5285 list_for_each(pos, &asoc->peer.transport_addr_list) {
5286 cnt++;
5287 }
5288
5289 assocparams.sasoc_number_peer_destinations = cnt;
5290 } else {
5291 /* Values corresponding to the endpoint */
5292 struct sctp_sock *sp = sctp_sk(sk);
5293
5294 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
5295 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
5296 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
5297 assocparams.sasoc_cookie_life =
5298 sp->assocparams.sasoc_cookie_life;
5299 assocparams.sasoc_number_peer_destinations =
5300 sp->assocparams.
5301 sasoc_number_peer_destinations;
5302 }
5303
5304 if (put_user(len, optlen))
5305 return -EFAULT;
5306
5307 if (copy_to_user(optval, &assocparams, len))
5308 return -EFAULT;
5309
5310 return 0;
5311 }
5312
5313 /*
5314 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
5315 *
5316 * This socket option is a boolean flag which turns on or off mapped V4
5317 * addresses. If this option is turned on and the socket is type
5318 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
5319 * If this option is turned off, then no mapping will be done of V4
5320 * addresses and a user will receive both PF_INET6 and PF_INET type
5321 * addresses on the socket.
5322 */
5323 static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
5324 char __user *optval, int __user *optlen)
5325 {
5326 int val;
5327 struct sctp_sock *sp = sctp_sk(sk);
5328
5329 if (len < sizeof(int))
5330 return -EINVAL;
5331
5332 len = sizeof(int);
5333 val = sp->v4mapped;
5334 if (put_user(len, optlen))
5335 return -EFAULT;
5336 if (copy_to_user(optval, &val, len))
5337 return -EFAULT;
5338
5339 return 0;
5340 }
5341
5342 /*
5343 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
5344 * (chapter and verse is quoted at sctp_setsockopt_context())
5345 */
5346 static int sctp_getsockopt_context(struct sock *sk, int len,
5347 char __user *optval, int __user *optlen)
5348 {
5349 struct sctp_assoc_value params;
5350 struct sctp_sock *sp;
5351 struct sctp_association *asoc;
5352
5353 if (len < sizeof(struct sctp_assoc_value))
5354 return -EINVAL;
5355
5356 len = sizeof(struct sctp_assoc_value);
5357
5358 if (copy_from_user(&params, optval, len))
5359 return -EFAULT;
5360
5361 sp = sctp_sk(sk);
5362
5363 if (params.assoc_id != 0) {
5364 asoc = sctp_id2assoc(sk, params.assoc_id);
5365 if (!asoc)
5366 return -EINVAL;
5367 params.assoc_value = asoc->default_rcv_context;
5368 } else {
5369 params.assoc_value = sp->default_rcv_context;
5370 }
5371
5372 if (put_user(len, optlen))
5373 return -EFAULT;
5374 if (copy_to_user(optval, &params, len))
5375 return -EFAULT;
5376
5377 return 0;
5378 }
5379
5380 /*
5381 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
5382 * This option will get or set the maximum size to put in any outgoing
5383 * SCTP DATA chunk. If a message is larger than this size it will be
5384 * fragmented by SCTP into the specified size. Note that the underlying
5385 * SCTP implementation may fragment into smaller sized chunks when the
5386 * PMTU of the underlying association is smaller than the value set by
5387 * the user. The default value for this option is '0' which indicates
5388  * the user is NOT limiting fragmentation and only the PMTU will affect
5389 * SCTP's choice of DATA chunk size. Note also that values set larger
5390 * than the maximum size of an IP datagram will effectively let SCTP
5391 * control fragmentation (i.e. the same as setting this option to 0).
5392 *
5393 * The following structure is used to access and modify this parameter:
5394 *
5395 * struct sctp_assoc_value {
5396 * sctp_assoc_t assoc_id;
5397 * uint32_t assoc_value;
5398 * };
5399 *
5400 * assoc_id: This parameter is ignored for one-to-one style sockets.
5401 * For one-to-many style sockets this parameter indicates which
5402 * association the user is performing an action upon. Note that if
5403  *    this field's value is zero then the endpoint's default value is
5404  *    changed (affecting future associations only).
5405 * assoc_value: This parameter specifies the maximum size in bytes.
5406 */
5407 static int sctp_getsockopt_maxseg(struct sock *sk, int len,
5408 char __user *optval, int __user *optlen)
5409 {
5410 struct sctp_assoc_value params;
5411 struct sctp_association *asoc;
5412
5413 if (len == sizeof(int)) {
5414 pr_warn_ratelimited(DEPRECATED
5415 "%s (pid %d) "
5416 "Use of int in maxseg socket option.\n"
5417 "Use struct sctp_assoc_value instead\n",
5418 current->comm, task_pid_nr(current));
5419 params.assoc_id = 0;
5420 } else if (len >= sizeof(struct sctp_assoc_value)) {
5421 len = sizeof(struct sctp_assoc_value);
5422 if (copy_from_user(&params, optval, sizeof(params)))
5423 return -EFAULT;
5424 } else
5425 return -EINVAL;
5426
5427 asoc = sctp_id2assoc(sk, params.assoc_id);
5428 if (!asoc && params.assoc_id && sctp_style(sk, UDP))
5429 return -EINVAL;
5430
5431 if (asoc)
5432 params.assoc_value = asoc->frag_point;
5433 else
5434 params.assoc_value = sctp_sk(sk)->user_frag;
5435
5436 if (put_user(len, optlen))
5437 return -EFAULT;
5438 if (len == sizeof(int)) {
5439 if (copy_to_user(optval, &params.assoc_value, len))
5440 return -EFAULT;
5441 } else {
5442 if (copy_to_user(optval, &params, len))
5443 return -EFAULT;
5444 }
5445
5446 return 0;
5447 }
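
/* Illustrative userspace sketch (not part of this file): querying the current
 * fragmentation point with the struct sctp_assoc_value form of SCTP_MAXSEG
 * described above. "sd" is a hypothetical SCTP socket descriptor, error
 * handling is omitted, and the deprecated plain-int form still accepted by
 * the code above is not shown.
 *
 *      struct sctp_assoc_value av;
 *      socklen_t optlen = sizeof(av);
 *
 *      memset(&av, 0, sizeof(av));
 *      av.assoc_id = 0;        (endpoint default on a one-to-many socket)
 *      if (getsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, &optlen) == 0)
 *              printf("max fragment size: %u bytes\n", av.assoc_value);
 */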
5448
5449 /*
5450 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
5451 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
5452 */
5453 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
5454 char __user *optval, int __user *optlen)
5455 {
5456 int val;
5457
5458 if (len < sizeof(int))
5459 return -EINVAL;
5460
5461 len = sizeof(int);
5462
5463 val = sctp_sk(sk)->frag_interleave;
5464 if (put_user(len, optlen))
5465 return -EFAULT;
5466 if (copy_to_user(optval, &val, len))
5467 return -EFAULT;
5468
5469 return 0;
5470 }
5471
5472 /*
5473 * 7.1.25. Set or Get the sctp partial delivery point
5474 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
5475 */
5476 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
5477 char __user *optval,
5478 int __user *optlen)
5479 {
5480 u32 val;
5481
5482 if (len < sizeof(u32))
5483 return -EINVAL;
5484
5485 len = sizeof(u32);
5486
5487 val = sctp_sk(sk)->pd_point;
5488 if (put_user(len, optlen))
5489 return -EFAULT;
5490 if (copy_to_user(optval, &val, len))
5491 return -EFAULT;
5492
5493 return 0;
5494 }
5495
5496 /*
5497 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
5498 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
5499 */
5500 static int sctp_getsockopt_maxburst(struct sock *sk, int len,
5501 char __user *optval,
5502 int __user *optlen)
5503 {
5504 struct sctp_assoc_value params;
5505 struct sctp_sock *sp;
5506 struct sctp_association *asoc;
5507
5508 if (len == sizeof(int)) {
5509 pr_warn_ratelimited(DEPRECATED
5510 "%s (pid %d) "
5511 "Use of int in max_burst socket option.\n"
5512 "Use struct sctp_assoc_value instead\n",
5513 current->comm, task_pid_nr(current));
5514 params.assoc_id = 0;
5515 } else if (len >= sizeof(struct sctp_assoc_value)) {
5516 len = sizeof(struct sctp_assoc_value);
5517 if (copy_from_user(&params, optval, len))
5518 return -EFAULT;
5519 } else
5520 return -EINVAL;
5521
5522 sp = sctp_sk(sk);
5523
5524 if (params.assoc_id != 0) {
5525 asoc = sctp_id2assoc(sk, params.assoc_id);
5526 if (!asoc)
5527 return -EINVAL;
5528 params.assoc_value = asoc->max_burst;
5529 } else
5530 params.assoc_value = sp->max_burst;
5531
5532 if (len == sizeof(int)) {
5533 if (copy_to_user(optval, &params.assoc_value, len))
5534 return -EFAULT;
5535 } else {
5536 if (copy_to_user(optval, &params, len))
5537 return -EFAULT;
5538 }
5539
5540 return 0;
5541
5542 }
5543
5544 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
5545 char __user *optval, int __user *optlen)
5546 {
5547 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5548 struct sctp_hmacalgo __user *p = (void __user *)optval;
5549 struct sctp_hmac_algo_param *hmacs;
5550 __u16 data_len = 0;
5551 u32 num_idents;
5552
5553 if (!ep->auth_enable)
5554 return -EACCES;
5555
5556 hmacs = ep->auth_hmacs_list;
5557 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
5558
5559 if (len < sizeof(struct sctp_hmacalgo) + data_len)
5560 return -EINVAL;
5561
5562 len = sizeof(struct sctp_hmacalgo) + data_len;
5563 num_idents = data_len / sizeof(u16);
5564
5565 if (put_user(len, optlen))
5566 return -EFAULT;
5567 if (put_user(num_idents, &p->shmac_num_idents))
5568 return -EFAULT;
5569 if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
5570 return -EFAULT;
5571 return 0;
5572 }
5573
5574 static int sctp_getsockopt_active_key(struct sock *sk, int len,
5575 char __user *optval, int __user *optlen)
5576 {
5577 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5578 struct sctp_authkeyid val;
5579 struct sctp_association *asoc;
5580
5581 if (!ep->auth_enable)
5582 return -EACCES;
5583
5584 if (len < sizeof(struct sctp_authkeyid))
5585 return -EINVAL;
5586 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
5587 return -EFAULT;
5588
5589 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
5590 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
5591 return -EINVAL;
5592
5593 if (asoc)
5594 val.scact_keynumber = asoc->active_key_id;
5595 else
5596 val.scact_keynumber = ep->active_key_id;
5597
5598 len = sizeof(struct sctp_authkeyid);
5599 if (put_user(len, optlen))
5600 return -EFAULT;
5601 if (copy_to_user(optval, &val, len))
5602 return -EFAULT;
5603
5604 return 0;
5605 }
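
/* Illustrative userspace sketch (not part of this file): reading the active
 * SCTP-AUTH key number with SCTP_AUTH_ACTIVE_KEY. "sd" and "assoc_id" are
 * hypothetical, error handling is omitted, and the call fails with EACCES
 * when authentication is not enabled, as implemented above.
 *
 *      struct sctp_authkeyid akey;
 *      socklen_t optlen = sizeof(akey);
 *
 *      memset(&akey, 0, sizeof(akey));
 *      akey.scact_assoc_id = assoc_id;
 *      if (getsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
 *                     &akey, &optlen) == 0)
 *              printf("active key number: %u\n", akey.scact_keynumber);
 */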
5606
5607 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
5608 char __user *optval, int __user *optlen)
5609 {
5610 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5611 struct sctp_authchunks __user *p = (void __user *)optval;
5612 struct sctp_authchunks val;
5613 struct sctp_association *asoc;
5614 struct sctp_chunks_param *ch;
5615 u32 num_chunks = 0;
5616 char __user *to;
5617
5618 if (!ep->auth_enable)
5619 return -EACCES;
5620
5621 if (len < sizeof(struct sctp_authchunks))
5622 return -EINVAL;
5623
5624 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
5625 return -EFAULT;
5626
5627 to = p->gauth_chunks;
5628 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
5629 if (!asoc)
5630 return -EINVAL;
5631
5632 ch = asoc->peer.peer_chunks;
5633 if (!ch)
5634 goto num;
5635
5636 /* See if the user provided enough room for all the data */
5637 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
5638 	if (len < sizeof(struct sctp_authchunks) + num_chunks)
5639 return -EINVAL;
5640
5641 if (copy_to_user(to, ch->chunks, num_chunks))
5642 return -EFAULT;
5643 num:
5644 len = sizeof(struct sctp_authchunks) + num_chunks;
5645 if (put_user(len, optlen))
5646 return -EFAULT;
5647 if (put_user(num_chunks, &p->gauth_number_of_chunks))
5648 return -EFAULT;
5649 return 0;
5650 }
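
/* Illustrative userspace sketch (not part of this file): fetching the peer's
 * authenticated chunk list with SCTP_PEER_AUTH_CHUNKS. The caller provides a
 * buffer large enough for struct sctp_authchunks plus the chunk id array;
 * "BUFSZ", "sd" and "assoc_id" are hypothetical and error handling is
 * omitted.
 *
 *      char buf[BUFSZ];
 *      struct sctp_authchunks *auth = (struct sctp_authchunks *)buf;
 *      socklen_t optlen = sizeof(buf);
 *
 *      memset(buf, 0, sizeof(buf));
 *      auth->gauth_assoc_id = assoc_id;
 *      if (getsockopt(sd, IPPROTO_SCTP, SCTP_PEER_AUTH_CHUNKS,
 *                     auth, &optlen) == 0)
 *              printf("%u authenticated chunk types\n",
 *                     auth->gauth_number_of_chunks);
 */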
5651
5652 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
5653 char __user *optval, int __user *optlen)
5654 {
5655 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5656 struct sctp_authchunks __user *p = (void __user *)optval;
5657 struct sctp_authchunks val;
5658 struct sctp_association *asoc;
5659 struct sctp_chunks_param *ch;
5660 u32 num_chunks = 0;
5661 char __user *to;
5662
5663 if (!ep->auth_enable)
5664 return -EACCES;
5665
5666 if (len < sizeof(struct sctp_authchunks))
5667 return -EINVAL;
5668
5669 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
5670 return -EFAULT;
5671
5672 to = p->gauth_chunks;
5673 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
5674 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
5675 return -EINVAL;
5676
5677 if (asoc)
5678 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
5679 else
5680 ch = ep->auth_chunk_list;
5681
5682 if (!ch)
5683 goto num;
5684
5685 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
5686 if (len < sizeof(struct sctp_authchunks) + num_chunks)
5687 return -EINVAL;
5688
5689 if (copy_to_user(to, ch->chunks, num_chunks))
5690 return -EFAULT;
5691 num:
5692 len = sizeof(struct sctp_authchunks) + num_chunks;
5693 if (put_user(len, optlen))
5694 return -EFAULT;
5695 if (put_user(num_chunks, &p->gauth_number_of_chunks))
5696 return -EFAULT;
5697
5698 return 0;
5699 }
5700
5701 /*
5702 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
5703 * This option gets the current number of associations that are attached
5704  * to a one-to-many style socket.  The option value is a uint32_t.
5705 */
5706 static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
5707 char __user *optval, int __user *optlen)
5708 {
5709 struct sctp_sock *sp = sctp_sk(sk);
5710 struct sctp_association *asoc;
5711 u32 val = 0;
5712
5713 if (sctp_style(sk, TCP))
5714 return -EOPNOTSUPP;
5715
5716 if (len < sizeof(u32))
5717 return -EINVAL;
5718
5719 len = sizeof(u32);
5720
5721 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5722 val++;
5723 }
5724
5725 if (put_user(len, optlen))
5726 return -EFAULT;
5727 if (copy_to_user(optval, &val, len))
5728 return -EFAULT;
5729
5730 return 0;
5731 }
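
/* Illustrative userspace sketch (not part of this file): counting the
 * associations on a one-to-many socket with SCTP_GET_ASSOC_NUMBER as
 * described above. "sd" is a hypothetical descriptor and error handling is
 * omitted; on a TCP-style socket this returns EOPNOTSUPP.
 *
 *      uint32_t assoc_num = 0;
 *      socklen_t optlen = sizeof(assoc_num);
 *
 *      if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER,
 *                     &assoc_num, &optlen) == 0)
 *              printf("%u associations\n", assoc_num);
 */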
5732
5733 /*
5734 * 8.1.23 SCTP_AUTO_ASCONF
5735 * See the corresponding setsockopt entry as description
5736 */
5737 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
5738 char __user *optval, int __user *optlen)
5739 {
5740 int val = 0;
5741
5742 if (len < sizeof(int))
5743 return -EINVAL;
5744
5745 len = sizeof(int);
5746 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
5747 val = 1;
5748 if (put_user(len, optlen))
5749 return -EFAULT;
5750 if (copy_to_user(optval, &val, len))
5751 return -EFAULT;
5752 return 0;
5753 }
5754
5755 /*
5756 * 8.2.6. Get the Current Identifiers of Associations
5757 * (SCTP_GET_ASSOC_ID_LIST)
5758 *
5759 * This option gets the current list of SCTP association identifiers of
5760 * the SCTP associations handled by a one-to-many style socket.
5761 */
5762 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
5763 char __user *optval, int __user *optlen)
5764 {
5765 struct sctp_sock *sp = sctp_sk(sk);
5766 struct sctp_association *asoc;
5767 struct sctp_assoc_ids *ids;
5768 u32 num = 0;
5769
5770 if (sctp_style(sk, TCP))
5771 return -EOPNOTSUPP;
5772
5773 if (len < sizeof(struct sctp_assoc_ids))
5774 return -EINVAL;
5775
5776 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5777 num++;
5778 }
5779
5780 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
5781 return -EINVAL;
5782
5783 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
5784
5785 ids = kmalloc(len, GFP_KERNEL);
5786 if (unlikely(!ids))
5787 return -ENOMEM;
5788
5789 ids->gaids_number_of_ids = num;
5790 num = 0;
5791 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5792 ids->gaids_assoc_id[num++] = asoc->assoc_id;
5793 }
5794
5795 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
5796 kfree(ids);
5797 return -EFAULT;
5798 }
5799
5800 kfree(ids);
5801 return 0;
5802 }
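
/* Illustrative userspace sketch (not part of this file): retrieving the
 * association id list. A caller typically queries SCTP_GET_ASSOC_NUMBER
 * first, sizes the buffer from that count, and then issues this getsockopt;
 * "sd" and "n" are hypothetical and error handling is omitted.
 *
 *      socklen_t optlen = sizeof(struct sctp_assoc_ids) +
 *                         n * sizeof(sctp_assoc_t);
 *      struct sctp_assoc_ids *ids = malloc(optlen);
 *      uint32_t i;
 *
 *      if (ids && getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
 *                            ids, &optlen) == 0)
 *              for (i = 0; i < ids->gaids_number_of_ids; i++)
 *                      printf("assoc id %d\n", ids->gaids_assoc_id[i]);
 *      free(ids);
 */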
5803
5804 /*
5805 * SCTP_PEER_ADDR_THLDS
5806 *
5807 * This option allows us to fetch the partially failed threshold for one or all
5808 * transports in an association. See Section 6.1 of:
5809 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
5810 */
5811 static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
5812 char __user *optval,
5813 int len,
5814 int __user *optlen)
5815 {
5816 struct sctp_paddrthlds val;
5817 struct sctp_transport *trans;
5818 struct sctp_association *asoc;
5819
5820 if (len < sizeof(struct sctp_paddrthlds))
5821 return -EINVAL;
5822 len = sizeof(struct sctp_paddrthlds);
5823 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
5824 return -EFAULT;
5825
5826 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
5827 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
5828 if (!asoc)
5829 return -ENOENT;
5830
5831 val.spt_pathpfthld = asoc->pf_retrans;
5832 val.spt_pathmaxrxt = asoc->pathmaxrxt;
5833 } else {
5834 trans = sctp_addr_id2transport(sk, &val.spt_address,
5835 val.spt_assoc_id);
5836 if (!trans)
5837 return -ENOENT;
5838
5839 val.spt_pathmaxrxt = trans->pathmaxrxt;
5840 val.spt_pathpfthld = trans->pf_retrans;
5841 }
5842
5843 if (put_user(len, optlen) || copy_to_user(optval, &val, len))
5844 return -EFAULT;
5845
5846 return 0;
5847 }
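
/* Illustrative userspace sketch (not part of this file): reading the
 * path-max-retrans and partially-failed thresholds for a single peer address
 * with SCTP_PEER_ADDR_THLDS. Passing a wildcard address in spt_address
 * returns the association-wide values instead, per the code above. "sd",
 * "peer" and "assoc_id" are hypothetical and error handling is omitted.
 *
 *      struct sctp_paddrthlds thlds;
 *      socklen_t optlen = sizeof(thlds);
 *
 *      memset(&thlds, 0, sizeof(thlds));
 *      thlds.spt_assoc_id = assoc_id;
 *      memcpy(&thlds.spt_address, &peer, sizeof(peer));
 *      if (getsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
 *                     &thlds, &optlen) == 0)
 *              printf("pf threshold %u, path max rxt %u\n",
 *                     thlds.spt_pathpfthld, thlds.spt_pathmaxrxt);
 */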
5848
5849 /*
5850 * SCTP_GET_ASSOC_STATS
5851 *
5852 * This option retrieves local per endpoint statistics. It is modeled
5853 * after OpenSolaris' implementation
5854 */
5855 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
5856 char __user *optval,
5857 int __user *optlen)
5858 {
5859 struct sctp_assoc_stats sas;
5860 struct sctp_association *asoc = NULL;
5861
5862 /* User must provide at least the assoc id */
5863 if (len < sizeof(sctp_assoc_t))
5864 return -EINVAL;
5865
5866 /* Allow the struct to grow and fill in as much as possible */
5867 len = min_t(size_t, len, sizeof(sas));
5868
5869 if (copy_from_user(&sas, optval, len))
5870 return -EFAULT;
5871
5872 asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
5873 if (!asoc)
5874 return -EINVAL;
5875
5876 sas.sas_rtxchunks = asoc->stats.rtxchunks;
5877 sas.sas_gapcnt = asoc->stats.gapcnt;
5878 sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
5879 sas.sas_osacks = asoc->stats.osacks;
5880 sas.sas_isacks = asoc->stats.isacks;
5881 sas.sas_octrlchunks = asoc->stats.octrlchunks;
5882 sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
5883 sas.sas_oodchunks = asoc->stats.oodchunks;
5884 sas.sas_iodchunks = asoc->stats.iodchunks;
5885 sas.sas_ouodchunks = asoc->stats.ouodchunks;
5886 sas.sas_iuodchunks = asoc->stats.iuodchunks;
5887 sas.sas_idupchunks = asoc->stats.idupchunks;
5888 sas.sas_opackets = asoc->stats.opackets;
5889 sas.sas_ipackets = asoc->stats.ipackets;
5890
5891 	/* The highest max RTO observed in this period; this is 0 if not a
5892 	 * single RTO update took place, in which case obs_rto_ipaddr is
5893 	 * bogus as well.
5894 	 */
5895 sas.sas_maxrto = asoc->stats.max_obs_rto;
5896 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
5897 sizeof(struct sockaddr_storage));
5898
5899 /* Mark beginning of a new observation period */
5900 asoc->stats.max_obs_rto = asoc->rto_min;
5901
5902 if (put_user(len, optlen))
5903 return -EFAULT;
5904
5905 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id);
5906
5907 if (copy_to_user(optval, &sas, len))
5908 return -EFAULT;
5909
5910 return 0;
5911 }
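
/* Illustrative userspace sketch (not part of this file): sampling the
 * per-association statistics. The caller fills in the association id; note
 * that, per the code above, reading the stats also restarts the max-RTO
 * observation period. "sd" and "assoc_id" are hypothetical and error
 * handling is omitted.
 *
 *      struct sctp_assoc_stats stats;
 *      socklen_t optlen = sizeof(stats);
 *
 *      memset(&stats, 0, sizeof(stats));
 *      stats.sas_assoc_id = assoc_id;
 *      if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS,
 *                     &stats, &optlen) == 0)
 *              printf("max observed rto: %llu\n",
 *                     (unsigned long long)stats.sas_maxrto);
 */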
5912
5913 static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
5914 char __user *optval,
5915 int __user *optlen)
5916 {
5917 int val = 0;
5918
5919 if (len < sizeof(int))
5920 return -EINVAL;
5921
5922 len = sizeof(int);
5923 if (sctp_sk(sk)->recvrcvinfo)
5924 val = 1;
5925 if (put_user(len, optlen))
5926 return -EFAULT;
5927 if (copy_to_user(optval, &val, len))
5928 return -EFAULT;
5929
5930 return 0;
5931 }
5932
5933 static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len,
5934 char __user *optval,
5935 int __user *optlen)
5936 {
5937 int val = 0;
5938
5939 if (len < sizeof(int))
5940 return -EINVAL;
5941
5942 len = sizeof(int);
5943 if (sctp_sk(sk)->recvnxtinfo)
5944 val = 1;
5945 if (put_user(len, optlen))
5946 return -EFAULT;
5947 if (copy_to_user(optval, &val, len))
5948 return -EFAULT;
5949
5950 return 0;
5951 }
5952
5953 static int sctp_getsockopt(struct sock *sk, int level, int optname,
5954 char __user *optval, int __user *optlen)
5955 {
5956 int retval = 0;
5957 int len;
5958
5959 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
5960
5961 /* I can hardly begin to describe how wrong this is. This is
5962 * so broken as to be worse than useless. The API draft
5963 * REALLY is NOT helpful here... I am not convinced that the
5964 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
5965 * are at all well-founded.
5966 */
5967 if (level != SOL_SCTP) {
5968 struct sctp_af *af = sctp_sk(sk)->pf->af;
5969
5970 retval = af->getsockopt(sk, level, optname, optval, optlen);
5971 return retval;
5972 }
5973
5974 if (get_user(len, optlen))
5975 return -EFAULT;
5976
5977 lock_sock(sk);
5978
5979 switch (optname) {
5980 case SCTP_STATUS:
5981 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
5982 break;
5983 case SCTP_DISABLE_FRAGMENTS:
5984 retval = sctp_getsockopt_disable_fragments(sk, len, optval,
5985 optlen);
5986 break;
5987 case SCTP_EVENTS:
5988 retval = sctp_getsockopt_events(sk, len, optval, optlen);
5989 break;
5990 case SCTP_AUTOCLOSE:
5991 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
5992 break;
5993 case SCTP_SOCKOPT_PEELOFF:
5994 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
5995 break;
5996 case SCTP_PEER_ADDR_PARAMS:
5997 retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
5998 optlen);
5999 break;
6000 case SCTP_DELAYED_SACK:
6001 retval = sctp_getsockopt_delayed_ack(sk, len, optval,
6002 optlen);
6003 break;
6004 case SCTP_INITMSG:
6005 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
6006 break;
6007 case SCTP_GET_PEER_ADDRS:
6008 retval = sctp_getsockopt_peer_addrs(sk, len, optval,
6009 optlen);
6010 break;
6011 case SCTP_GET_LOCAL_ADDRS:
6012 retval = sctp_getsockopt_local_addrs(sk, len, optval,
6013 optlen);
6014 break;
6015 case SCTP_SOCKOPT_CONNECTX3:
6016 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
6017 break;
6018 case SCTP_DEFAULT_SEND_PARAM:
6019 retval = sctp_getsockopt_default_send_param(sk, len,
6020 optval, optlen);
6021 break;
6022 case SCTP_DEFAULT_SNDINFO:
6023 retval = sctp_getsockopt_default_sndinfo(sk, len,
6024 optval, optlen);
6025 break;
6026 case SCTP_PRIMARY_ADDR:
6027 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
6028 break;
6029 case SCTP_NODELAY:
6030 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
6031 break;
6032 case SCTP_RTOINFO:
6033 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
6034 break;
6035 case SCTP_ASSOCINFO:
6036 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
6037 break;
6038 case SCTP_I_WANT_MAPPED_V4_ADDR:
6039 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
6040 break;
6041 case SCTP_MAXSEG:
6042 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
6043 break;
6044 case SCTP_GET_PEER_ADDR_INFO:
6045 retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
6046 optlen);
6047 break;
6048 case SCTP_ADAPTATION_LAYER:
6049 retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
6050 optlen);
6051 break;
6052 case SCTP_CONTEXT:
6053 retval = sctp_getsockopt_context(sk, len, optval, optlen);
6054 break;
6055 case SCTP_FRAGMENT_INTERLEAVE:
6056 retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
6057 optlen);
6058 break;
6059 case SCTP_PARTIAL_DELIVERY_POINT:
6060 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
6061 optlen);
6062 break;
6063 case SCTP_MAX_BURST:
6064 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
6065 break;
6066 case SCTP_AUTH_KEY:
6067 case SCTP_AUTH_CHUNK:
6068 case SCTP_AUTH_DELETE_KEY:
6069 retval = -EOPNOTSUPP;
6070 break;
6071 case SCTP_HMAC_IDENT:
6072 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
6073 break;
6074 case SCTP_AUTH_ACTIVE_KEY:
6075 retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
6076 break;
6077 case SCTP_PEER_AUTH_CHUNKS:
6078 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
6079 optlen);
6080 break;
6081 case SCTP_LOCAL_AUTH_CHUNKS:
6082 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
6083 optlen);
6084 break;
6085 case SCTP_GET_ASSOC_NUMBER:
6086 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
6087 break;
6088 case SCTP_GET_ASSOC_ID_LIST:
6089 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
6090 break;
6091 case SCTP_AUTO_ASCONF:
6092 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
6093 break;
6094 case SCTP_PEER_ADDR_THLDS:
6095 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
6096 break;
6097 case SCTP_GET_ASSOC_STATS:
6098 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
6099 break;
6100 case SCTP_RECVRCVINFO:
6101 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
6102 break;
6103 case SCTP_RECVNXTINFO:
6104 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
6105 break;
6106 default:
6107 retval = -ENOPROTOOPT;
6108 break;
6109 }
6110
6111 release_sock(sk);
6112 return retval;
6113 }
6114
6115 static void sctp_hash(struct sock *sk)
6116 {
6117 /* STUB */
6118 }
6119
6120 static void sctp_unhash(struct sock *sk)
6121 {
6122 /* STUB */
6123 }
6124
6125 /* Check if port is acceptable. Possibly find first available port.
6126 *
6127  * The port hash table is contained in the 'global' SCTP protocol storage
6128  * returned by struct sctp_protocol *sctp_get_protocol().  The hash
6129  * table is an array of 4096 lists (sctp_bind_hashbucket).  Each
6130  * list holds the ports whose number hashes out to that list's index
6131  * (so, as you would expect from a hash function, all the ports in a
6132  * given list hash out to the same list number; you were expecting
6133  * that, right?).  Each port entry in a list carries a link to the
6134  * sockets (struct sock) that use it, the port number and
6135  * a fastreuse flag (FIXME: NPI ipg).
6136 */
6137 static struct sctp_bind_bucket *sctp_bucket_create(
6138 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
6139
6140 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
6141 {
6142 struct sctp_bind_hashbucket *head; /* hash list */
6143 struct sctp_bind_bucket *pp;
6144 unsigned short snum;
6145 int ret;
6146
6147 snum = ntohs(addr->v4.sin_port);
6148
6149 pr_debug("%s: begins, snum:%d\n", __func__, snum);
6150
6151 local_bh_disable();
6152
6153 if (snum == 0) {
6154 /* Search for an available port. */
6155 int low, high, remaining, index;
6156 unsigned int rover;
6157 struct net *net = sock_net(sk);
6158
6159 inet_get_local_port_range(net, &low, &high);
6160 remaining = (high - low) + 1;
6161 rover = prandom_u32() % remaining + low;
6162
6163 do {
6164 rover++;
6165 if ((rover < low) || (rover > high))
6166 rover = low;
6167 if (inet_is_local_reserved_port(net, rover))
6168 continue;
6169 index = sctp_phashfn(sock_net(sk), rover);
6170 head = &sctp_port_hashtable[index];
6171 spin_lock(&head->lock);
6172 sctp_for_each_hentry(pp, &head->chain)
6173 if ((pp->port == rover) &&
6174 net_eq(sock_net(sk), pp->net))
6175 goto next;
6176 break;
6177 next:
6178 spin_unlock(&head->lock);
6179 } while (--remaining > 0);
6180
6181 /* Exhausted local port range during search? */
6182 ret = 1;
6183 if (remaining <= 0)
6184 goto fail;
6185
6186 /* OK, here is the one we will use. HEAD (the port
6187 		 * hash table list entry) is non-NULL and we hold its
6188 		 * lock.
6189 */
6190 snum = rover;
6191 } else {
6192 		/* We are given a specific port number; we verify
6193 		 * that it is not being used. If it is used, we will
6194 		 * exhaust the search in the hash list corresponding
6195 * to the port number (snum) - we detect that with the
6196 * port iterator, pp being NULL.
6197 */
6198 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
6199 spin_lock(&head->lock);
6200 sctp_for_each_hentry(pp, &head->chain) {
6201 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
6202 goto pp_found;
6203 }
6204 }
6205 pp = NULL;
6206 goto pp_not_found;
6207 pp_found:
6208 if (!hlist_empty(&pp->owner)) {
6209 /* We had a port hash table hit - there is an
6210 * available port (pp != NULL) and it is being
6211 		 * used by another socket (pp->owner not empty); that other
6212 * socket is going to be sk2.
6213 */
6214 int reuse = sk->sk_reuse;
6215 struct sock *sk2;
6216
6217 pr_debug("%s: found a possible match\n", __func__);
6218
6219 if (pp->fastreuse && sk->sk_reuse &&
6220 sk->sk_state != SCTP_SS_LISTENING)
6221 goto success;
6222
6223 /* Run through the list of sockets bound to the port
6224 * (pp->port) [via the pointers bind_next and
6225 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
6226 * we get the endpoint they describe and run through
6227 * the endpoint's list of IP (v4 or v6) addresses,
6228 * comparing each of the addresses with the address of
6229 * the socket sk. If we find a match, then that means
6230 * that this port/socket (sk) combination are already
6231 * in an endpoint.
6232 */
6233 sk_for_each_bound(sk2, &pp->owner) {
6234 struct sctp_endpoint *ep2;
6235 ep2 = sctp_sk(sk2)->ep;
6236
6237 if (sk == sk2 ||
6238 (reuse && sk2->sk_reuse &&
6239 sk2->sk_state != SCTP_SS_LISTENING))
6240 continue;
6241
6242 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
6243 sctp_sk(sk2), sctp_sk(sk))) {
6244 ret = (long)sk2;
6245 goto fail_unlock;
6246 }
6247 }
6248
6249 pr_debug("%s: found a match\n", __func__);
6250 }
6251 pp_not_found:
6252 /* If there was a hash table miss, create a new port. */
6253 ret = 1;
6254 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
6255 goto fail_unlock;
6256
6257 /* In either case (hit or miss), make sure fastreuse is 1 only
6258 * if sk->sk_reuse is too (that is, if the caller requested
6259 * SO_REUSEADDR on this socket -sk-).
6260 */
6261 if (hlist_empty(&pp->owner)) {
6262 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
6263 pp->fastreuse = 1;
6264 else
6265 pp->fastreuse = 0;
6266 } else if (pp->fastreuse &&
6267 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
6268 pp->fastreuse = 0;
6269
6270 /* We are set, so fill up all the data in the hash table
6271 	 * entry and tie the socket list information to the rest of the
6272 	 * sockets.  FIXME: Blurry, NPI (ipg).
6273 */
6274 success:
6275 if (!sctp_sk(sk)->bind_hash) {
6276 inet_sk(sk)->inet_num = snum;
6277 sk_add_bind_node(sk, &pp->owner);
6278 sctp_sk(sk)->bind_hash = pp;
6279 }
6280 ret = 0;
6281
6282 fail_unlock:
6283 spin_unlock(&head->lock);
6284
6285 fail:
6286 local_bh_enable();
6287 return ret;
6288 }
6289
6290 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
6291 * port is requested.
6292 */
6293 static int sctp_get_port(struct sock *sk, unsigned short snum)
6294 {
6295 union sctp_addr addr;
6296 struct sctp_af *af = sctp_sk(sk)->pf->af;
6297
6298 /* Set up a dummy address struct from the sk. */
6299 af->from_sk(&addr, sk);
6300 addr.v4.sin_port = htons(snum);
6301
6302 /* Note: sk->sk_num gets filled in if ephemeral port request. */
6303 return !!sctp_get_port_local(sk, &addr);
6304 }
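
/* Illustrative userspace sketch (not part of this file): binding with port 0
 * exercises the ephemeral-port search above, and getsockname() then reveals
 * which port the kernel picked. "sd" is a hypothetical SCTP socket and error
 * handling is omitted.
 *
 *      struct sockaddr_in a;
 *      socklen_t alen = sizeof(a);
 *
 *      memset(&a, 0, sizeof(a));
 *      a.sin_family = AF_INET;
 *      a.sin_addr.s_addr = htonl(INADDR_ANY);
 *      a.sin_port = 0;                 (request an ephemeral port)
 *      bind(sd, (struct sockaddr *)&a, sizeof(a));
 *      getsockname(sd, (struct sockaddr *)&a, &alen);
 *      printf("bound to port %u\n", ntohs(a.sin_port));
 */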
6305
6306 /*
6307 * Move a socket to LISTENING state.
6308 */
6309 static int sctp_listen_start(struct sock *sk, int backlog)
6310 {
6311 struct sctp_sock *sp = sctp_sk(sk);
6312 struct sctp_endpoint *ep = sp->ep;
6313 struct crypto_hash *tfm = NULL;
6314 char alg[32];
6315
6316 /* Allocate HMAC for generating cookie. */
6317 if (!sp->hmac && sp->sctp_hmac_alg) {
6318 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
6319 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
6320 if (IS_ERR(tfm)) {
6321 net_info_ratelimited("failed to load transform for %s: %ld\n",
6322 sp->sctp_hmac_alg, PTR_ERR(tfm));
6323 return -ENOSYS;
6324 }
6325 sctp_sk(sk)->hmac = tfm;
6326 }
6327
6328 /*
6329 * If a bind() or sctp_bindx() is not called prior to a listen()
6330 * call that allows new associations to be accepted, the system
6331 * picks an ephemeral port and will choose an address set equivalent
6332 * to binding with a wildcard address.
6333 *
6334 * This is not currently spelled out in the SCTP sockets
6335 * extensions draft, but follows the practice as seen in TCP
6336 * sockets.
6337 *
6338 */
6339 sk->sk_state = SCTP_SS_LISTENING;
6340 if (!ep->base.bind_addr.port) {
6341 if (sctp_autobind(sk))
6342 return -EAGAIN;
6343 } else {
6344 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
6345 sk->sk_state = SCTP_SS_CLOSED;
6346 return -EADDRINUSE;
6347 }
6348 }
6349
6350 sk->sk_max_ack_backlog = backlog;
6351 sctp_hash_endpoint(ep);
6352 return 0;
6353 }
6354
6355 /*
6356 * 4.1.3 / 5.1.3 listen()
6357 *
6358 * By default, new associations are not accepted for UDP style sockets.
6359 * An application uses listen() to mark a socket as being able to
6360 * accept new associations.
6361 *
6362 * On TCP style sockets, applications use listen() to ready the SCTP
6363 * endpoint for accepting inbound associations.
6364 *
6365 * On both types of endpoints a backlog of '0' disables listening.
6366 *
6367 * Move a socket to LISTENING state.
6368 */
6369 int sctp_inet_listen(struct socket *sock, int backlog)
6370 {
6371 struct sock *sk = sock->sk;
6372 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6373 int err = -EINVAL;
6374
6375 if (unlikely(backlog < 0))
6376 return err;
6377
6378 lock_sock(sk);
6379
6380 /* Peeled-off sockets are not allowed to listen(). */
6381 if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
6382 goto out;
6383
6384 if (sock->state != SS_UNCONNECTED)
6385 goto out;
6386
6387 /* If backlog is zero, disable listening. */
6388 if (!backlog) {
6389 if (sctp_sstate(sk, CLOSED))
6390 goto out;
6391
6392 err = 0;
6393 sctp_unhash_endpoint(ep);
6394 sk->sk_state = SCTP_SS_CLOSED;
6395 if (sk->sk_reuse)
6396 sctp_sk(sk)->bind_hash->fastreuse = 1;
6397 goto out;
6398 }
6399
6400 /* If we are already listening, just update the backlog */
6401 if (sctp_sstate(sk, LISTENING))
6402 sk->sk_max_ack_backlog = backlog;
6403 else {
6404 err = sctp_listen_start(sk, backlog);
6405 if (err)
6406 goto out;
6407 }
6408
6409 err = 0;
6410 out:
6411 release_sock(sk);
6412 return err;
6413 }
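
/* Illustrative userspace sketch (not part of this file): on a one-to-many
 * (SOCK_SEQPACKET) SCTP socket, listen() marks the endpoint as willing to
 * accept new associations and a backlog of 0 disables that again, as
 * described above. "local" is a hypothetical bound address and error
 * handling is omitted.
 *
 *      int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *
 *      bind(sd, (struct sockaddr *)&local, sizeof(local));
 *      listen(sd, 5);          (start accepting new associations)
 *      ...
 *      listen(sd, 0);          (stop accepting new associations)
 */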
6414
6415 /*
6416  * This function is modeled on the current datagram_poll() and
6417  * tcp_poll(). Note that, based on these implementations, we don't
6418 * lock the socket in this function, even though it seems that,
6419 * ideally, locking or some other mechanisms can be used to ensure
6420 * the integrity of the counters (sndbuf and wmem_alloc) used
6421 * in this place. We assume that we don't need locks either until proven
6422 * otherwise.
6423 *
6424 * Another thing to note is that we include the Async I/O support
6425 * here, again, by modeling the current TCP/UDP code. We don't have
6426 * a good way to test with it yet.
6427 */
6428 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
6429 {
6430 struct sock *sk = sock->sk;
6431 struct sctp_sock *sp = sctp_sk(sk);
6432 unsigned int mask;
6433
6434 poll_wait(file, sk_sleep(sk), wait);
6435
6436 /* A TCP-style listening socket becomes readable when the accept queue
6437 * is not empty.
6438 */
6439 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
6440 return (!list_empty(&sp->ep->asocs)) ?
6441 (POLLIN | POLLRDNORM) : 0;
6442
6443 mask = 0;
6444
6445 	/* Are there any exceptional events? */
6446 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
6447 mask |= POLLERR |
6448 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
6449 if (sk->sk_shutdown & RCV_SHUTDOWN)
6450 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
6451 if (sk->sk_shutdown == SHUTDOWN_MASK)
6452 mask |= POLLHUP;
6453
6454 /* Is it readable? Reconsider this code with TCP-style support. */
6455 if (!skb_queue_empty(&sk->sk_receive_queue))
6456 mask |= POLLIN | POLLRDNORM;
6457
6458 /* The association is either gone or not ready. */
6459 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
6460 return mask;
6461
6462 /* Is it writable? */
6463 if (sctp_writeable(sk)) {
6464 mask |= POLLOUT | POLLWRNORM;
6465 } else {
6466 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
6467 /*
6468 * Since the socket is not locked, the buffer
6469 * might be made available after the writeable check and
6470 * before the bit is set. This could cause a lost I/O
6471 * signal. tcp_poll() has a race breaker for this race
6472 * condition. Based on their implementation, we put
6473 * in the following code to cover it as well.
6474 */
6475 if (sctp_writeable(sk))
6476 mask |= POLLOUT | POLLWRNORM;
6477 }
6478 return mask;
6479 }
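
/* Illustrative userspace sketch (not part of this file): the mask computed
 * above is what a poll()/select() caller ultimately observes. "sd" is a
 * hypothetical SCTP socket and error handling is omitted.
 *
 *      struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLOUT };
 *
 *      if (poll(&pfd, 1, 1000) > 0) {
 *              if (pfd.revents & POLLIN)
 *                      ... a message or notification can be read ...
 *              if (pfd.revents & POLLOUT)
 *                      ... sndbuf space is available for sendmsg() ...
 *      }
 */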
6480
6481 /********************************************************************
6482 * 2nd Level Abstractions
6483 ********************************************************************/
6484
6485 static struct sctp_bind_bucket *sctp_bucket_create(
6486 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
6487 {
6488 struct sctp_bind_bucket *pp;
6489
6490 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
6491 if (pp) {
6492 SCTP_DBG_OBJCNT_INC(bind_bucket);
6493 pp->port = snum;
6494 pp->fastreuse = 0;
6495 INIT_HLIST_HEAD(&pp->owner);
6496 pp->net = net;
6497 hlist_add_head(&pp->node, &head->chain);
6498 }
6499 return pp;
6500 }
6501
6502 /* Caller must hold hashbucket lock for this tb with local BH disabled */
6503 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
6504 {
6505 if (pp && hlist_empty(&pp->owner)) {
6506 __hlist_del(&pp->node);
6507 kmem_cache_free(sctp_bucket_cachep, pp);
6508 SCTP_DBG_OBJCNT_DEC(bind_bucket);
6509 }
6510 }
6511
6512 /* Release this socket's reference to a local port. */
6513 static inline void __sctp_put_port(struct sock *sk)
6514 {
6515 struct sctp_bind_hashbucket *head =
6516 &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
6517 inet_sk(sk)->inet_num)];
6518 struct sctp_bind_bucket *pp;
6519
6520 spin_lock(&head->lock);
6521 pp = sctp_sk(sk)->bind_hash;
6522 __sk_del_bind_node(sk);
6523 sctp_sk(sk)->bind_hash = NULL;
6524 inet_sk(sk)->inet_num = 0;
6525 sctp_bucket_destroy(pp);
6526 spin_unlock(&head->lock);
6527 }
6528
6529 void sctp_put_port(struct sock *sk)
6530 {
6531 local_bh_disable();
6532 __sctp_put_port(sk);
6533 local_bh_enable();
6534 }
6535
6536 /*
6537  * The system picks an ephemeral port and chooses an address set equivalent
6538 * to binding with a wildcard address.
6539 * One of those addresses will be the primary address for the association.
6540 * This automatically enables the multihoming capability of SCTP.
6541 */
6542 static int sctp_autobind(struct sock *sk)
6543 {
6544 union sctp_addr autoaddr;
6545 struct sctp_af *af;
6546 __be16 port;
6547
6548 /* Initialize a local sockaddr structure to INADDR_ANY. */
6549 af = sctp_sk(sk)->pf->af;
6550
6551 port = htons(inet_sk(sk)->inet_num);
6552 af->inaddr_any(&autoaddr, port);
6553
6554 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
6555 }
6556
6557 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
6558 *
6559 * From RFC 2292
6560 * 4.2 The cmsghdr Structure *
6561 *
6562 * When ancillary data is sent or received, any number of ancillary data
6563 * objects can be specified by the msg_control and msg_controllen members of
6564 * the msghdr structure, because each object is preceded by
6565 * a cmsghdr structure defining the object's length (the cmsg_len member).
6566 * Historically Berkeley-derived implementations have passed only one object
6567 * at a time, but this API allows multiple objects to be
6568 * passed in a single call to sendmsg() or recvmsg(). The following example
6569 * shows two ancillary data objects in a control buffer.
6570 *
6571 * |<--------------------------- msg_controllen -------------------------->|
6572 * | |
6573 *
6574 * |<----- ancillary data object ----->|<----- ancillary data object ----->|
6575 *
6576 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
6577 * | | |
6578 *
6579 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| |
6580 *
6581 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| |
6582 * | | | | |
6583 *
6584 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
6585 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX|
6586 *
6587 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX|
6588 *
6589 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
6590 * ^
6591 * |
6592 *
6593 * msg_control
6594 * points here
6595 */
6596 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
6597 {
6598 struct cmsghdr *cmsg;
6599 struct msghdr *my_msg = (struct msghdr *)msg;
6600
6601 for_each_cmsghdr(cmsg, my_msg) {
6602 if (!CMSG_OK(my_msg, cmsg))
6603 return -EINVAL;
6604
6605 /* Should we parse this header or ignore? */
6606 if (cmsg->cmsg_level != IPPROTO_SCTP)
6607 continue;
6608
6609 /* Strictly check lengths following example in SCM code. */
6610 switch (cmsg->cmsg_type) {
6611 case SCTP_INIT:
6612 /* SCTP Socket API Extension
6613 * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
6614 *
6615 * This cmsghdr structure provides information for
6616 * initializing new SCTP associations with sendmsg().
6617 * The SCTP_INITMSG socket option uses this same data
6618 * structure. This structure is not used for
6619 * recvmsg().
6620 *
6621 * cmsg_level cmsg_type cmsg_data[]
6622 * ------------ ------------ ----------------------
6623 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
6624 */
6625 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
6626 return -EINVAL;
6627
6628 cmsgs->init = CMSG_DATA(cmsg);
6629 break;
6630
6631 case SCTP_SNDRCV:
6632 /* SCTP Socket API Extension
6633 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
6634 *
6635 * This cmsghdr structure specifies SCTP options for
6636 * sendmsg() and describes SCTP header information
6637 * about a received message through recvmsg().
6638 *
6639 * cmsg_level cmsg_type cmsg_data[]
6640 * ------------ ------------ ----------------------
6641 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
6642 */
6643 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
6644 return -EINVAL;
6645
6646 cmsgs->srinfo = CMSG_DATA(cmsg);
6647
6648 if (cmsgs->srinfo->sinfo_flags &
6649 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
6650 SCTP_ABORT | SCTP_EOF))
6651 return -EINVAL;
6652 break;
6653
6654 case SCTP_SNDINFO:
6655 /* SCTP Socket API Extension
6656 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
6657 *
6658 * This cmsghdr structure specifies SCTP options for
6659 * sendmsg(). This structure and SCTP_RCVINFO replaces
6660 * SCTP_SNDRCV which has been deprecated.
6661 *
6662 * cmsg_level cmsg_type cmsg_data[]
6663 * ------------ ------------ ---------------------
6664 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo
6665 */
6666 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
6667 return -EINVAL;
6668
6669 cmsgs->sinfo = CMSG_DATA(cmsg);
6670
6671 if (cmsgs->sinfo->snd_flags &
6672 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
6673 SCTP_ABORT | SCTP_EOF))
6674 return -EINVAL;
6675 break;
6676 default:
6677 return -EINVAL;
6678 }
6679 }
6680
6681 return 0;
6682 }
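
/* Illustrative userspace sketch (not part of this file): attaching an
 * SCTP_SNDINFO ancillary data object to sendmsg(), which the parser above
 * picks up. "sd", "data", "data_len" and "peer" are hypothetical and error
 * handling is omitted.
 *
 *      char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];
 *      struct sctp_sndinfo si;
 *      struct iovec iov = { .iov_base = data, .iov_len = data_len };
 *      struct msghdr msg;
 *      struct cmsghdr *cmsg;
 *
 *      memset(&si, 0, sizeof(si));
 *      si.snd_sid = 1;                 (send on stream 1)
 *      si.snd_flags = SCTP_UNORDERED;
 *
 *      memset(&msg, 0, sizeof(msg));
 *      msg.msg_name = &peer;
 *      msg.msg_namelen = sizeof(peer);
 *      msg.msg_iov = &iov;
 *      msg.msg_iovlen = 1;
 *      msg.msg_control = cbuf;
 *      msg.msg_controllen = sizeof(cbuf);
 *
 *      cmsg = CMSG_FIRSTHDR(&msg);
 *      cmsg->cmsg_level = IPPROTO_SCTP;
 *      cmsg->cmsg_type = SCTP_SNDINFO;
 *      cmsg->cmsg_len = CMSG_LEN(sizeof(si));
 *      memcpy(CMSG_DATA(cmsg), &si, sizeof(si));
 *
 *      sendmsg(sd, &msg, 0);
 */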
6683
6684 /*
6685  * Wait for a packet.
6686 * Note: This function is the same function as in core/datagram.c
6687 * with a few modifications to make lksctp work.
6688 */
6689 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
6690 {
6691 int error;
6692 DEFINE_WAIT(wait);
6693
6694 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
6695
6696 /* Socket errors? */
6697 error = sock_error(sk);
6698 if (error)
6699 goto out;
6700
6701 if (!skb_queue_empty(&sk->sk_receive_queue))
6702 goto ready;
6703
6704 /* Socket shut down? */
6705 if (sk->sk_shutdown & RCV_SHUTDOWN)
6706 goto out;
6707
6708 /* Sequenced packets can come disconnected. If so we report the
6709 * problem.
6710 */
6711 error = -ENOTCONN;
6712
6713 /* Is there a good reason to think that we may receive some data? */
6714 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
6715 goto out;
6716
6717 /* Handle signals. */
6718 if (signal_pending(current))
6719 goto interrupted;
6720
6721 	/* Let another process have a go, since we are going to sleep
6722 	 * anyway. Note: this may cause odd behaviors if the message
6723 * does not fit in the user's buffer, but this seems to be the
6724 * only way to honor MSG_DONTWAIT realistically.
6725 */
6726 release_sock(sk);
6727 *timeo_p = schedule_timeout(*timeo_p);
6728 lock_sock(sk);
6729
6730 ready:
6731 finish_wait(sk_sleep(sk), &wait);
6732 return 0;
6733
6734 interrupted:
6735 error = sock_intr_errno(*timeo_p);
6736
6737 out:
6738 finish_wait(sk_sleep(sk), &wait);
6739 *err = error;
6740 return error;
6741 }
6742
6743 /* Receive a datagram.
6744 * Note: This is pretty much the same routine as in core/datagram.c
6745 * with a few changes to make lksctp work.
6746 */
6747 struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
6748 int noblock, int *err)
6749 {
6750 int error;
6751 struct sk_buff *skb;
6752 long timeo;
6753
6754 timeo = sock_rcvtimeo(sk, noblock);
6755
6756 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
6757 MAX_SCHEDULE_TIMEOUT);
6758
6759 do {
6760 /* Again only user level code calls this function,
6761 * so nothing interrupt level
6762 * will suddenly eat the receive_queue.
6763 *
6764 * Look at current nfs client by the way...
6765 * However, this function was correct in any case. 8)
6766 */
6767 if (flags & MSG_PEEK) {
6768 spin_lock_bh(&sk->sk_receive_queue.lock);
6769 skb = skb_peek(&sk->sk_receive_queue);
6770 if (skb)
6771 atomic_inc(&skb->users);
6772 spin_unlock_bh(&sk->sk_receive_queue.lock);
6773 } else {
6774 skb = skb_dequeue(&sk->sk_receive_queue);
6775 }
6776
6777 if (skb)
6778 return skb;
6779
6780 /* Caller is allowed not to check sk->sk_err before calling. */
6781 error = sock_error(sk);
6782 if (error)
6783 goto no_packet;
6784
6785 if (sk->sk_shutdown & RCV_SHUTDOWN)
6786 break;
6787
6788 if (sk_can_busy_loop(sk) &&
6789 sk_busy_loop(sk, noblock))
6790 continue;
6791
6792 /* User doesn't want to wait. */
6793 error = -EAGAIN;
6794 if (!timeo)
6795 goto no_packet;
6796 } while (sctp_wait_for_packet(sk, err, &timeo) == 0);
6797
6798 return NULL;
6799
6800 no_packet:
6801 *err = error;
6802 return NULL;
6803 }
6804
6805 /* If sndbuf has changed, wake up per association sndbuf waiters. */
6806 static void __sctp_write_space(struct sctp_association *asoc)
6807 {
6808 struct sock *sk = asoc->base.sk;
6809 struct socket *sock = sk->sk_socket;
6810
6811 if ((sctp_wspace(asoc) > 0) && sock) {
6812 if (waitqueue_active(&asoc->wait))
6813 wake_up_interruptible(&asoc->wait);
6814
6815 if (sctp_writeable(sk)) {
6816 wait_queue_head_t *wq = sk_sleep(sk);
6817
6818 if (wq && waitqueue_active(wq))
6819 wake_up_interruptible(wq);
6820
6821 /* Note that we try to include the Async I/O support
6822 * here by modeling from the current TCP/UDP code.
6823 * We have not tested with it yet.
6824 */
6825 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
6826 sock_wake_async(sock,
6827 SOCK_WAKE_SPACE, POLL_OUT);
6828 }
6829 }
6830 }
6831
6832 static void sctp_wake_up_waiters(struct sock *sk,
6833 struct sctp_association *asoc)
6834 {
6835 struct sctp_association *tmp = asoc;
6836
6837 /* We do accounting for the sndbuf space per association,
6838 * so we only need to wake our own association.
6839 */
6840 if (asoc->ep->sndbuf_policy)
6841 return __sctp_write_space(asoc);
6842
6843 	/* If the association goes down and is just flushing its
6844 	 * outq, then just notify the others normally.
6845 */
6846 if (asoc->base.dead)
6847 return sctp_write_space(sk);
6848
6849 /* Accounting for the sndbuf space is per socket, so we
6850 * need to wake up others, try to be fair and in case of
6851 * other associations, let them have a go first instead
6852 * of just doing a sctp_write_space() call.
6853 *
6854 * Note that we reach sctp_wake_up_waiters() only when
6855 * associations free up queued chunks, thus we are under
6856 * lock and the list of associations on a socket is
6857 * guaranteed not to change.
6858 */
6859 for (tmp = list_next_entry(tmp, asocs); 1;
6860 tmp = list_next_entry(tmp, asocs)) {
6861 /* Manually skip the head element. */
6862 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
6863 continue;
6864 /* Wake up association. */
6865 __sctp_write_space(tmp);
6866 /* We've reached the end. */
6867 if (tmp == asoc)
6868 break;
6869 }
6870 }
6871
6872 /* Do accounting for the sndbuf space.
6873 * Decrement the used sndbuf space of the corresponding association by the
6874 * data size which was just transmitted(freed).
6875 */
6876 static void sctp_wfree(struct sk_buff *skb)
6877 {
6878 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
6879 struct sctp_association *asoc = chunk->asoc;
6880 struct sock *sk = asoc->base.sk;
6881
6882 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
6883 sizeof(struct sk_buff) +
6884 sizeof(struct sctp_chunk);
6885
6886 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
6887
6888 /*
6889 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
6890 */
6891 sk->sk_wmem_queued -= skb->truesize;
6892 sk_mem_uncharge(sk, skb->truesize);
6893
6894 sock_wfree(skb);
6895 sctp_wake_up_waiters(sk, asoc);
6896
6897 sctp_association_put(asoc);
6898 }
6899
6900 /* Do accounting for the receive space on the socket.
6901 * Accounting for the association is done in ulpevent.c
6902 * We set this as a destructor for the cloned data skbs so that
6903 * accounting is done at the correct time.
6904 */
6905 void sctp_sock_rfree(struct sk_buff *skb)
6906 {
6907 struct sock *sk = skb->sk;
6908 struct sctp_ulpevent *event = sctp_skb2event(skb);
6909
6910 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
6911
6912 /*
6913 * Mimic the behavior of sock_rfree
6914 */
6915 sk_mem_uncharge(sk, event->rmem_len);
6916 }
6917
6918
6919 /* Helper function to wait for space in the sndbuf. */
6920 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
6921 size_t msg_len)
6922 {
6923 struct sock *sk = asoc->base.sk;
6924 int err = 0;
6925 long current_timeo = *timeo_p;
6926 DEFINE_WAIT(wait);
6927
6928 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
6929 *timeo_p, msg_len);
6930
6931 /* Increment the association's refcnt. */
6932 sctp_association_hold(asoc);
6933
6934 /* Wait on the association specific sndbuf space. */
6935 for (;;) {
6936 prepare_to_wait_exclusive(&asoc->wait, &wait,
6937 TASK_INTERRUPTIBLE);
6938 if (!*timeo_p)
6939 goto do_nonblock;
6940 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
6941 asoc->base.dead)
6942 goto do_error;
6943 if (signal_pending(current))
6944 goto do_interrupted;
6945 if (msg_len <= sctp_wspace(asoc))
6946 break;
6947
6948 		/* Let another process have a go, since we are going
6949 		 * to sleep anyway.
6950 */
6951 release_sock(sk);
6952 current_timeo = schedule_timeout(current_timeo);
6953 BUG_ON(sk != asoc->base.sk);
6954 lock_sock(sk);
6955
6956 *timeo_p = current_timeo;
6957 }
6958
6959 out:
6960 finish_wait(&asoc->wait, &wait);
6961
6962 /* Release the association's refcnt. */
6963 sctp_association_put(asoc);
6964
6965 return err;
6966
6967 do_error:
6968 err = -EPIPE;
6969 goto out;
6970
6971 do_interrupted:
6972 err = sock_intr_errno(*timeo_p);
6973 goto out;
6974
6975 do_nonblock:
6976 err = -EAGAIN;
6977 goto out;
6978 }
6979
6980 void sctp_data_ready(struct sock *sk)
6981 {
6982 struct socket_wq *wq;
6983
6984 rcu_read_lock();
6985 wq = rcu_dereference(sk->sk_wq);
6986 if (wq_has_sleeper(wq))
6987 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
6988 POLLRDNORM | POLLRDBAND);
6989 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
6990 rcu_read_unlock();
6991 }
6992
6993 /* If socket sndbuf has changed, wake up all per association waiters. */
6994 void sctp_write_space(struct sock *sk)
6995 {
6996 struct sctp_association *asoc;
6997
6998 /* Wake up the tasks in each wait queue. */
6999 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
7000 __sctp_write_space(asoc);
7001 }
7002 }
7003
7004 /* Is there any sndbuf space available on the socket?
7005 *
7006 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
7007 * associations on the same socket. For a UDP-style socket with
7008 * multiple associations, it is possible for it to be "unwriteable"
7009 * prematurely. I assume that this is acceptable because
7010 * a premature "unwriteable" is better than an accidental "writeable" which
7011 * would cause an unwanted block under certain circumstances. For the 1-1
7012 * UDP-style sockets or TCP-style sockets, this code should work.
7013 * - Daisy
7014 */
7015 static int sctp_writeable(struct sock *sk)
7016 {
7017 int amt = 0;
7018
7019 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
7020 if (amt < 0)
7021 amt = 0;
7022 return amt;
7023 }
7024
7025 /* Wait for an association to go into ESTABLISHED state. If timeout is 0,
7026 * returns immediately with EINPROGRESS.
7027 */
7028 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
7029 {
7030 struct sock *sk = asoc->base.sk;
7031 int err = 0;
7032 long current_timeo = *timeo_p;
7033 DEFINE_WAIT(wait);
7034
7035 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);
7036
7037 /* Increment the association's refcnt. */
7038 sctp_association_hold(asoc);
7039
7040 for (;;) {
7041 prepare_to_wait_exclusive(&asoc->wait, &wait,
7042 TASK_INTERRUPTIBLE);
7043 if (!*timeo_p)
7044 goto do_nonblock;
7045 if (sk->sk_shutdown & RCV_SHUTDOWN)
7046 break;
7047 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
7048 asoc->base.dead)
7049 goto do_error;
7050 if (signal_pending(current))
7051 goto do_interrupted;
7052
7053 if (sctp_state(asoc, ESTABLISHED))
7054 break;
7055
7056 		/* Let another process have a go, since we are going
7057 		 * to sleep anyway.
7058 */
7059 release_sock(sk);
7060 current_timeo = schedule_timeout(current_timeo);
7061 lock_sock(sk);
7062
7063 *timeo_p = current_timeo;
7064 }
7065
7066 out:
7067 finish_wait(&asoc->wait, &wait);
7068
7069 /* Release the association's refcnt. */
7070 sctp_association_put(asoc);
7071
7072 return err;
7073
7074 do_error:
7075 if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
7076 err = -ETIMEDOUT;
7077 else
7078 err = -ECONNREFUSED;
7079 goto out;
7080
7081 do_interrupted:
7082 err = sock_intr_errno(*timeo_p);
7083 goto out;
7084
7085 do_nonblock:
7086 err = -EINPROGRESS;
7087 goto out;
7088 }
7089
7090 static int sctp_wait_for_accept(struct sock *sk, long timeo)
7091 {
7092 struct sctp_endpoint *ep;
7093 int err = 0;
7094 DEFINE_WAIT(wait);
7095
7096 ep = sctp_sk(sk)->ep;
7097
7098
7099 for (;;) {
7100 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
7101 TASK_INTERRUPTIBLE);
7102
7103 if (list_empty(&ep->asocs)) {
7104 release_sock(sk);
7105 timeo = schedule_timeout(timeo);
7106 lock_sock(sk);
7107 }
7108
7109 err = -EINVAL;
7110 if (!sctp_sstate(sk, LISTENING))
7111 break;
7112
7113 err = 0;
7114 if (!list_empty(&ep->asocs))
7115 break;
7116
7117 err = sock_intr_errno(timeo);
7118 if (signal_pending(current))
7119 break;
7120
7121 err = -EAGAIN;
7122 if (!timeo)
7123 break;
7124 }
7125
7126 finish_wait(sk_sleep(sk), &wait);
7127
7128 return err;
7129 }
7130
7131 static void sctp_wait_for_close(struct sock *sk, long timeout)
7132 {
7133 DEFINE_WAIT(wait);
7134
7135 do {
7136 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
7137 if (list_empty(&sctp_sk(sk)->ep->asocs))
7138 break;
7139 release_sock(sk);
7140 timeout = schedule_timeout(timeout);
7141 lock_sock(sk);
7142 } while (!signal_pending(current) && timeout);
7143
7144 finish_wait(sk_sleep(sk), &wait);
7145 }
7146
7147 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
7148 {
7149 struct sk_buff *frag;
7150
7151 if (!skb->data_len)
7152 goto done;
7153
7154 /* Don't forget the fragments. */
7155 skb_walk_frags(skb, frag)
7156 sctp_skb_set_owner_r_frag(frag, sk);
7157
7158 done:
7159 sctp_skb_set_owner_r(skb, sk);
7160 }
7161
7162 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
7163 struct sctp_association *asoc)
7164 {
7165 struct inet_sock *inet = inet_sk(sk);
7166 struct inet_sock *newinet;
7167
7168 newsk->sk_type = sk->sk_type;
7169 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
7170 newsk->sk_flags = sk->sk_flags;
7171 newsk->sk_no_check_tx = sk->sk_no_check_tx;
7172 newsk->sk_no_check_rx = sk->sk_no_check_rx;
7173 newsk->sk_reuse = sk->sk_reuse;
7174
7175 newsk->sk_shutdown = sk->sk_shutdown;
7176 newsk->sk_destruct = sctp_destruct_sock;
7177 newsk->sk_family = sk->sk_family;
7178 newsk->sk_protocol = IPPROTO_SCTP;
7179 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
7180 newsk->sk_sndbuf = sk->sk_sndbuf;
7181 newsk->sk_rcvbuf = sk->sk_rcvbuf;
7182 newsk->sk_lingertime = sk->sk_lingertime;
7183 newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
7184 newsk->sk_sndtimeo = sk->sk_sndtimeo;
7185
7186 newinet = inet_sk(newsk);
7187
7188 /* Initialize sk's sport, dport, rcv_saddr and daddr for
7189 * getsockname() and getpeername()
7190 */
7191 newinet->inet_sport = inet->inet_sport;
7192 newinet->inet_saddr = inet->inet_saddr;
7193 newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
7194 newinet->inet_dport = htons(asoc->peer.port);
7195 newinet->pmtudisc = inet->pmtudisc;
7196 newinet->inet_id = asoc->next_tsn ^ jiffies;
7197
7198 newinet->uc_ttl = inet->uc_ttl;
7199 newinet->mc_loop = 1;
7200 newinet->mc_ttl = 1;
7201 newinet->mc_index = 0;
7202 newinet->mc_list = NULL;
7203 }
7204
7205 /* Populate the fields of the newsk from the oldsk and migrate the assoc
7206 * and its messages to the newsk.
7207 */
7208 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
7209 struct sctp_association *assoc,
7210 sctp_socket_type_t type)
7211 {
7212 struct sctp_sock *oldsp = sctp_sk(oldsk);
7213 struct sctp_sock *newsp = sctp_sk(newsk);
7214 struct sctp_bind_bucket *pp; /* hash list port iterator */
7215 struct sctp_endpoint *newep = newsp->ep;
7216 struct sk_buff *skb, *tmp;
7217 struct sctp_ulpevent *event;
7218 struct sctp_bind_hashbucket *head;
7219 struct list_head tmplist;
7220
7221 /* Migrate socket buffer sizes and all the socket level options to the
7222 * new socket.
7223 */
7224 newsk->sk_sndbuf = oldsk->sk_sndbuf;
7225 newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
7226 /* Brute force copy old sctp opt. */
7227 if (oldsp->do_auto_asconf) {
7228 memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
7229 inet_sk_copy_descendant(newsk, oldsk);
7230 memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
7231 } else
7232 inet_sk_copy_descendant(newsk, oldsk);
7233
7234 /* Restore the ep value that was overwritten with the above structure
7235 * copy.
7236 */
7237 newsp->ep = newep;
7238 newsp->hmac = NULL;
7239
7240 /* Hook this new socket in to the bind_hash list. */
7241 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
7242 inet_sk(oldsk)->inet_num)];
7243 local_bh_disable();
7244 spin_lock(&head->lock);
7245 pp = sctp_sk(oldsk)->bind_hash;
7246 sk_add_bind_node(newsk, &pp->owner);
7247 sctp_sk(newsk)->bind_hash = pp;
7248 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
7249 spin_unlock(&head->lock);
7250 local_bh_enable();
7251
7252 /* Copy the bind_addr list from the original endpoint to the new
7253 * endpoint so that we can handle restarts properly
7254 */
7255 sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
7256 &oldsp->ep->base.bind_addr, GFP_KERNEL);
7257
7258 /* Move any messages in the old socket's receive queue that are for the
7259 * peeled off association to the new socket's receive queue.
7260 */
7261 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
7262 event = sctp_skb2event(skb);
7263 if (event->asoc == assoc) {
7264 __skb_unlink(skb, &oldsk->sk_receive_queue);
7265 __skb_queue_tail(&newsk->sk_receive_queue, skb);
7266 sctp_skb_set_owner_r_frag(skb, newsk);
7267 }
7268 }
7269
7270 /* Clean up any messages pending delivery due to partial
7271 * delivery. Three cases:
7272 	 * 1) No partial delivery; no work.
7273 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
7274 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
7275 */
7276 skb_queue_head_init(&newsp->pd_lobby);
7277 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
7278
7279 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
7280 struct sk_buff_head *queue;
7281
7282 /* Decide which queue to move pd_lobby skbs to. */
7283 if (assoc->ulpq.pd_mode)
7284 queue = &newsp->pd_lobby;
7285 else
7286 queue = &newsk->sk_receive_queue;
7287
7288 /* Walk through the pd_lobby, looking for skbs that
7289 * need to be moved to the new socket.
7290 */
7291 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
7292 event = sctp_skb2event(skb);
7293 if (event->asoc == assoc) {
7294 __skb_unlink(skb, &oldsp->pd_lobby);
7295 __skb_queue_tail(queue, skb);
7296 sctp_skb_set_owner_r_frag(skb, newsk);
7297 }
7298 }
7299
7300 * Clean up any skbs waiting for the partial
7301 * delivery to finish.
7302 */
7303 if (assoc->ulpq.pd_mode)
7304 sctp_clear_pd(oldsk, NULL);
7305
7306 }
7307
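/* Fragments on the association's reassembly and ordering (lobby) queues
 * stay where they are; only their receive-buffer accounting is switched
 * over to newsk, since the association itself is migrated below.
 */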
7308 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
7309 sctp_skb_set_owner_r_frag(skb, newsk);
7310
7311 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
7312 sctp_skb_set_owner_r_frag(skb, newsk);
7313
7314 /* Set the type of socket to indicate that it is peeled off from the
7315 * original UDP-style socket or created with the accept() call on a
7316 * TCP-style socket.
7317 */
7318 newsp->type = type;
7319
7320 /* Mark the new socket "in-use" by the user so that any packets
7321 * that may arrive on the association after we've moved it are
7322 * queued to the backlog. This prevents a potential race between
7323 * backlog processing on the old socket and new-packet processing
7324 * on the new socket.
7325 *
7326 * The caller has just allocated newsk so we can guarantee that other
7327 * paths won't try to lock it and then oldsk.
7328 */
7329 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
7330 sctp_assoc_migrate(assoc, newsk);
7331
7332 /* If the association on the newsk is already closed before accept()
7333 * is called, set the RCV_SHUTDOWN flag.
7334 */
7335 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP))
7336 newsk->sk_shutdown |= RCV_SHUTDOWN;
7337
7338 newsk->sk_state = SCTP_SS_ESTABLISHED;
7339 release_sock(newsk);
7340 }
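/* sctp_sock_migrate() is reached from two places in this file:
 * sctp_accept() on a TCP-style socket, and sctp_do_peeloff() (via the
 * SCTP_SOCKOPT_PEELOFF socket option) on a UDP-style socket.  A rough
 * userspace sketch of the peeloff path, using the lksctp-tools helper
 * sctp_peeloff() -- the helper and the surrounding setup are assumptions
 * for illustration, not part of this file:
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	// ... bind(), listen(), receive data and learn assoc_id ...
 *	int assoc_sd = sctp_peeloff(sd, assoc_id);
 *	// assoc_sd is now a one-to-one style socket whose association,
 *	// receive queue and memory accounting were moved by the code above.
 */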
7341
7342
7343 /* This proto struct describes the ULP interface for SCTP. */
7344 struct proto sctp_prot = {
7345 .name = "SCTP",
7346 .owner = THIS_MODULE,
7347 .close = sctp_close,
7348 .connect = sctp_connect,
7349 .disconnect = sctp_disconnect,
7350 .accept = sctp_accept,
7351 .ioctl = sctp_ioctl,
7352 .init = sctp_init_sock,
7353 .destroy = sctp_destroy_sock,
7354 .shutdown = sctp_shutdown,
7355 .setsockopt = sctp_setsockopt,
7356 .getsockopt = sctp_getsockopt,
7357 .sendmsg = sctp_sendmsg,
7358 .recvmsg = sctp_recvmsg,
7359 .bind = sctp_bind,
7360 .backlog_rcv = sctp_backlog_rcv,
7361 .hash = sctp_hash,
7362 .unhash = sctp_unhash,
7363 .get_port = sctp_get_port,
7364 .obj_size = sizeof(struct sctp_sock),
7365 .sysctl_mem = sysctl_sctp_mem,
7366 .sysctl_rmem = sysctl_sctp_rmem,
7367 .sysctl_wmem = sysctl_sctp_wmem,
7368 .memory_pressure = &sctp_memory_pressure,
7369 .enter_memory_pressure = sctp_enter_memory_pressure,
7370 .memory_allocated = &sctp_memory_allocated,
7371 .sockets_allocated = &sctp_sockets_allocated,
7372 };
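/* sctp_prot (and, when IPv6 is enabled, sctpv6_prot below) is registered
 * with the socket layer during SCTP initialization; the proto
 * registration itself lives in protocol.c, not here.  Within this file
 * the v6 variant differs only in its name and in obj_size, which is
 * larger to hold the IPv6 state carried by struct sctp6_sock.
 */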
7373
7374 #if IS_ENABLED(CONFIG_IPV6)
7375
7376 struct proto sctpv6_prot = {
7377 .name = "SCTPv6",
7378 .owner = THIS_MODULE,
7379 .close = sctp_close,
7380 .connect = sctp_connect,
7381 .disconnect = sctp_disconnect,
7382 .accept = sctp_accept,
7383 .ioctl = sctp_ioctl,
7384 .init = sctp_init_sock,
7385 .destroy = sctp_destroy_sock,
7386 .shutdown = sctp_shutdown,
7387 .setsockopt = sctp_setsockopt,
7388 .getsockopt = sctp_getsockopt,
7389 .sendmsg = sctp_sendmsg,
7390 .recvmsg = sctp_recvmsg,
7391 .bind = sctp_bind,
7392 .backlog_rcv = sctp_backlog_rcv,
7393 .hash = sctp_hash,
7394 .unhash = sctp_unhash,
7395 .get_port = sctp_get_port,
7396 .obj_size = sizeof(struct sctp6_sock),
7397 .sysctl_mem = sysctl_sctp_mem,
7398 .sysctl_rmem = sysctl_sctp_rmem,
7399 .sysctl_wmem = sysctl_sctp_wmem,
7400 .memory_pressure = &sctp_memory_pressure,
7401 .enter_memory_pressure = sctp_enter_memory_pressure,
7402 .memory_allocated = &sctp_memory_allocated,
7403 .sockets_allocated = &sctp_sockets_allocated,
7404 };
7405 #endif /* IS_ENABLED(CONFIG_IPV6) */