net/ipv4/ip_sockglue.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * The IP to API glue.
7 *
8 * Authors: see ip.c
9 *
10 * Fixes:
11 * Many : Split from ip.c , see ip.c for history.
12 * Martin Mares : TOS setting fixed.
13 * Alan Cox : Fixed a couple of oopses in Martin's
14 * TOS tweaks.
15 * Mike McLagan : Routing by source
16 */
17
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/mm.h>
21 #include <linux/skbuff.h>
22 #include <linux/ip.h>
23 #include <linux/icmp.h>
24 #include <linux/inetdevice.h>
25 #include <linux/netdevice.h>
26 #include <linux/slab.h>
27 #include <net/sock.h>
28 #include <net/ip.h>
29 #include <net/icmp.h>
30 #include <net/tcp_states.h>
31 #include <linux/udp.h>
32 #include <linux/igmp.h>
33 #include <linux/netfilter.h>
34 #include <linux/route.h>
35 #include <linux/mroute.h>
36 #include <net/inet_ecn.h>
37 #include <net/route.h>
38 #include <net/xfrm.h>
39 #include <net/compat.h>
40 #include <net/checksum.h>
41 #if IS_ENABLED(CONFIG_IPV6)
42 #include <net/transp_v6.h>
43 #endif
44 #include <net/ip_fib.h>
45
46 #include <linux/errqueue.h>
47 #include <asm/uaccess.h>
48
49 /*
50 * SOL_IP control messages.
51 */
52
53 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
54 {
55 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
56
57 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
58
59 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
60 }
61
62 static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
63 {
64 int ttl = ip_hdr(skb)->ttl;
65 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
66 }
67
68 static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
69 {
70 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
71 }
72
73 static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
74 {
75 if (IPCB(skb)->opt.optlen == 0)
76 return;
77
78 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
79 ip_hdr(skb) + 1);
80 }
81
82
83 static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
84 {
85 unsigned char optbuf[sizeof(struct ip_options) + 40];
86 struct ip_options *opt = (struct ip_options *)optbuf;
87
88 if (IPCB(skb)->opt.optlen == 0)
89 return;
90
91 if (ip_options_echo(opt, skb)) {
92 msg->msg_flags |= MSG_CTRUNC;
93 return;
94 }
95 ip_options_undo(opt);
96
97 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
98 }
99
100 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
101 int offset)
102 {
103 __wsum csum = skb->csum;
104
105 if (skb->ip_summed != CHECKSUM_COMPLETE)
106 return;
107
108 if (offset != 0)
109 csum = csum_sub(csum, csum_partial(skb->data, offset, 0));
110
111 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
112 }
113
114 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
115 {
116 char *secdata;
117 u32 seclen, secid;
118 int err;
119
120 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
121 if (err)
122 return;
123
124 err = security_secid_to_secctx(secid, &secdata, &seclen);
125 if (err)
126 return;
127
128 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
129 security_release_secctx(secdata, seclen);
130 }
131
132 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
133 {
134 struct sockaddr_in sin;
135 const struct iphdr *iph = ip_hdr(skb);
136 __be16 *ports = (__be16 *)skb_transport_header(skb);
137
138 if (skb_transport_offset(skb) + 4 > skb->len)
139 return;
140
141 /* All current transport protocols have the port numbers in the
142 * first four bytes of the transport header and this function is
143 * written with this assumption in mind.
144 */
145
146 sin.sin_family = AF_INET;
147 sin.sin_addr.s_addr = iph->daddr;
148 sin.sin_port = ports[1];
149 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
150
151 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
152 }
153
154 void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
155 int offset)
156 {
157 struct inet_sock *inet = inet_sk(skb->sk);
158 unsigned int flags = inet->cmsg_flags;
159
160 /* Ordered by supposed usage frequency */
161 if (flags & IP_CMSG_PKTINFO) {
162 ip_cmsg_recv_pktinfo(msg, skb);
163
164 flags &= ~IP_CMSG_PKTINFO;
165 if (!flags)
166 return;
167 }
168
169 if (flags & IP_CMSG_TTL) {
170 ip_cmsg_recv_ttl(msg, skb);
171
172 flags &= ~IP_CMSG_TTL;
173 if (!flags)
174 return;
175 }
176
177 if (flags & IP_CMSG_TOS) {
178 ip_cmsg_recv_tos(msg, skb);
179
180 flags &= ~IP_CMSG_TOS;
181 if (!flags)
182 return;
183 }
184
185 if (flags & IP_CMSG_RECVOPTS) {
186 ip_cmsg_recv_opts(msg, skb);
187
188 flags &= ~IP_CMSG_RECVOPTS;
189 if (!flags)
190 return;
191 }
192
193 if (flags & IP_CMSG_RETOPTS) {
194 ip_cmsg_recv_retopts(msg, skb);
195
196 flags &= ~IP_CMSG_RETOPTS;
197 if (!flags)
198 return;
199 }
200
201 if (flags & IP_CMSG_PASSSEC) {
202 ip_cmsg_recv_security(msg, skb);
203
204 flags &= ~IP_CMSG_PASSSEC;
205 if (!flags)
206 return;
207 }
208
209 if (flags & IP_CMSG_ORIGDSTADDR) {
210 ip_cmsg_recv_dstaddr(msg, skb);
211
212 flags &= ~IP_CMSG_ORIGDSTADDR;
213 if (!flags)
214 return;
215 }
216
217 if (flags & IP_CMSG_CHECKSUM)
218 ip_cmsg_recv_checksum(msg, skb, offset);
219 }
220 EXPORT_SYMBOL(ip_cmsg_recv_offset);
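/*
 * Illustrative userspace sketch, not part of this kernel file: consuming the
 * IP_PKTINFO ancillary data that ip_cmsg_recv_offset() assembles above.
 * A minimal example assuming an already-bound UDP socket; error handling is
 * omitted.  Needs <sys/socket.h> and <netinet/in.h>, built with -D_GNU_SOURCE
 * for struct in_pktinfo.
 */
#if 0
static void recv_with_pktinfo(int fd)
{
	int on = 1;
	char data[1500];
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	/* ask the kernel to attach IP_PKTINFO to every received datagram */
	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
	recvmsg(fd, &msg, 0);

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IP &&
		    cmsg->cmsg_type == IP_PKTINFO) {
			struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(cmsg);
			/* pi->ipi_ifindex: arrival interface,
			 * pi->ipi_addr: destination address from the IP header */
		}
	}
}
#endif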
221
222 int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
223 bool allow_ipv6)
224 {
225 int err, val;
226 struct cmsghdr *cmsg;
227
228 for_each_cmsghdr(cmsg, msg) {
229 if (!CMSG_OK(msg, cmsg))
230 return -EINVAL;
231 #if IS_ENABLED(CONFIG_IPV6)
232 if (allow_ipv6 &&
233 cmsg->cmsg_level == SOL_IPV6 &&
234 cmsg->cmsg_type == IPV6_PKTINFO) {
235 struct in6_pktinfo *src_info;
236
237 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
238 return -EINVAL;
239 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
240 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
241 return -EINVAL;
242 ipc->oif = src_info->ipi6_ifindex;
243 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
244 continue;
245 }
246 #endif
247 if (cmsg->cmsg_level != SOL_IP)
248 continue;
249 switch (cmsg->cmsg_type) {
250 case IP_RETOPTS:
251 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
252 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
253 err < 40 ? err : 40);
254 if (err)
255 return err;
256 break;
257 case IP_PKTINFO:
258 {
259 struct in_pktinfo *info;
260 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
261 return -EINVAL;
262 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
263 ipc->oif = info->ipi_ifindex;
264 ipc->addr = info->ipi_spec_dst.s_addr;
265 break;
266 }
267 case IP_TTL:
268 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
269 return -EINVAL;
270 val = *(int *)CMSG_DATA(cmsg);
271 if (val < 1 || val > 255)
272 return -EINVAL;
273 ipc->ttl = val;
274 break;
275 case IP_TOS:
276 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
277 return -EINVAL;
278 val = *(int *)CMSG_DATA(cmsg);
279 if (val < 0 || val > 255)
280 return -EINVAL;
281 ipc->tos = val;
282 ipc->priority = rt_tos2priority(ipc->tos);
283 break;
284
285 default:
286 return -EINVAL;
287 }
288 }
289 return 0;
290 }
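/*
 * Illustrative userspace sketch, not part of this kernel file: attaching a
 * per-datagram TTL as ancillary data on sendmsg(), which ip_cmsg_send()
 * above parses into ipc->ttl.  Assumes a connected UDP socket; error
 * handling is omitted.  Needs <sys/socket.h>, <netinet/in.h>, <string.h>.
 */
#if 0
static void send_with_ttl(int fd)
{
	int ttl = 5;
	char payload[] = "probe";
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	memset(cbuf, 0, sizeof(cbuf));
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type = IP_TTL;
	cmsg->cmsg_len = CMSG_LEN(sizeof(ttl));
	memcpy(CMSG_DATA(cmsg), &ttl, sizeof(ttl));

	sendmsg(fd, &msg, 0);
}
#endif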
291
292
293 /* Special input handler for packets caught by the router alert option.
294 They are selected only by protocol field, and then processed like
295 local ones; but only if someone wants them! Otherwise, a router
296 not running rsvpd would kill RSVP.
297
298 What user level does with them is its own problem.
299 I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
300 but the receiver should be clever enough, e.g. to forward mtrace requests
301 sent to a multicast group so they reach the destination's designated router.
302 */
303 struct ip_ra_chain __rcu *ip_ra_chain;
304 static DEFINE_SPINLOCK(ip_ra_lock);
305
306
307 static void ip_ra_destroy_rcu(struct rcu_head *head)
308 {
309 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
310
311 sock_put(ra->saved_sk);
312 kfree(ra);
313 }
314
315 int ip_ra_control(struct sock *sk, unsigned char on,
316 void (*destructor)(struct sock *))
317 {
318 struct ip_ra_chain *ra, *new_ra;
319 struct ip_ra_chain __rcu **rap;
320
321 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
322 return -EINVAL;
323
324 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
325
326 spin_lock_bh(&ip_ra_lock);
327 for (rap = &ip_ra_chain;
328 (ra = rcu_dereference_protected(*rap,
329 lockdep_is_held(&ip_ra_lock))) != NULL;
330 rap = &ra->next) {
331 if (ra->sk == sk) {
332 if (on) {
333 spin_unlock_bh(&ip_ra_lock);
334 kfree(new_ra);
335 return -EADDRINUSE;
336 }
337 /* don't let ip_call_ra_chain() use sk again */
338 ra->sk = NULL;
339 RCU_INIT_POINTER(*rap, ra->next);
340 spin_unlock_bh(&ip_ra_lock);
341
342 if (ra->destructor)
343 ra->destructor(sk);
344 /*
345 * Delay sock_put(sk) and kfree(ra) until after one RCU grace
346 * period. This guarantees ip_call_ra_chain() doesn't need
347 * to mess with socket refcounts.
348 */
349 ra->saved_sk = sk;
350 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
351 return 0;
352 }
353 }
354 if (new_ra == NULL) {
355 spin_unlock_bh(&ip_ra_lock);
356 return -ENOBUFS;
357 }
358 new_ra->sk = sk;
359 new_ra->destructor = destructor;
360
361 RCU_INIT_POINTER(new_ra->next, ra);
362 rcu_assign_pointer(*rap, new_ra);
363 sock_hold(sk);
364 spin_unlock_bh(&ip_ra_lock);
365
366 return 0;
367 }
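/*
 * Illustrative userspace sketch, not part of this kernel file: how a daemon
 * such as rsvpd would register for router-alert packets, ending up in
 * ip_ra_control() above.  The socket must be SOCK_RAW but not IPPROTO_RAW,
 * exactly as checked above; opening it requires CAP_NET_RAW.  Error handling
 * is omitted.
 */
#if 0
static int open_rsvp_listener(void)
{
	int on = 1;
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);

	setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on));
	return fd;
}
#endif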
368
369 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
370 __be16 port, u32 info, u8 *payload)
371 {
372 struct sock_exterr_skb *serr;
373
374 skb = skb_clone(skb, GFP_ATOMIC);
375 if (!skb)
376 return;
377
378 serr = SKB_EXT_ERR(skb);
379 serr->ee.ee_errno = err;
380 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
381 serr->ee.ee_type = icmp_hdr(skb)->type;
382 serr->ee.ee_code = icmp_hdr(skb)->code;
383 serr->ee.ee_pad = 0;
384 serr->ee.ee_info = info;
385 serr->ee.ee_data = 0;
386 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
387 skb_network_header(skb);
388 serr->port = port;
389
390 if (skb_pull(skb, payload - skb->data) != NULL) {
391 skb_reset_transport_header(skb);
392 if (sock_queue_err_skb(sk, skb) == 0)
393 return;
394 }
395 kfree_skb(skb);
396 }
397
398 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
399 {
400 struct inet_sock *inet = inet_sk(sk);
401 struct sock_exterr_skb *serr;
402 struct iphdr *iph;
403 struct sk_buff *skb;
404
405 if (!inet->recverr)
406 return;
407
408 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
409 if (!skb)
410 return;
411
412 skb_put(skb, sizeof(struct iphdr));
413 skb_reset_network_header(skb);
414 iph = ip_hdr(skb);
415 iph->daddr = daddr;
416
417 serr = SKB_EXT_ERR(skb);
418 serr->ee.ee_errno = err;
419 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
420 serr->ee.ee_type = 0;
421 serr->ee.ee_code = 0;
422 serr->ee.ee_pad = 0;
423 serr->ee.ee_info = info;
424 serr->ee.ee_data = 0;
425 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
426 serr->port = port;
427
428 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
429 skb_reset_transport_header(skb);
430
431 if (sock_queue_err_skb(sk, skb))
432 kfree_skb(skb);
433 }
434
435 static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk,
436 const struct sk_buff *skb,
437 int ee_origin)
438 {
439 struct in_pktinfo *info = PKTINFO_SKB_CB(skb);
440
441 if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) ||
442 (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
443 (!skb->dev))
444 return false;
445
446 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
447 info->ipi_ifindex = skb->dev->ifindex;
448 return true;
449 }
450
451 /*
452 * Handle MSG_ERRQUEUE
453 */
454 int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
455 {
456 struct sock_exterr_skb *serr;
457 struct sk_buff *skb;
458 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
459 struct {
460 struct sock_extended_err ee;
461 struct sockaddr_in offender;
462 } errhdr;
463 int err;
464 int copied;
465
466 WARN_ON_ONCE(sk->sk_family == AF_INET6);
467
468 err = -EAGAIN;
469 skb = sock_dequeue_err_skb(sk);
470 if (skb == NULL)
471 goto out;
472
473 copied = skb->len;
474 if (copied > len) {
475 msg->msg_flags |= MSG_TRUNC;
476 copied = len;
477 }
478 err = skb_copy_datagram_msg(skb, 0, msg, copied);
479 if (err)
480 goto out_free_skb;
481
482 sock_recv_timestamp(msg, sk, skb);
483
484 serr = SKB_EXT_ERR(skb);
485
486 if (sin && skb->len) {
487 sin->sin_family = AF_INET;
488 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
489 serr->addr_offset);
490 sin->sin_port = serr->port;
491 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
492 *addr_len = sizeof(*sin);
493 }
494
495 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
496 sin = &errhdr.offender;
497 memset(sin, 0, sizeof(*sin));
498
499 if (skb->len &&
500 (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
501 ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) {
502 sin->sin_family = AF_INET;
503 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
504 if (inet_sk(sk)->cmsg_flags)
505 ip_cmsg_recv(msg, skb);
506 }
507
508 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
509
510 /* Now we could try to dump the offending packet's options */
511
512 msg->msg_flags |= MSG_ERRQUEUE;
513 err = copied;
514
515 out_free_skb:
516 kfree_skb(skb);
517 out:
518 return err;
519 }
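/*
 * Illustrative userspace sketch, not part of this kernel file: draining the
 * error queue that ip_recv_error() above services.  Assumes a UDP socket
 * that has provoked an ICMP error (e.g. port unreachable); error handling is
 * omitted.  Needs <sys/socket.h>, <netinet/in.h>, <string.h> and
 * <linux/errqueue.h>.
 */
#if 0
static void drain_errqueue(int fd)
{
	int on = 1;
	char data[1500];
	char cbuf[512];
	struct sockaddr_in remote, offender;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_name = &remote, .msg_namelen = sizeof(remote),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
	/* ... send something that draws an ICMP error ... */
	recvmsg(fd, &msg, MSG_ERRQUEUE);

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IP &&
		    cmsg->cmsg_type == IP_RECVERR) {
			struct sock_extended_err *ee;

			ee = (struct sock_extended_err *)CMSG_DATA(cmsg);
			memcpy(&offender, SO_EE_OFFENDER(ee), sizeof(offender));
			/* ee->ee_errno is the error, ee->ee_type/ee_code the
			 * ICMP type/code, offender the host that sent it */
		}
	}
}
#endif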
520
521
522 /*
523 * Socket option code for IP. This is the end of the line after any
524 * TCP,UDP etc options on an IP socket.
525 */
526
527 static int do_ip_setsockopt(struct sock *sk, int level,
528 int optname, char __user *optval, unsigned int optlen)
529 {
530 struct inet_sock *inet = inet_sk(sk);
531 int val = 0, err;
532
533 switch (optname) {
534 case IP_PKTINFO:
535 case IP_RECVTTL:
536 case IP_RECVOPTS:
537 case IP_RECVTOS:
538 case IP_RETOPTS:
539 case IP_TOS:
540 case IP_TTL:
541 case IP_HDRINCL:
542 case IP_MTU_DISCOVER:
543 case IP_RECVERR:
544 case IP_ROUTER_ALERT:
545 case IP_FREEBIND:
546 case IP_PASSSEC:
547 case IP_TRANSPARENT:
548 case IP_MINTTL:
549 case IP_NODEFRAG:
550 case IP_UNICAST_IF:
551 case IP_MULTICAST_TTL:
552 case IP_MULTICAST_ALL:
553 case IP_MULTICAST_LOOP:
554 case IP_RECVORIGDSTADDR:
555 case IP_CHECKSUM:
556 if (optlen >= sizeof(int)) {
557 if (get_user(val, (int __user *) optval))
558 return -EFAULT;
559 } else if (optlen >= sizeof(char)) {
560 unsigned char ucval;
561
562 if (get_user(ucval, (unsigned char __user *) optval))
563 return -EFAULT;
564 val = (int) ucval;
565 }
566 }
567
568 /* If optlen==0, it is equivalent to val == 0 */
569
570 if (ip_mroute_opt(optname))
571 return ip_mroute_setsockopt(sk, optname, optval, optlen);
572
573 err = 0;
574 lock_sock(sk);
575
576 switch (optname) {
577 case IP_OPTIONS:
578 {
579 struct ip_options_rcu *old, *opt = NULL;
580
581 if (optlen > 40)
582 goto e_inval;
583 err = ip_options_get_from_user(sock_net(sk), &opt,
584 optval, optlen);
585 if (err)
586 break;
587 old = rcu_dereference_protected(inet->inet_opt,
588 sock_owned_by_user(sk));
589 if (inet->is_icsk) {
590 struct inet_connection_sock *icsk = inet_csk(sk);
591 #if IS_ENABLED(CONFIG_IPV6)
592 if (sk->sk_family == PF_INET ||
593 (!((1 << sk->sk_state) &
594 (TCPF_LISTEN | TCPF_CLOSE)) &&
595 inet->inet_daddr != LOOPBACK4_IPV6)) {
596 #endif
597 if (old)
598 icsk->icsk_ext_hdr_len -= old->opt.optlen;
599 if (opt)
600 icsk->icsk_ext_hdr_len += opt->opt.optlen;
601 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
602 #if IS_ENABLED(CONFIG_IPV6)
603 }
604 #endif
605 }
606 rcu_assign_pointer(inet->inet_opt, opt);
607 if (old)
608 kfree_rcu(old, rcu);
609 break;
610 }
611 case IP_PKTINFO:
612 if (val)
613 inet->cmsg_flags |= IP_CMSG_PKTINFO;
614 else
615 inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
616 break;
617 case IP_RECVTTL:
618 if (val)
619 inet->cmsg_flags |= IP_CMSG_TTL;
620 else
621 inet->cmsg_flags &= ~IP_CMSG_TTL;
622 break;
623 case IP_RECVTOS:
624 if (val)
625 inet->cmsg_flags |= IP_CMSG_TOS;
626 else
627 inet->cmsg_flags &= ~IP_CMSG_TOS;
628 break;
629 case IP_RECVOPTS:
630 if (val)
631 inet->cmsg_flags |= IP_CMSG_RECVOPTS;
632 else
633 inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
634 break;
635 case IP_RETOPTS:
636 if (val)
637 inet->cmsg_flags |= IP_CMSG_RETOPTS;
638 else
639 inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
640 break;
641 case IP_PASSSEC:
642 if (val)
643 inet->cmsg_flags |= IP_CMSG_PASSSEC;
644 else
645 inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
646 break;
647 case IP_RECVORIGDSTADDR:
648 if (val)
649 inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
650 else
651 inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
652 break;
653 case IP_CHECKSUM:
654 if (val) {
655 if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
656 inet_inc_convert_csum(sk);
657 inet->cmsg_flags |= IP_CMSG_CHECKSUM;
658 }
659 } else {
660 if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
661 inet_dec_convert_csum(sk);
662 inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
663 }
664 }
665 break;
666 case IP_TOS: /* This sets both TOS and Precedence */
667 if (sk->sk_type == SOCK_STREAM) {
668 val &= ~INET_ECN_MASK;
669 val |= inet->tos & INET_ECN_MASK;
670 }
671 if (inet->tos != val) {
672 inet->tos = val;
673 sk->sk_priority = rt_tos2priority(val);
674 sk_dst_reset(sk);
675 }
676 break;
677 case IP_TTL:
678 if (optlen < 1)
679 goto e_inval;
680 if (val != -1 && (val < 1 || val > 255))
681 goto e_inval;
682 inet->uc_ttl = val;
683 break;
684 case IP_HDRINCL:
685 if (sk->sk_type != SOCK_RAW) {
686 err = -ENOPROTOOPT;
687 break;
688 }
689 inet->hdrincl = val ? 1 : 0;
690 break;
691 case IP_NODEFRAG:
692 if (sk->sk_type != SOCK_RAW) {
693 err = -ENOPROTOOPT;
694 break;
695 }
696 inet->nodefrag = val ? 1 : 0;
697 break;
698 case IP_MTU_DISCOVER:
699 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
700 goto e_inval;
701 inet->pmtudisc = val;
702 break;
703 case IP_RECVERR:
704 inet->recverr = !!val;
705 if (!val)
706 skb_queue_purge(&sk->sk_error_queue);
707 break;
708 case IP_MULTICAST_TTL:
709 if (sk->sk_type == SOCK_STREAM)
710 goto e_inval;
711 if (optlen < 1)
712 goto e_inval;
713 if (val == -1)
714 val = 1;
715 if (val < 0 || val > 255)
716 goto e_inval;
717 inet->mc_ttl = val;
718 break;
719 case IP_MULTICAST_LOOP:
720 if (optlen < 1)
721 goto e_inval;
722 inet->mc_loop = !!val;
723 break;
724 case IP_UNICAST_IF:
725 {
726 struct net_device *dev = NULL;
727 int ifindex;
728
729 if (optlen != sizeof(int))
730 goto e_inval;
731
732 ifindex = (__force int)ntohl((__force __be32)val);
733 if (ifindex == 0) {
734 inet->uc_index = 0;
735 err = 0;
736 break;
737 }
738
739 dev = dev_get_by_index(sock_net(sk), ifindex);
740 err = -EADDRNOTAVAIL;
741 if (!dev)
742 break;
743 dev_put(dev);
744
745 err = -EINVAL;
746 if (sk->sk_bound_dev_if)
747 break;
748
749 inet->uc_index = ifindex;
750 err = 0;
751 break;
752 }
753 case IP_MULTICAST_IF:
754 {
755 struct ip_mreqn mreq;
756 struct net_device *dev = NULL;
757
758 if (sk->sk_type == SOCK_STREAM)
759 goto e_inval;
760 /*
761 * Check the arguments are allowable
762 */
763
764 if (optlen < sizeof(struct in_addr))
765 goto e_inval;
766
767 err = -EFAULT;
768 if (optlen >= sizeof(struct ip_mreqn)) {
769 if (copy_from_user(&mreq, optval, sizeof(mreq)))
770 break;
771 } else {
772 memset(&mreq, 0, sizeof(mreq));
773 if (optlen >= sizeof(struct ip_mreq)) {
774 if (copy_from_user(&mreq, optval,
775 sizeof(struct ip_mreq)))
776 break;
777 } else if (optlen >= sizeof(struct in_addr)) {
778 if (copy_from_user(&mreq.imr_address, optval,
779 sizeof(struct in_addr)))
780 break;
781 }
782 }
783
784 if (!mreq.imr_ifindex) {
785 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
786 inet->mc_index = 0;
787 inet->mc_addr = 0;
788 err = 0;
789 break;
790 }
791 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
792 if (dev)
793 mreq.imr_ifindex = dev->ifindex;
794 } else
795 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
796
797
798 err = -EADDRNOTAVAIL;
799 if (!dev)
800 break;
801 dev_put(dev);
802
803 err = -EINVAL;
804 if (sk->sk_bound_dev_if &&
805 mreq.imr_ifindex != sk->sk_bound_dev_if)
806 break;
807
808 inet->mc_index = mreq.imr_ifindex;
809 inet->mc_addr = mreq.imr_address.s_addr;
810 err = 0;
811 break;
812 }
813
814 case IP_ADD_MEMBERSHIP:
815 case IP_DROP_MEMBERSHIP:
816 {
817 struct ip_mreqn mreq;
818
819 err = -EPROTO;
820 if (inet_sk(sk)->is_icsk)
821 break;
822
823 if (optlen < sizeof(struct ip_mreq))
824 goto e_inval;
825 err = -EFAULT;
826 if (optlen >= sizeof(struct ip_mreqn)) {
827 if (copy_from_user(&mreq, optval, sizeof(mreq)))
828 break;
829 } else {
830 memset(&mreq, 0, sizeof(mreq));
831 if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
832 break;
833 }
834
835 if (optname == IP_ADD_MEMBERSHIP)
836 err = ip_mc_join_group(sk, &mreq);
837 else
838 err = ip_mc_leave_group(sk, &mreq);
839 break;
840 }
841 case IP_MSFILTER:
842 {
843 struct ip_msfilter *msf;
844
845 if (optlen < IP_MSFILTER_SIZE(0))
846 goto e_inval;
847 if (optlen > sysctl_optmem_max) {
848 err = -ENOBUFS;
849 break;
850 }
851 msf = kmalloc(optlen, GFP_KERNEL);
852 if (!msf) {
853 err = -ENOBUFS;
854 break;
855 }
856 err = -EFAULT;
857 if (copy_from_user(msf, optval, optlen)) {
858 kfree(msf);
859 break;
860 }
861 /* numsrc >= (1G-4) overflow in 32 bits */
862 if (msf->imsf_numsrc >= 0x3ffffffcU ||
863 msf->imsf_numsrc > sysctl_igmp_max_msf) {
864 kfree(msf);
865 err = -ENOBUFS;
866 break;
867 }
868 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
869 kfree(msf);
870 err = -EINVAL;
871 break;
872 }
873 err = ip_mc_msfilter(sk, msf, 0);
874 kfree(msf);
875 break;
876 }
877 case IP_BLOCK_SOURCE:
878 case IP_UNBLOCK_SOURCE:
879 case IP_ADD_SOURCE_MEMBERSHIP:
880 case IP_DROP_SOURCE_MEMBERSHIP:
881 {
882 struct ip_mreq_source mreqs;
883 int omode, add;
884
885 if (optlen != sizeof(struct ip_mreq_source))
886 goto e_inval;
887 if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
888 err = -EFAULT;
889 break;
890 }
891 if (optname == IP_BLOCK_SOURCE) {
892 omode = MCAST_EXCLUDE;
893 add = 1;
894 } else if (optname == IP_UNBLOCK_SOURCE) {
895 omode = MCAST_EXCLUDE;
896 add = 0;
897 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
898 struct ip_mreqn mreq;
899
900 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
901 mreq.imr_address.s_addr = mreqs.imr_interface;
902 mreq.imr_ifindex = 0;
903 err = ip_mc_join_group(sk, &mreq);
904 if (err && err != -EADDRINUSE)
905 break;
906 omode = MCAST_INCLUDE;
907 add = 1;
908 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
909 omode = MCAST_INCLUDE;
910 add = 0;
911 }
912 err = ip_mc_source(add, omode, sk, &mreqs, 0);
913 break;
914 }
915 case MCAST_JOIN_GROUP:
916 case MCAST_LEAVE_GROUP:
917 {
918 struct group_req greq;
919 struct sockaddr_in *psin;
920 struct ip_mreqn mreq;
921
922 if (optlen < sizeof(struct group_req))
923 goto e_inval;
924 err = -EFAULT;
925 if (copy_from_user(&greq, optval, sizeof(greq)))
926 break;
927 psin = (struct sockaddr_in *)&greq.gr_group;
928 if (psin->sin_family != AF_INET)
929 goto e_inval;
930 memset(&mreq, 0, sizeof(mreq));
931 mreq.imr_multiaddr = psin->sin_addr;
932 mreq.imr_ifindex = greq.gr_interface;
933
934 if (optname == MCAST_JOIN_GROUP)
935 err = ip_mc_join_group(sk, &mreq);
936 else
937 err = ip_mc_leave_group(sk, &mreq);
938 break;
939 }
940 case MCAST_JOIN_SOURCE_GROUP:
941 case MCAST_LEAVE_SOURCE_GROUP:
942 case MCAST_BLOCK_SOURCE:
943 case MCAST_UNBLOCK_SOURCE:
944 {
945 struct group_source_req greqs;
946 struct ip_mreq_source mreqs;
947 struct sockaddr_in *psin;
948 int omode, add;
949
950 if (optlen != sizeof(struct group_source_req))
951 goto e_inval;
952 if (copy_from_user(&greqs, optval, sizeof(greqs))) {
953 err = -EFAULT;
954 break;
955 }
956 if (greqs.gsr_group.ss_family != AF_INET ||
957 greqs.gsr_source.ss_family != AF_INET) {
958 err = -EADDRNOTAVAIL;
959 break;
960 }
961 psin = (struct sockaddr_in *)&greqs.gsr_group;
962 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
963 psin = (struct sockaddr_in *)&greqs.gsr_source;
964 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
965 mreqs.imr_interface = 0; /* use index for mc_source */
966
967 if (optname == MCAST_BLOCK_SOURCE) {
968 omode = MCAST_EXCLUDE;
969 add = 1;
970 } else if (optname == MCAST_UNBLOCK_SOURCE) {
971 omode = MCAST_EXCLUDE;
972 add = 0;
973 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
974 struct ip_mreqn mreq;
975
976 psin = (struct sockaddr_in *)&greqs.gsr_group;
977 mreq.imr_multiaddr = psin->sin_addr;
978 mreq.imr_address.s_addr = 0;
979 mreq.imr_ifindex = greqs.gsr_interface;
980 err = ip_mc_join_group(sk, &mreq);
981 if (err && err != -EADDRINUSE)
982 break;
983 greqs.gsr_interface = mreq.imr_ifindex;
984 omode = MCAST_INCLUDE;
985 add = 1;
986 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
987 omode = MCAST_INCLUDE;
988 add = 0;
989 }
990 err = ip_mc_source(add, omode, sk, &mreqs,
991 greqs.gsr_interface);
992 break;
993 }
994 case MCAST_MSFILTER:
995 {
996 struct sockaddr_in *psin;
997 struct ip_msfilter *msf = NULL;
998 struct group_filter *gsf = NULL;
999 int msize, i, ifindex;
1000
1001 if (optlen < GROUP_FILTER_SIZE(0))
1002 goto e_inval;
1003 if (optlen > sysctl_optmem_max) {
1004 err = -ENOBUFS;
1005 break;
1006 }
1007 gsf = kmalloc(optlen, GFP_KERNEL);
1008 if (!gsf) {
1009 err = -ENOBUFS;
1010 break;
1011 }
1012 err = -EFAULT;
1013 if (copy_from_user(gsf, optval, optlen))
1014 goto mc_msf_out;
1015
1016 /* numsrc >= (4G-140)/128 overflow in 32 bits */
1017 if (gsf->gf_numsrc >= 0x1ffffff ||
1018 gsf->gf_numsrc > sysctl_igmp_max_msf) {
1019 err = -ENOBUFS;
1020 goto mc_msf_out;
1021 }
1022 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
1023 err = -EINVAL;
1024 goto mc_msf_out;
1025 }
1026 msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
1027 msf = kmalloc(msize, GFP_KERNEL);
1028 if (!msf) {
1029 err = -ENOBUFS;
1030 goto mc_msf_out;
1031 }
1032 ifindex = gsf->gf_interface;
1033 psin = (struct sockaddr_in *)&gsf->gf_group;
1034 if (psin->sin_family != AF_INET) {
1035 err = -EADDRNOTAVAIL;
1036 goto mc_msf_out;
1037 }
1038 msf->imsf_multiaddr = psin->sin_addr.s_addr;
1039 msf->imsf_interface = 0;
1040 msf->imsf_fmode = gsf->gf_fmode;
1041 msf->imsf_numsrc = gsf->gf_numsrc;
1042 err = -EADDRNOTAVAIL;
1043 for (i = 0; i < gsf->gf_numsrc; ++i) {
1044 psin = (struct sockaddr_in *)&gsf->gf_slist[i];
1045
1046 if (psin->sin_family != AF_INET)
1047 goto mc_msf_out;
1048 msf->imsf_slist[i] = psin->sin_addr.s_addr;
1049 }
1050 kfree(gsf);
1051 gsf = NULL;
1052
1053 err = ip_mc_msfilter(sk, msf, ifindex);
1054 mc_msf_out:
1055 kfree(msf);
1056 kfree(gsf);
1057 break;
1058 }
1059 case IP_MULTICAST_ALL:
1060 if (optlen < 1)
1061 goto e_inval;
1062 if (val != 0 && val != 1)
1063 goto e_inval;
1064 inet->mc_all = val;
1065 break;
1066 case IP_ROUTER_ALERT:
1067 err = ip_ra_control(sk, val ? 1 : 0, NULL);
1068 break;
1069
1070 case IP_FREEBIND:
1071 if (optlen < 1)
1072 goto e_inval;
1073 inet->freebind = !!val;
1074 break;
1075
1076 case IP_IPSEC_POLICY:
1077 case IP_XFRM_POLICY:
1078 err = -EPERM;
1079 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1080 break;
1081 err = xfrm_user_policy(sk, optname, optval, optlen);
1082 break;
1083
1084 case IP_TRANSPARENT:
1085 if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1086 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1087 err = -EPERM;
1088 break;
1089 }
1090 if (optlen < 1)
1091 goto e_inval;
1092 inet->transparent = !!val;
1093 break;
1094
1095 case IP_MINTTL:
1096 if (optlen < 1)
1097 goto e_inval;
1098 if (val < 0 || val > 255)
1099 goto e_inval;
1100 inet->min_ttl = val;
1101 break;
1102
1103 default:
1104 err = -ENOPROTOOPT;
1105 break;
1106 }
1107 release_sock(sk);
1108 return err;
1109
1110 e_inval:
1111 release_sock(sk);
1112 return -EINVAL;
1113 }
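/*
 * Illustrative userspace sketch, not part of this kernel file: the kind of
 * setsockopt() calls that land in do_ip_setsockopt() above.  'fd' is any
 * IPv4 socket; error handling is omitted.  Needs <netinet/in.h> and
 * <netinet/ip.h> for IPTOS_LOWDELAY.
 */
#if 0
static void tune_ip_socket(int fd)
{
	int tos = IPTOS_LOWDELAY;	/* handled by the IP_TOS case */
	int ttl = 64;			/* handled by the IP_TTL case */
	int pmtu = IP_PMTUDISC_DO;	/* always set DF, never fragment locally */

	setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
	setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl));
	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &pmtu, sizeof(pmtu));
}
#endif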
1114
1115 /**
1116 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1117 * @sk: socket
1118 * @skb: buffer
1119 *
1120 * To support the IP_CMSG_PKTINFO option, we store rt_iif and the specific
1121 * destination in skb->cb[] before the dst is dropped.
1122 * This way, the receiver doesn't take cache line misses to read the rtable.
1123 */
1124 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
1125 {
1126 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1127 bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
1128 ipv6_sk_rxinfo(sk);
1129
1130 if (prepare && skb_rtable(skb)) {
1131 pktinfo->ipi_ifindex = inet_iif(skb);
1132 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1133 } else {
1134 pktinfo->ipi_ifindex = 0;
1135 pktinfo->ipi_spec_dst.s_addr = 0;
1136 }
1137 skb_dst_drop(skb);
1138 }
1139
1140 int ip_setsockopt(struct sock *sk, int level,
1141 int optname, char __user *optval, unsigned int optlen)
1142 {
1143 int err;
1144
1145 if (level != SOL_IP)
1146 return -ENOPROTOOPT;
1147
1148 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1149 #ifdef CONFIG_NETFILTER
1150 /* we need to exclude all possible ENOPROTOOPTs except default case */
1151 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1152 optname != IP_IPSEC_POLICY &&
1153 optname != IP_XFRM_POLICY &&
1154 !ip_mroute_opt(optname)) {
1155 lock_sock(sk);
1156 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1157 release_sock(sk);
1158 }
1159 #endif
1160 return err;
1161 }
1162 EXPORT_SYMBOL(ip_setsockopt);
1163
1164 #ifdef CONFIG_COMPAT
1165 int compat_ip_setsockopt(struct sock *sk, int level, int optname,
1166 char __user *optval, unsigned int optlen)
1167 {
1168 int err;
1169
1170 if (level != SOL_IP)
1171 return -ENOPROTOOPT;
1172
1173 if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
1174 return compat_mc_setsockopt(sk, level, optname, optval, optlen,
1175 ip_setsockopt);
1176
1177 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1178 #ifdef CONFIG_NETFILTER
1179 /* we need to exclude all possible ENOPROTOOPTs except default case */
1180 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1181 optname != IP_IPSEC_POLICY &&
1182 optname != IP_XFRM_POLICY &&
1183 !ip_mroute_opt(optname)) {
1184 lock_sock(sk);
1185 err = compat_nf_setsockopt(sk, PF_INET, optname,
1186 optval, optlen);
1187 release_sock(sk);
1188 }
1189 #endif
1190 return err;
1191 }
1192 EXPORT_SYMBOL(compat_ip_setsockopt);
1193 #endif
1194
1195 /*
1196 * Get the options. Note for future reference. The GET of IP options gets
1197 * the _received_ ones. The set sets the _sent_ ones.
1198 */
1199
1200 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1201 char __user *optval, int __user *optlen, unsigned int flags)
1202 {
1203 struct inet_sock *inet = inet_sk(sk);
1204 int val;
1205 int len;
1206
1207 if (level != SOL_IP)
1208 return -EOPNOTSUPP;
1209
1210 if (ip_mroute_opt(optname))
1211 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1212
1213 if (get_user(len, optlen))
1214 return -EFAULT;
1215 if (len < 0)
1216 return -EINVAL;
1217
1218 lock_sock(sk);
1219
1220 switch (optname) {
1221 case IP_OPTIONS:
1222 {
1223 unsigned char optbuf[sizeof(struct ip_options)+40];
1224 struct ip_options *opt = (struct ip_options *)optbuf;
1225 struct ip_options_rcu *inet_opt;
1226
1227 inet_opt = rcu_dereference_protected(inet->inet_opt,
1228 sock_owned_by_user(sk));
1229 opt->optlen = 0;
1230 if (inet_opt)
1231 memcpy(optbuf, &inet_opt->opt,
1232 sizeof(struct ip_options) +
1233 inet_opt->opt.optlen);
1234 release_sock(sk);
1235
1236 if (opt->optlen == 0)
1237 return put_user(0, optlen);
1238
1239 ip_options_undo(opt);
1240
1241 len = min_t(unsigned int, len, opt->optlen);
1242 if (put_user(len, optlen))
1243 return -EFAULT;
1244 if (copy_to_user(optval, opt->__data, len))
1245 return -EFAULT;
1246 return 0;
1247 }
1248 case IP_PKTINFO:
1249 val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
1250 break;
1251 case IP_RECVTTL:
1252 val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
1253 break;
1254 case IP_RECVTOS:
1255 val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
1256 break;
1257 case IP_RECVOPTS:
1258 val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
1259 break;
1260 case IP_RETOPTS:
1261 val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
1262 break;
1263 case IP_PASSSEC:
1264 val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
1265 break;
1266 case IP_RECVORIGDSTADDR:
1267 val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
1268 break;
1269 case IP_CHECKSUM:
1270 val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
1271 break;
1272 case IP_TOS:
1273 val = inet->tos;
1274 break;
1275 case IP_TTL:
1276 val = (inet->uc_ttl == -1 ?
1277 sysctl_ip_default_ttl :
1278 inet->uc_ttl);
1279 break;
1280 case IP_HDRINCL:
1281 val = inet->hdrincl;
1282 break;
1283 case IP_NODEFRAG:
1284 val = inet->nodefrag;
1285 break;
1286 case IP_MTU_DISCOVER:
1287 val = inet->pmtudisc;
1288 break;
1289 case IP_MTU:
1290 {
1291 struct dst_entry *dst;
1292 val = 0;
1293 dst = sk_dst_get(sk);
1294 if (dst) {
1295 val = dst_mtu(dst);
1296 dst_release(dst);
1297 }
1298 if (!val) {
1299 release_sock(sk);
1300 return -ENOTCONN;
1301 }
1302 break;
1303 }
1304 case IP_RECVERR:
1305 val = inet->recverr;
1306 break;
1307 case IP_MULTICAST_TTL:
1308 val = inet->mc_ttl;
1309 break;
1310 case IP_MULTICAST_LOOP:
1311 val = inet->mc_loop;
1312 break;
1313 case IP_UNICAST_IF:
1314 val = (__force int)htonl((__u32) inet->uc_index);
1315 break;
1316 case IP_MULTICAST_IF:
1317 {
1318 struct in_addr addr;
1319 len = min_t(unsigned int, len, sizeof(struct in_addr));
1320 addr.s_addr = inet->mc_addr;
1321 release_sock(sk);
1322
1323 if (put_user(len, optlen))
1324 return -EFAULT;
1325 if (copy_to_user(optval, &addr, len))
1326 return -EFAULT;
1327 return 0;
1328 }
1329 case IP_MSFILTER:
1330 {
1331 struct ip_msfilter msf;
1332 int err;
1333
1334 if (len < IP_MSFILTER_SIZE(0)) {
1335 release_sock(sk);
1336 return -EINVAL;
1337 }
1338 if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
1339 release_sock(sk);
1340 return -EFAULT;
1341 }
1342 err = ip_mc_msfget(sk, &msf,
1343 (struct ip_msfilter __user *)optval, optlen);
1344 release_sock(sk);
1345 return err;
1346 }
1347 case MCAST_MSFILTER:
1348 {
1349 struct group_filter gsf;
1350 int err;
1351
1352 if (len < GROUP_FILTER_SIZE(0)) {
1353 release_sock(sk);
1354 return -EINVAL;
1355 }
1356 if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
1357 release_sock(sk);
1358 return -EFAULT;
1359 }
1360 err = ip_mc_gsfget(sk, &gsf,
1361 (struct group_filter __user *)optval,
1362 optlen);
1363 release_sock(sk);
1364 return err;
1365 }
1366 case IP_MULTICAST_ALL:
1367 val = inet->mc_all;
1368 break;
1369 case IP_PKTOPTIONS:
1370 {
1371 struct msghdr msg;
1372
1373 release_sock(sk);
1374
1375 if (sk->sk_type != SOCK_STREAM)
1376 return -ENOPROTOOPT;
1377
1378 msg.msg_control = (__force void *) optval;
1379 msg.msg_controllen = len;
1380 msg.msg_flags = flags;
1381
1382 if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
1383 struct in_pktinfo info;
1384
1385 info.ipi_addr.s_addr = inet->inet_rcv_saddr;
1386 info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
1387 info.ipi_ifindex = inet->mc_index;
1388 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1389 }
1390 if (inet->cmsg_flags & IP_CMSG_TTL) {
1391 int hlim = inet->mc_ttl;
1392 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1393 }
1394 if (inet->cmsg_flags & IP_CMSG_TOS) {
1395 int tos = inet->rcv_tos;
1396 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1397 }
1398 len -= msg.msg_controllen;
1399 return put_user(len, optlen);
1400 }
1401 case IP_FREEBIND:
1402 val = inet->freebind;
1403 break;
1404 case IP_TRANSPARENT:
1405 val = inet->transparent;
1406 break;
1407 case IP_MINTTL:
1408 val = inet->min_ttl;
1409 break;
1410 default:
1411 release_sock(sk);
1412 return -ENOPROTOOPT;
1413 }
1414 release_sock(sk);
1415
1416 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1417 unsigned char ucval = (unsigned char)val;
1418 len = 1;
1419 if (put_user(len, optlen))
1420 return -EFAULT;
1421 if (copy_to_user(optval, &ucval, 1))
1422 return -EFAULT;
1423 } else {
1424 len = min_t(unsigned int, sizeof(int), len);
1425 if (put_user(len, optlen))
1426 return -EFAULT;
1427 if (copy_to_user(optval, &val, len))
1428 return -EFAULT;
1429 }
1430 return 0;
1431 }
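/*
 * Illustrative userspace sketch, not part of this kernel file: querying the
 * path MTU cached on a connected socket, served by the IP_MTU case in
 * do_ip_getsockopt() above.  Assumes 'fd' is a connected datagram socket;
 * error handling is omitted.
 */
#if 0
static int query_path_mtu(int fd)
{
	int mtu = 0;
	socklen_t len = sizeof(mtu);

	/* fails with errno ENOTCONN if no route is cached on the socket yet */
	getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len);
	return mtu;
}
#endif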
1432
1433 int ip_getsockopt(struct sock *sk, int level,
1434 int optname, char __user *optval, int __user *optlen)
1435 {
1436 int err;
1437
1438 err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
1439 #ifdef CONFIG_NETFILTER
1440 /* we need to exclude all possible ENOPROTOOPTs except default case */
1441 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1442 !ip_mroute_opt(optname)) {
1443 int len;
1444
1445 if (get_user(len, optlen))
1446 return -EFAULT;
1447
1448 lock_sock(sk);
1449 err = nf_getsockopt(sk, PF_INET, optname, optval,
1450 &len);
1451 release_sock(sk);
1452 if (err >= 0)
1453 err = put_user(len, optlen);
1454 return err;
1455 }
1456 #endif
1457 return err;
1458 }
1459 EXPORT_SYMBOL(ip_getsockopt);
1460
1461 #ifdef CONFIG_COMPAT
1462 int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1463 char __user *optval, int __user *optlen)
1464 {
1465 int err;
1466
1467 if (optname == MCAST_MSFILTER)
1468 return compat_mc_getsockopt(sk, level, optname, optval, optlen,
1469 ip_getsockopt);
1470
1471 err = do_ip_getsockopt(sk, level, optname, optval, optlen,
1472 MSG_CMSG_COMPAT);
1473
1474 #ifdef CONFIG_NETFILTER
1475 /* we need to exclude all possible ENOPROTOOPTs except default case */
1476 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1477 !ip_mroute_opt(optname)) {
1478 int len;
1479
1480 if (get_user(len, optlen))
1481 return -EFAULT;
1482
1483 lock_sock(sk);
1484 err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
1485 release_sock(sk);
1486 if (err >= 0)
1487 err = put_user(len, optlen);
1488 return err;
1489 }
1490 #endif
1491 return err;
1492 }
1493 EXPORT_SYMBOL(compat_ip_getsockopt);
1494 #endif