net/ipv4/ip_sockglue.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * The IP to API glue.
7 *
8 * Authors: see ip.c
9 *
10 * Fixes:
11 * Many : Split from ip.c, see ip.c for history.
12 * Martin Mares : TOS setting fixed.
13 * Alan Cox : Fixed a couple of oopses in Martin's
14 * TOS tweaks.
15 * Mike McLagan : Routing by source
16 */
17
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/mm.h>
21 #include <linux/skbuff.h>
22 #include <linux/ip.h>
23 #include <linux/icmp.h>
24 #include <linux/inetdevice.h>
25 #include <linux/netdevice.h>
26 #include <linux/slab.h>
27 #include <net/sock.h>
28 #include <net/ip.h>
29 #include <net/icmp.h>
30 #include <net/tcp_states.h>
31 #include <linux/udp.h>
32 #include <linux/igmp.h>
33 #include <linux/netfilter.h>
34 #include <linux/route.h>
35 #include <linux/mroute.h>
36 #include <net/inet_ecn.h>
37 #include <net/route.h>
38 #include <net/xfrm.h>
39 #include <net/compat.h>
40 #include <net/checksum.h>
41 #if IS_ENABLED(CONFIG_IPV6)
42 #include <net/transp_v6.h>
43 #endif
44 #include <net/ip_fib.h>
45
46 #include <linux/errqueue.h>
47 #include <asm/uaccess.h>
48
49 /*
50 * SOL_IP control messages.
51 */
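
/*
 * Illustrative userspace sketch (not part of this file): how the SOL_IP
 * control messages filled in below are typically requested and consumed.
 * The UDP socket "fd" and the buffer sizes are assumptions made for this
 * example only; it needs <sys/socket.h> and <netinet/in.h>.
 *
 *	int on = 1;
 *	char data[1500];
 *	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, SOL_IP, IP_PKTINFO, &on, sizeof(on));
 *	if (recvmsg(fd, &msg, 0) >= 0) {
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == SOL_IP &&
 *			    cmsg->cmsg_type == IP_PKTINFO) {
 *				struct in_pktinfo *pi = (void *)CMSG_DATA(cmsg);
 *				// pi->ipi_ifindex and pi->ipi_addr are filled
 *				// by ip_cmsg_recv_pktinfo() below.
 *			}
 *		}
 *	}
 */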
52
53 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
54 {
55 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
56
57 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
58
59 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
60 }
61
62 static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
63 {
64 int ttl = ip_hdr(skb)->ttl;
65 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
66 }
67
68 static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
69 {
70 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
71 }
72
73 static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
74 {
75 if (IPCB(skb)->opt.optlen == 0)
76 return;
77
78 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
79 ip_hdr(skb) + 1);
80 }
81
82
83 static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
84 {
85 unsigned char optbuf[sizeof(struct ip_options) + 40];
86 struct ip_options *opt = (struct ip_options *)optbuf;
87
88 if (IPCB(skb)->opt.optlen == 0)
89 return;
90
91 if (ip_options_echo(opt, skb)) {
92 msg->msg_flags |= MSG_CTRUNC;
93 return;
94 }
95 ip_options_undo(opt);
96
97 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
98 }
99
100 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
101 int offset)
102 {
103 __wsum csum = skb->csum;
104
105 if (skb->ip_summed != CHECKSUM_COMPLETE)
106 return;
107
108 if (offset != 0)
109 csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
110 offset, 0));
111
112 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
113 }
114
115 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
116 {
117 char *secdata;
118 u32 seclen, secid;
119 int err;
120
121 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
122 if (err)
123 return;
124
125 err = security_secid_to_secctx(secid, &secdata, &seclen);
126 if (err)
127 return;
128
129 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
130 security_release_secctx(secdata, seclen);
131 }
132
133 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
134 {
135 struct sockaddr_in sin;
136 const struct iphdr *iph = ip_hdr(skb);
137 __be16 *ports = (__be16 *)skb_transport_header(skb);
138
139 if (skb_transport_offset(skb) + 4 > skb->len)
140 return;
141
142 /* All current transport protocols have the port numbers in the
143 * first four bytes of the transport header and this function is
144 * written with this assumption in mind.
145 */
146
147 sin.sin_family = AF_INET;
148 sin.sin_addr.s_addr = iph->daddr;
149 sin.sin_port = ports[1];
150 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
151
152 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
153 }
154
155 void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
156 int offset)
157 {
158 struct inet_sock *inet = inet_sk(skb->sk);
159 unsigned int flags = inet->cmsg_flags;
160
161 /* Ordered by supposed usage frequency */
162 if (flags & IP_CMSG_PKTINFO) {
163 ip_cmsg_recv_pktinfo(msg, skb);
164
165 flags &= ~IP_CMSG_PKTINFO;
166 if (!flags)
167 return;
168 }
169
170 if (flags & IP_CMSG_TTL) {
171 ip_cmsg_recv_ttl(msg, skb);
172
173 flags &= ~IP_CMSG_TTL;
174 if (!flags)
175 return;
176 }
177
178 if (flags & IP_CMSG_TOS) {
179 ip_cmsg_recv_tos(msg, skb);
180
181 flags &= ~IP_CMSG_TOS;
182 if (!flags)
183 return;
184 }
185
186 if (flags & IP_CMSG_RECVOPTS) {
187 ip_cmsg_recv_opts(msg, skb);
188
189 flags &= ~IP_CMSG_RECVOPTS;
190 if (!flags)
191 return;
192 }
193
194 if (flags & IP_CMSG_RETOPTS) {
195 ip_cmsg_recv_retopts(msg, skb);
196
197 flags &= ~IP_CMSG_RETOPTS;
198 if (!flags)
199 return;
200 }
201
202 if (flags & IP_CMSG_PASSSEC) {
203 ip_cmsg_recv_security(msg, skb);
204
205 flags &= ~IP_CMSG_PASSSEC;
206 if (!flags)
207 return;
208 }
209
210 if (flags & IP_CMSG_ORIGDSTADDR) {
211 ip_cmsg_recv_dstaddr(msg, skb);
212
213 flags &= ~IP_CMSG_ORIGDSTADDR;
214 if (!flags)
215 return;
216 }
217
218 if (flags & IP_CMSG_CHECKSUM)
219 ip_cmsg_recv_checksum(msg, skb, offset);
220 }
221 EXPORT_SYMBOL(ip_cmsg_recv_offset);
222
223 int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
224 bool allow_ipv6)
225 {
226 int err, val;
227 struct cmsghdr *cmsg;
228 struct net *net = sock_net(sk);
229
230 for_each_cmsghdr(cmsg, msg) {
231 if (!CMSG_OK(msg, cmsg))
232 return -EINVAL;
233 #if IS_ENABLED(CONFIG_IPV6)
234 if (allow_ipv6 &&
235 cmsg->cmsg_level == SOL_IPV6 &&
236 cmsg->cmsg_type == IPV6_PKTINFO) {
237 struct in6_pktinfo *src_info;
238
239 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
240 return -EINVAL;
241 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
242 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
243 return -EINVAL;
244 ipc->oif = src_info->ipi6_ifindex;
245 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
246 continue;
247 }
248 #endif
249 if (cmsg->cmsg_level == SOL_SOCKET) {
250 err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
251 if (err)
252 return err;
253 continue;
254 }
255
256 if (cmsg->cmsg_level != SOL_IP)
257 continue;
258 switch (cmsg->cmsg_type) {
259 case IP_RETOPTS:
260 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
261
262 /* Our caller is responsible for freeing ipc->opt */
263 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
264 err < 40 ? err : 40);
265 if (err)
266 return err;
267 break;
268 case IP_PKTINFO:
269 {
270 struct in_pktinfo *info;
271 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
272 return -EINVAL;
273 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
274 ipc->oif = info->ipi_ifindex;
275 ipc->addr = info->ipi_spec_dst.s_addr;
276 break;
277 }
278 case IP_TTL:
279 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
280 return -EINVAL;
281 val = *(int *)CMSG_DATA(cmsg);
282 if (val < 1 || val > 255)
283 return -EINVAL;
284 ipc->ttl = val;
285 break;
286 case IP_TOS:
287 if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
288 val = *(int *)CMSG_DATA(cmsg);
289 else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
290 val = *(u8 *)CMSG_DATA(cmsg);
291 else
292 return -EINVAL;
293 if (val < 0 || val > 255)
294 return -EINVAL;
295 ipc->tos = val;
296 ipc->priority = rt_tos2priority(ipc->tos);
297 break;
298
299 default:
300 return -EINVAL;
301 }
302 }
303 return 0;
304 }
305
306
307 /* Special input handler for packets caught by the router alert option.
308    They are selected only by the protocol field and are then processed
309    like local ones, but only if someone wants them!  Otherwise a router
310    that is not running rsvpd will kill RSVP.
311
312    What user level does with them is a user level problem.  I have no idea
313    how it will masquerade or NAT them (it is a joke, joke :-)), but the
314    receiver should be clever enough e.g. to forward mtrace requests sent
315    to a multicast group so that they reach the destination's designated
316    router.  */
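
/*
 * Illustrative userspace sketch (not part of this file): a daemon such as
 * rsvpd reaches ip_ra_control() below by enabling IP_ROUTER_ALERT on a raw
 * socket (which requires CAP_NET_RAW).  The protocol choice IPPROTO_RSVP
 * and the compressed error handling are assumptions for the example only.
 *
 *	int on = 1;
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *
 *	if (fd >= 0 &&
 *	    setsockopt(fd, SOL_IP, IP_ROUTER_ALERT, &on, sizeof(on)) == 0) {
 *		// Packets carrying the router alert option and matching the
 *		// socket's protocol are now also delivered to this socket
 *		// through the ip_ra_chain walked by ip_call_ra_chain().
 *	}
 */
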
317 struct ip_ra_chain __rcu *ip_ra_chain;
318 static DEFINE_SPINLOCK(ip_ra_lock);
319
320
321 static void ip_ra_destroy_rcu(struct rcu_head *head)
322 {
323 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
324
325 sock_put(ra->saved_sk);
326 kfree(ra);
327 }
328
329 int ip_ra_control(struct sock *sk, unsigned char on,
330 void (*destructor)(struct sock *))
331 {
332 struct ip_ra_chain *ra, *new_ra;
333 struct ip_ra_chain __rcu **rap;
334
335 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
336 return -EINVAL;
337
338 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
339
340 spin_lock_bh(&ip_ra_lock);
341 for (rap = &ip_ra_chain;
342 (ra = rcu_dereference_protected(*rap,
343 lockdep_is_held(&ip_ra_lock))) != NULL;
344 rap = &ra->next) {
345 if (ra->sk == sk) {
346 if (on) {
347 spin_unlock_bh(&ip_ra_lock);
348 kfree(new_ra);
349 return -EADDRINUSE;
350 }
351 /* don't let ip_call_ra_chain() use sk again */
352 ra->sk = NULL;
353 RCU_INIT_POINTER(*rap, ra->next);
354 spin_unlock_bh(&ip_ra_lock);
355
356 if (ra->destructor)
357 ra->destructor(sk);
358 /*
359 * Delay sock_put(sk) and kfree(ra) until after one RCU grace
360 * period. This guarantees that ip_call_ra_chain() does not need
361 * to mess with socket refcounts.
362 */
363 ra->saved_sk = sk;
364 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
365 return 0;
366 }
367 }
368 if (!new_ra) {
369 spin_unlock_bh(&ip_ra_lock);
370 return -ENOBUFS;
371 }
372 new_ra->sk = sk;
373 new_ra->destructor = destructor;
374
375 RCU_INIT_POINTER(new_ra->next, ra);
376 rcu_assign_pointer(*rap, new_ra);
377 sock_hold(sk);
378 spin_unlock_bh(&ip_ra_lock);
379
380 return 0;
381 }
382
383 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
384 __be16 port, u32 info, u8 *payload)
385 {
386 struct sock_exterr_skb *serr;
387
388 skb = skb_clone(skb, GFP_ATOMIC);
389 if (!skb)
390 return;
391
392 serr = SKB_EXT_ERR(skb);
393 serr->ee.ee_errno = err;
394 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
395 serr->ee.ee_type = icmp_hdr(skb)->type;
396 serr->ee.ee_code = icmp_hdr(skb)->code;
397 serr->ee.ee_pad = 0;
398 serr->ee.ee_info = info;
399 serr->ee.ee_data = 0;
400 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
401 skb_network_header(skb);
402 serr->port = port;
403
404 if (skb_pull(skb, payload - skb->data)) {
405 skb_reset_transport_header(skb);
406 if (sock_queue_err_skb(sk, skb) == 0)
407 return;
408 }
409 kfree_skb(skb);
410 }
411
412 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
413 {
414 struct inet_sock *inet = inet_sk(sk);
415 struct sock_exterr_skb *serr;
416 struct iphdr *iph;
417 struct sk_buff *skb;
418
419 if (!inet->recverr)
420 return;
421
422 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
423 if (!skb)
424 return;
425
426 skb_put(skb, sizeof(struct iphdr));
427 skb_reset_network_header(skb);
428 iph = ip_hdr(skb);
429 iph->daddr = daddr;
430
431 serr = SKB_EXT_ERR(skb);
432 serr->ee.ee_errno = err;
433 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
434 serr->ee.ee_type = 0;
435 serr->ee.ee_code = 0;
436 serr->ee.ee_pad = 0;
437 serr->ee.ee_info = info;
438 serr->ee.ee_data = 0;
439 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
440 serr->port = port;
441
442 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
443 skb_reset_transport_header(skb);
444
445 if (sock_queue_err_skb(sk, skb))
446 kfree_skb(skb);
447 }
448
449 /* For some errors we have valid addr_offset even with zero payload and
450 * zero port. Also, addr_offset should be supported if port is set.
451 */
452 static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
453 {
454 return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
455 serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
456 }
457
458 /* IPv4 supports cmsg on all ICMP errors and some timestamps
459 *
460 * Timestamp code paths do not initialize the fields expected by cmsg:
461 * the PKTINFO fields in skb->cb[]. Fill those in here.
462 */
463 static bool ipv4_datagram_support_cmsg(const struct sock *sk,
464 struct sk_buff *skb,
465 int ee_origin)
466 {
467 struct in_pktinfo *info;
468
469 if (ee_origin == SO_EE_ORIGIN_ICMP)
470 return true;
471
472 if (ee_origin == SO_EE_ORIGIN_LOCAL)
473 return false;
474
475 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
476 * timestamp with egress dev. Not possible for packets without dev
477 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
478 */
479 if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
480 (!skb->dev))
481 return false;
482
483 info = PKTINFO_SKB_CB(skb);
484 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
485 info->ipi_ifindex = skb->dev->ifindex;
486 return true;
487 }
488
489 /*
490 * Handle MSG_ERRQUEUE
491 */
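
/*
 * Illustrative userspace sketch (not part of this file): draining the error
 * queue that ip_recv_error() below services.  "fd" is assumed to be a
 * connected UDP socket; it needs <netinet/in.h> and <linux/errqueue.h>.
 *
 *	int on = 1;
 *	char data[1500], cbuf[512];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, SOL_IP, IP_RECVERR, &on, sizeof(on));
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == SOL_IP &&
 *			    cmsg->cmsg_type == IP_RECVERR) {
 *				struct sock_extended_err *ee =
 *					(void *)CMSG_DATA(cmsg);
 *				// ee->ee_errno, ee->ee_origin and
 *				// SO_EE_OFFENDER(ee) describe the error.
 *			}
 *		}
 *	}
 */
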
492 int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
493 {
494 struct sock_exterr_skb *serr;
495 struct sk_buff *skb;
496 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
497 struct {
498 struct sock_extended_err ee;
499 struct sockaddr_in offender;
500 } errhdr;
501 int err;
502 int copied;
503
504 WARN_ON_ONCE(sk->sk_family == AF_INET6);
505
506 err = -EAGAIN;
507 skb = sock_dequeue_err_skb(sk);
508 if (!skb)
509 goto out;
510
511 copied = skb->len;
512 if (copied > len) {
513 msg->msg_flags |= MSG_TRUNC;
514 copied = len;
515 }
516 err = skb_copy_datagram_msg(skb, 0, msg, copied);
517 if (unlikely(err)) {
518 kfree_skb(skb);
519 return err;
520 }
521 sock_recv_timestamp(msg, sk, skb);
522
523 serr = SKB_EXT_ERR(skb);
524
525 if (sin && ipv4_datagram_support_addr(serr)) {
526 sin->sin_family = AF_INET;
527 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
528 serr->addr_offset);
529 sin->sin_port = serr->port;
530 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
531 *addr_len = sizeof(*sin);
532 }
533
534 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
535 sin = &errhdr.offender;
536 memset(sin, 0, sizeof(*sin));
537
538 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
539 sin->sin_family = AF_INET;
540 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
541 if (inet_sk(sk)->cmsg_flags)
542 ip_cmsg_recv(msg, skb);
543 }
544
545 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
546
547 /* Now we could try to dump the offending packet's options */
548
549 msg->msg_flags |= MSG_ERRQUEUE;
550 err = copied;
551
552 consume_skb(skb);
553 out:
554 return err;
555 }
556
557
558 /*
559 * Socket option code for IP. This is the end of the line after any
560 * TCP, UDP, etc. options on an IP socket.
561 */
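
/*
 * Illustrative userspace sketch (not part of this file): the options handled
 * by do_ip_setsockopt() below are set with ordinary setsockopt() calls at
 * the SOL_IP level.  The values are examples only.
 *
 *	int tos = IPTOS_LOWDELAY;	// ECN bits are masked for SOCK_STREAM
 *	int ttl = 64;
 *
 *	setsockopt(fd, SOL_IP, IP_TOS, &tos, sizeof(tos));
 *	setsockopt(fd, SOL_IP, IP_TTL, &ttl, sizeof(ttl));
 */
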
562 static bool setsockopt_needs_rtnl(int optname)
563 {
564 switch (optname) {
565 case IP_ADD_MEMBERSHIP:
566 case IP_ADD_SOURCE_MEMBERSHIP:
567 case IP_BLOCK_SOURCE:
568 case IP_DROP_MEMBERSHIP:
569 case IP_DROP_SOURCE_MEMBERSHIP:
570 case IP_MSFILTER:
571 case IP_UNBLOCK_SOURCE:
572 case MCAST_BLOCK_SOURCE:
573 case MCAST_MSFILTER:
574 case MCAST_JOIN_GROUP:
575 case MCAST_JOIN_SOURCE_GROUP:
576 case MCAST_LEAVE_GROUP:
577 case MCAST_LEAVE_SOURCE_GROUP:
578 case MCAST_UNBLOCK_SOURCE:
579 return true;
580 }
581 return false;
582 }
583
584 static int do_ip_setsockopt(struct sock *sk, int level,
585 int optname, char __user *optval, unsigned int optlen)
586 {
587 struct inet_sock *inet = inet_sk(sk);
588 struct net *net = sock_net(sk);
589 int val = 0, err;
590 bool needs_rtnl = setsockopt_needs_rtnl(optname);
591
592 switch (optname) {
593 case IP_PKTINFO:
594 case IP_RECVTTL:
595 case IP_RECVOPTS:
596 case IP_RECVTOS:
597 case IP_RETOPTS:
598 case IP_TOS:
599 case IP_TTL:
600 case IP_HDRINCL:
601 case IP_MTU_DISCOVER:
602 case IP_RECVERR:
603 case IP_ROUTER_ALERT:
604 case IP_FREEBIND:
605 case IP_PASSSEC:
606 case IP_TRANSPARENT:
607 case IP_MINTTL:
608 case IP_NODEFRAG:
609 case IP_BIND_ADDRESS_NO_PORT:
610 case IP_UNICAST_IF:
611 case IP_MULTICAST_TTL:
612 case IP_MULTICAST_ALL:
613 case IP_MULTICAST_LOOP:
614 case IP_RECVORIGDSTADDR:
615 case IP_CHECKSUM:
616 if (optlen >= sizeof(int)) {
617 if (get_user(val, (int __user *) optval))
618 return -EFAULT;
619 } else if (optlen >= sizeof(char)) {
620 unsigned char ucval;
621
622 if (get_user(ucval, (unsigned char __user *) optval))
623 return -EFAULT;
624 val = (int) ucval;
625 }
626 }
627
628 /* If optlen==0, it is equivalent to val == 0 */
629
630 if (ip_mroute_opt(optname))
631 return ip_mroute_setsockopt(sk, optname, optval, optlen);
632
633 err = 0;
634 if (needs_rtnl)
635 rtnl_lock();
636 lock_sock(sk);
637
638 switch (optname) {
639 case IP_OPTIONS:
640 {
641 struct ip_options_rcu *old, *opt = NULL;
642
643 if (optlen > 40)
644 goto e_inval;
645 err = ip_options_get_from_user(sock_net(sk), &opt,
646 optval, optlen);
647 if (err)
648 break;
649 old = rcu_dereference_protected(inet->inet_opt,
650 lockdep_sock_is_held(sk));
651 if (inet->is_icsk) {
652 struct inet_connection_sock *icsk = inet_csk(sk);
653 #if IS_ENABLED(CONFIG_IPV6)
654 if (sk->sk_family == PF_INET ||
655 (!((1 << sk->sk_state) &
656 (TCPF_LISTEN | TCPF_CLOSE)) &&
657 inet->inet_daddr != LOOPBACK4_IPV6)) {
658 #endif
659 if (old)
660 icsk->icsk_ext_hdr_len -= old->opt.optlen;
661 if (opt)
662 icsk->icsk_ext_hdr_len += opt->opt.optlen;
663 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
664 #if IS_ENABLED(CONFIG_IPV6)
665 }
666 #endif
667 }
668 rcu_assign_pointer(inet->inet_opt, opt);
669 if (old)
670 kfree_rcu(old, rcu);
671 break;
672 }
673 case IP_PKTINFO:
674 if (val)
675 inet->cmsg_flags |= IP_CMSG_PKTINFO;
676 else
677 inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
678 break;
679 case IP_RECVTTL:
680 if (val)
681 inet->cmsg_flags |= IP_CMSG_TTL;
682 else
683 inet->cmsg_flags &= ~IP_CMSG_TTL;
684 break;
685 case IP_RECVTOS:
686 if (val)
687 inet->cmsg_flags |= IP_CMSG_TOS;
688 else
689 inet->cmsg_flags &= ~IP_CMSG_TOS;
690 break;
691 case IP_RECVOPTS:
692 if (val)
693 inet->cmsg_flags |= IP_CMSG_RECVOPTS;
694 else
695 inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
696 break;
697 case IP_RETOPTS:
698 if (val)
699 inet->cmsg_flags |= IP_CMSG_RETOPTS;
700 else
701 inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
702 break;
703 case IP_PASSSEC:
704 if (val)
705 inet->cmsg_flags |= IP_CMSG_PASSSEC;
706 else
707 inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
708 break;
709 case IP_RECVORIGDSTADDR:
710 if (val)
711 inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
712 else
713 inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
714 break;
715 case IP_CHECKSUM:
716 if (val) {
717 if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
718 inet_inc_convert_csum(sk);
719 inet->cmsg_flags |= IP_CMSG_CHECKSUM;
720 }
721 } else {
722 if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
723 inet_dec_convert_csum(sk);
724 inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
725 }
726 }
727 break;
728 case IP_TOS: /* This sets both TOS and Precedence */
729 if (sk->sk_type == SOCK_STREAM) {
730 val &= ~INET_ECN_MASK;
731 val |= inet->tos & INET_ECN_MASK;
732 }
733 if (inet->tos != val) {
734 inet->tos = val;
735 sk->sk_priority = rt_tos2priority(val);
736 sk_dst_reset(sk);
737 }
738 break;
739 case IP_TTL:
740 if (optlen < 1)
741 goto e_inval;
742 if (val != -1 && (val < 1 || val > 255))
743 goto e_inval;
744 inet->uc_ttl = val;
745 break;
746 case IP_HDRINCL:
747 if (sk->sk_type != SOCK_RAW) {
748 err = -ENOPROTOOPT;
749 break;
750 }
751 inet->hdrincl = val ? 1 : 0;
752 break;
753 case IP_NODEFRAG:
754 if (sk->sk_type != SOCK_RAW) {
755 err = -ENOPROTOOPT;
756 break;
757 }
758 inet->nodefrag = val ? 1 : 0;
759 break;
760 case IP_BIND_ADDRESS_NO_PORT:
761 inet->bind_address_no_port = val ? 1 : 0;
762 break;
763 case IP_MTU_DISCOVER:
764 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
765 goto e_inval;
766 inet->pmtudisc = val;
767 break;
768 case IP_RECVERR:
769 inet->recverr = !!val;
770 if (!val)
771 skb_queue_purge(&sk->sk_error_queue);
772 break;
773 case IP_MULTICAST_TTL:
774 if (sk->sk_type == SOCK_STREAM)
775 goto e_inval;
776 if (optlen < 1)
777 goto e_inval;
778 if (val == -1)
779 val = 1;
780 if (val < 0 || val > 255)
781 goto e_inval;
782 inet->mc_ttl = val;
783 break;
784 case IP_MULTICAST_LOOP:
785 if (optlen < 1)
786 goto e_inval;
787 inet->mc_loop = !!val;
788 break;
789 case IP_UNICAST_IF:
790 {
791 struct net_device *dev = NULL;
792 int ifindex;
793
794 if (optlen != sizeof(int))
795 goto e_inval;
796
797 ifindex = (__force int)ntohl((__force __be32)val);
798 if (ifindex == 0) {
799 inet->uc_index = 0;
800 err = 0;
801 break;
802 }
803
804 dev = dev_get_by_index(sock_net(sk), ifindex);
805 err = -EADDRNOTAVAIL;
806 if (!dev)
807 break;
808 dev_put(dev);
809
810 err = -EINVAL;
811 if (sk->sk_bound_dev_if)
812 break;
813
814 inet->uc_index = ifindex;
815 err = 0;
816 break;
817 }
818 case IP_MULTICAST_IF:
819 {
820 struct ip_mreqn mreq;
821 struct net_device *dev = NULL;
822
823 if (sk->sk_type == SOCK_STREAM)
824 goto e_inval;
825 /*
826 * Check the arguments are allowable
827 */
828
829 if (optlen < sizeof(struct in_addr))
830 goto e_inval;
831
832 err = -EFAULT;
833 if (optlen >= sizeof(struct ip_mreqn)) {
834 if (copy_from_user(&mreq, optval, sizeof(mreq)))
835 break;
836 } else {
837 memset(&mreq, 0, sizeof(mreq));
838 if (optlen >= sizeof(struct ip_mreq)) {
839 if (copy_from_user(&mreq, optval,
840 sizeof(struct ip_mreq)))
841 break;
842 } else if (optlen >= sizeof(struct in_addr)) {
843 if (copy_from_user(&mreq.imr_address, optval,
844 sizeof(struct in_addr)))
845 break;
846 }
847 }
848
849 if (!mreq.imr_ifindex) {
850 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
851 inet->mc_index = 0;
852 inet->mc_addr = 0;
853 err = 0;
854 break;
855 }
856 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
857 if (dev)
858 mreq.imr_ifindex = dev->ifindex;
859 } else
860 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
861
862
863 err = -EADDRNOTAVAIL;
864 if (!dev)
865 break;
866 dev_put(dev);
867
868 err = -EINVAL;
869 if (sk->sk_bound_dev_if &&
870 mreq.imr_ifindex != sk->sk_bound_dev_if)
871 break;
872
873 inet->mc_index = mreq.imr_ifindex;
874 inet->mc_addr = mreq.imr_address.s_addr;
875 err = 0;
876 break;
877 }
878
879 case IP_ADD_MEMBERSHIP:
880 case IP_DROP_MEMBERSHIP:
881 {
882 struct ip_mreqn mreq;
883
884 err = -EPROTO;
885 if (inet_sk(sk)->is_icsk)
886 break;
887
888 if (optlen < sizeof(struct ip_mreq))
889 goto e_inval;
890 err = -EFAULT;
891 if (optlen >= sizeof(struct ip_mreqn)) {
892 if (copy_from_user(&mreq, optval, sizeof(mreq)))
893 break;
894 } else {
895 memset(&mreq, 0, sizeof(mreq));
896 if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
897 break;
898 }
899
900 if (optname == IP_ADD_MEMBERSHIP)
901 err = ip_mc_join_group(sk, &mreq);
902 else
903 err = ip_mc_leave_group(sk, &mreq);
904 break;
905 }
906 case IP_MSFILTER:
907 {
908 struct ip_msfilter *msf;
909
910 if (optlen < IP_MSFILTER_SIZE(0))
911 goto e_inval;
912 if (optlen > sysctl_optmem_max) {
913 err = -ENOBUFS;
914 break;
915 }
916 msf = kmalloc(optlen, GFP_KERNEL);
917 if (!msf) {
918 err = -ENOBUFS;
919 break;
920 }
921 err = -EFAULT;
922 if (copy_from_user(msf, optval, optlen)) {
923 kfree(msf);
924 break;
925 }
926 /* numsrc >= (1G-4) overflow in 32 bits */
927 if (msf->imsf_numsrc >= 0x3ffffffcU ||
928 msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
929 kfree(msf);
930 err = -ENOBUFS;
931 break;
932 }
933 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
934 kfree(msf);
935 err = -EINVAL;
936 break;
937 }
938 err = ip_mc_msfilter(sk, msf, 0);
939 kfree(msf);
940 break;
941 }
942 case IP_BLOCK_SOURCE:
943 case IP_UNBLOCK_SOURCE:
944 case IP_ADD_SOURCE_MEMBERSHIP:
945 case IP_DROP_SOURCE_MEMBERSHIP:
946 {
947 struct ip_mreq_source mreqs;
948 int omode, add;
949
950 if (optlen != sizeof(struct ip_mreq_source))
951 goto e_inval;
952 if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
953 err = -EFAULT;
954 break;
955 }
956 if (optname == IP_BLOCK_SOURCE) {
957 omode = MCAST_EXCLUDE;
958 add = 1;
959 } else if (optname == IP_UNBLOCK_SOURCE) {
960 omode = MCAST_EXCLUDE;
961 add = 0;
962 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
963 struct ip_mreqn mreq;
964
965 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
966 mreq.imr_address.s_addr = mreqs.imr_interface;
967 mreq.imr_ifindex = 0;
968 err = ip_mc_join_group(sk, &mreq);
969 if (err && err != -EADDRINUSE)
970 break;
971 omode = MCAST_INCLUDE;
972 add = 1;
973 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
974 omode = MCAST_INCLUDE;
975 add = 0;
976 }
977 err = ip_mc_source(add, omode, sk, &mreqs, 0);
978 break;
979 }
980 case MCAST_JOIN_GROUP:
981 case MCAST_LEAVE_GROUP:
982 {
983 struct group_req greq;
984 struct sockaddr_in *psin;
985 struct ip_mreqn mreq;
986
987 if (optlen < sizeof(struct group_req))
988 goto e_inval;
989 err = -EFAULT;
990 if (copy_from_user(&greq, optval, sizeof(greq)))
991 break;
992 psin = (struct sockaddr_in *)&greq.gr_group;
993 if (psin->sin_family != AF_INET)
994 goto e_inval;
995 memset(&mreq, 0, sizeof(mreq));
996 mreq.imr_multiaddr = psin->sin_addr;
997 mreq.imr_ifindex = greq.gr_interface;
998
999 if (optname == MCAST_JOIN_GROUP)
1000 err = ip_mc_join_group(sk, &mreq);
1001 else
1002 err = ip_mc_leave_group(sk, &mreq);
1003 break;
1004 }
1005 case MCAST_JOIN_SOURCE_GROUP:
1006 case MCAST_LEAVE_SOURCE_GROUP:
1007 case MCAST_BLOCK_SOURCE:
1008 case MCAST_UNBLOCK_SOURCE:
1009 {
1010 struct group_source_req greqs;
1011 struct ip_mreq_source mreqs;
1012 struct sockaddr_in *psin;
1013 int omode, add;
1014
1015 if (optlen != sizeof(struct group_source_req))
1016 goto e_inval;
1017 if (copy_from_user(&greqs, optval, sizeof(greqs))) {
1018 err = -EFAULT;
1019 break;
1020 }
1021 if (greqs.gsr_group.ss_family != AF_INET ||
1022 greqs.gsr_source.ss_family != AF_INET) {
1023 err = -EADDRNOTAVAIL;
1024 break;
1025 }
1026 psin = (struct sockaddr_in *)&greqs.gsr_group;
1027 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
1028 psin = (struct sockaddr_in *)&greqs.gsr_source;
1029 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
1030 mreqs.imr_interface = 0; /* use index for mc_source */
1031
1032 if (optname == MCAST_BLOCK_SOURCE) {
1033 omode = MCAST_EXCLUDE;
1034 add = 1;
1035 } else if (optname == MCAST_UNBLOCK_SOURCE) {
1036 omode = MCAST_EXCLUDE;
1037 add = 0;
1038 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
1039 struct ip_mreqn mreq;
1040
1041 psin = (struct sockaddr_in *)&greqs.gsr_group;
1042 mreq.imr_multiaddr = psin->sin_addr;
1043 mreq.imr_address.s_addr = 0;
1044 mreq.imr_ifindex = greqs.gsr_interface;
1045 err = ip_mc_join_group(sk, &mreq);
1046 if (err && err != -EADDRINUSE)
1047 break;
1048 greqs.gsr_interface = mreq.imr_ifindex;
1049 omode = MCAST_INCLUDE;
1050 add = 1;
1051 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
1052 omode = MCAST_INCLUDE;
1053 add = 0;
1054 }
1055 err = ip_mc_source(add, omode, sk, &mreqs,
1056 greqs.gsr_interface);
1057 break;
1058 }
1059 case MCAST_MSFILTER:
1060 {
1061 struct sockaddr_in *psin;
1062 struct ip_msfilter *msf = NULL;
1063 struct group_filter *gsf = NULL;
1064 int msize, i, ifindex;
1065
1066 if (optlen < GROUP_FILTER_SIZE(0))
1067 goto e_inval;
1068 if (optlen > sysctl_optmem_max) {
1069 err = -ENOBUFS;
1070 break;
1071 }
1072 gsf = kmalloc(optlen, GFP_KERNEL);
1073 if (!gsf) {
1074 err = -ENOBUFS;
1075 break;
1076 }
1077 err = -EFAULT;
1078 if (copy_from_user(gsf, optval, optlen))
1079 goto mc_msf_out;
1080
1081 /* numsrc >= (4G-140)/128 overflow in 32 bits */
1082 if (gsf->gf_numsrc >= 0x1ffffff ||
1083 gsf->gf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
1084 err = -ENOBUFS;
1085 goto mc_msf_out;
1086 }
1087 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
1088 err = -EINVAL;
1089 goto mc_msf_out;
1090 }
1091 msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
1092 msf = kmalloc(msize, GFP_KERNEL);
1093 if (!msf) {
1094 err = -ENOBUFS;
1095 goto mc_msf_out;
1096 }
1097 ifindex = gsf->gf_interface;
1098 psin = (struct sockaddr_in *)&gsf->gf_group;
1099 if (psin->sin_family != AF_INET) {
1100 err = -EADDRNOTAVAIL;
1101 goto mc_msf_out;
1102 }
1103 msf->imsf_multiaddr = psin->sin_addr.s_addr;
1104 msf->imsf_interface = 0;
1105 msf->imsf_fmode = gsf->gf_fmode;
1106 msf->imsf_numsrc = gsf->gf_numsrc;
1107 err = -EADDRNOTAVAIL;
1108 for (i = 0; i < gsf->gf_numsrc; ++i) {
1109 psin = (struct sockaddr_in *)&gsf->gf_slist[i];
1110
1111 if (psin->sin_family != AF_INET)
1112 goto mc_msf_out;
1113 msf->imsf_slist[i] = psin->sin_addr.s_addr;
1114 }
1115 kfree(gsf);
1116 gsf = NULL;
1117
1118 err = ip_mc_msfilter(sk, msf, ifindex);
1119 mc_msf_out:
1120 kfree(msf);
1121 kfree(gsf);
1122 break;
1123 }
1124 case IP_MULTICAST_ALL:
1125 if (optlen < 1)
1126 goto e_inval;
1127 if (val != 0 && val != 1)
1128 goto e_inval;
1129 inet->mc_all = val;
1130 break;
1131 case IP_ROUTER_ALERT:
1132 err = ip_ra_control(sk, val ? 1 : 0, NULL);
1133 break;
1134
1135 case IP_FREEBIND:
1136 if (optlen < 1)
1137 goto e_inval;
1138 inet->freebind = !!val;
1139 break;
1140
1141 case IP_IPSEC_POLICY:
1142 case IP_XFRM_POLICY:
1143 err = -EPERM;
1144 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1145 break;
1146 err = xfrm_user_policy(sk, optname, optval, optlen);
1147 break;
1148
1149 case IP_TRANSPARENT:
1150 if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1151 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1152 err = -EPERM;
1153 break;
1154 }
1155 if (optlen < 1)
1156 goto e_inval;
1157 inet->transparent = !!val;
1158 break;
1159
1160 case IP_MINTTL:
1161 if (optlen < 1)
1162 goto e_inval;
1163 if (val < 0 || val > 255)
1164 goto e_inval;
1165 inet->min_ttl = val;
1166 break;
1167
1168 default:
1169 err = -ENOPROTOOPT;
1170 break;
1171 }
1172 release_sock(sk);
1173 if (needs_rtnl)
1174 rtnl_unlock();
1175 return err;
1176
1177 e_inval:
1178 release_sock(sk);
1179 if (needs_rtnl)
1180 rtnl_unlock();
1181 return -EINVAL;
1182 }
1183
1184 /**
1185 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1186 * @sk: socket
1187 * @skb: buffer
1188 *
1189 * To support the IP_CMSG_PKTINFO option, we store rt_iif and the
1190 * specific destination in skb->cb[] before the dst is dropped.
1191 * This way, the receiver doesn't take cache line misses to read the rtable.
1192 */
1193 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
1194 {
1195 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1196 bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
1197 ipv6_sk_rxinfo(sk);
1198
1199 if (prepare && skb_rtable(skb)) {
1200 /* skb->cb is overloaded: prior to this point it is IP{6}CB
1201 * which has interface index (iif) as the first member of the
1202 * underlying inet{6}_skb_parm struct. This code then overlays
1203 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
1204 * element so the iif is picked up from the prior IPCB
1205 */
1206 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1207 } else {
1208 pktinfo->ipi_ifindex = 0;
1209 pktinfo->ipi_spec_dst.s_addr = 0;
1210 }
1211 skb_dst_drop(skb);
1212 }
1213
1214 int ip_setsockopt(struct sock *sk, int level,
1215 int optname, char __user *optval, unsigned int optlen)
1216 {
1217 int err;
1218
1219 if (level != SOL_IP)
1220 return -ENOPROTOOPT;
1221
1222 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1223 #ifdef CONFIG_NETFILTER
1224 /* we need to exclude all possible ENOPROTOOPTs except default case */
1225 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1226 optname != IP_IPSEC_POLICY &&
1227 optname != IP_XFRM_POLICY &&
1228 !ip_mroute_opt(optname)) {
1229 lock_sock(sk);
1230 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1231 release_sock(sk);
1232 }
1233 #endif
1234 return err;
1235 }
1236 EXPORT_SYMBOL(ip_setsockopt);
1237
1238 #ifdef CONFIG_COMPAT
1239 int compat_ip_setsockopt(struct sock *sk, int level, int optname,
1240 char __user *optval, unsigned int optlen)
1241 {
1242 int err;
1243
1244 if (level != SOL_IP)
1245 return -ENOPROTOOPT;
1246
1247 if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
1248 return compat_mc_setsockopt(sk, level, optname, optval, optlen,
1249 ip_setsockopt);
1250
1251 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1252 #ifdef CONFIG_NETFILTER
1253 /* we need to exclude all possible ENOPROTOOPTs except default case */
1254 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1255 optname != IP_IPSEC_POLICY &&
1256 optname != IP_XFRM_POLICY &&
1257 !ip_mroute_opt(optname)) {
1258 lock_sock(sk);
1259 err = compat_nf_setsockopt(sk, PF_INET, optname,
1260 optval, optlen);
1261 release_sock(sk);
1262 }
1263 #endif
1264 return err;
1265 }
1266 EXPORT_SYMBOL(compat_ip_setsockopt);
1267 #endif
1268
1269 /*
1270 * Get the options. Note for future reference. The GET of IP options gets
1271 * the _received_ ones. The set sets the _sent_ ones.
1272 */
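
/*
 * Illustrative userspace sketch (not part of this file): fetching the
 * received IP options handled by IP_OPTIONS in do_ip_getsockopt() below.
 * The 40-byte buffer matches the maximum size of the IP options area.
 *
 *	unsigned char opts[40];
 *	socklen_t optlen = sizeof(opts);
 *
 *	if (getsockopt(fd, SOL_IP, IP_OPTIONS, opts, &optlen) == 0) {
 *		// optlen now holds the length of the received options,
 *		// 0 if none were present.
 *	}
 */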
1273
1274 static bool getsockopt_needs_rtnl(int optname)
1275 {
1276 switch (optname) {
1277 case IP_MSFILTER:
1278 case MCAST_MSFILTER:
1279 return true;
1280 }
1281 return false;
1282 }
1283
1284 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1285 char __user *optval, int __user *optlen, unsigned int flags)
1286 {
1287 struct inet_sock *inet = inet_sk(sk);
1288 bool needs_rtnl = getsockopt_needs_rtnl(optname);
1289 int val, err = 0;
1290 int len;
1291
1292 if (level != SOL_IP)
1293 return -EOPNOTSUPP;
1294
1295 if (ip_mroute_opt(optname))
1296 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1297
1298 if (get_user(len, optlen))
1299 return -EFAULT;
1300 if (len < 0)
1301 return -EINVAL;
1302
1303 if (needs_rtnl)
1304 rtnl_lock();
1305 lock_sock(sk);
1306
1307 switch (optname) {
1308 case IP_OPTIONS:
1309 {
1310 unsigned char optbuf[sizeof(struct ip_options)+40];
1311 struct ip_options *opt = (struct ip_options *)optbuf;
1312 struct ip_options_rcu *inet_opt;
1313
1314 inet_opt = rcu_dereference_protected(inet->inet_opt,
1315 lockdep_sock_is_held(sk));
1316 opt->optlen = 0;
1317 if (inet_opt)
1318 memcpy(optbuf, &inet_opt->opt,
1319 sizeof(struct ip_options) +
1320 inet_opt->opt.optlen);
1321 release_sock(sk);
1322
1323 if (opt->optlen == 0)
1324 return put_user(0, optlen);
1325
1326 ip_options_undo(opt);
1327
1328 len = min_t(unsigned int, len, opt->optlen);
1329 if (put_user(len, optlen))
1330 return -EFAULT;
1331 if (copy_to_user(optval, opt->__data, len))
1332 return -EFAULT;
1333 return 0;
1334 }
1335 case IP_PKTINFO:
1336 val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
1337 break;
1338 case IP_RECVTTL:
1339 val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
1340 break;
1341 case IP_RECVTOS:
1342 val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
1343 break;
1344 case IP_RECVOPTS:
1345 val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
1346 break;
1347 case IP_RETOPTS:
1348 val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
1349 break;
1350 case IP_PASSSEC:
1351 val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
1352 break;
1353 case IP_RECVORIGDSTADDR:
1354 val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
1355 break;
1356 case IP_CHECKSUM:
1357 val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
1358 break;
1359 case IP_TOS:
1360 val = inet->tos;
1361 break;
1362 case IP_TTL:
1363 {
1364 struct net *net = sock_net(sk);
1365 val = (inet->uc_ttl == -1 ?
1366 net->ipv4.sysctl_ip_default_ttl :
1367 inet->uc_ttl);
1368 break;
1369 }
1370 case IP_HDRINCL:
1371 val = inet->hdrincl;
1372 break;
1373 case IP_NODEFRAG:
1374 val = inet->nodefrag;
1375 break;
1376 case IP_BIND_ADDRESS_NO_PORT:
1377 val = inet->bind_address_no_port;
1378 break;
1379 case IP_MTU_DISCOVER:
1380 val = inet->pmtudisc;
1381 break;
1382 case IP_MTU:
1383 {
1384 struct dst_entry *dst;
1385 val = 0;
1386 dst = sk_dst_get(sk);
1387 if (dst) {
1388 val = dst_mtu(dst);
1389 dst_release(dst);
1390 }
1391 if (!val) {
1392 release_sock(sk);
1393 return -ENOTCONN;
1394 }
1395 break;
1396 }
1397 case IP_RECVERR:
1398 val = inet->recverr;
1399 break;
1400 case IP_MULTICAST_TTL:
1401 val = inet->mc_ttl;
1402 break;
1403 case IP_MULTICAST_LOOP:
1404 val = inet->mc_loop;
1405 break;
1406 case IP_UNICAST_IF:
1407 val = (__force int)htonl((__u32) inet->uc_index);
1408 break;
1409 case IP_MULTICAST_IF:
1410 {
1411 struct in_addr addr;
1412 len = min_t(unsigned int, len, sizeof(struct in_addr));
1413 addr.s_addr = inet->mc_addr;
1414 release_sock(sk);
1415
1416 if (put_user(len, optlen))
1417 return -EFAULT;
1418 if (copy_to_user(optval, &addr, len))
1419 return -EFAULT;
1420 return 0;
1421 }
1422 case IP_MSFILTER:
1423 {
1424 struct ip_msfilter msf;
1425
1426 if (len < IP_MSFILTER_SIZE(0)) {
1427 err = -EINVAL;
1428 goto out;
1429 }
1430 if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
1431 err = -EFAULT;
1432 goto out;
1433 }
1434 err = ip_mc_msfget(sk, &msf,
1435 (struct ip_msfilter __user *)optval, optlen);
1436 goto out;
1437 }
1438 case MCAST_MSFILTER:
1439 {
1440 struct group_filter gsf;
1441
1442 if (len < GROUP_FILTER_SIZE(0)) {
1443 err = -EINVAL;
1444 goto out;
1445 }
1446 if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
1447 err = -EFAULT;
1448 goto out;
1449 }
1450 err = ip_mc_gsfget(sk, &gsf,
1451 (struct group_filter __user *)optval,
1452 optlen);
1453 goto out;
1454 }
1455 case IP_MULTICAST_ALL:
1456 val = inet->mc_all;
1457 break;
1458 case IP_PKTOPTIONS:
1459 {
1460 struct msghdr msg;
1461
1462 release_sock(sk);
1463
1464 if (sk->sk_type != SOCK_STREAM)
1465 return -ENOPROTOOPT;
1466
1467 msg.msg_control = (__force void *) optval;
1468 msg.msg_controllen = len;
1469 msg.msg_flags = flags;
1470
1471 if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
1472 struct in_pktinfo info;
1473
1474 info.ipi_addr.s_addr = inet->inet_rcv_saddr;
1475 info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
1476 info.ipi_ifindex = inet->mc_index;
1477 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1478 }
1479 if (inet->cmsg_flags & IP_CMSG_TTL) {
1480 int hlim = inet->mc_ttl;
1481 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1482 }
1483 if (inet->cmsg_flags & IP_CMSG_TOS) {
1484 int tos = inet->rcv_tos;
1485 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1486 }
1487 len -= msg.msg_controllen;
1488 return put_user(len, optlen);
1489 }
1490 case IP_FREEBIND:
1491 val = inet->freebind;
1492 break;
1493 case IP_TRANSPARENT:
1494 val = inet->transparent;
1495 break;
1496 case IP_MINTTL:
1497 val = inet->min_ttl;
1498 break;
1499 default:
1500 release_sock(sk);
1501 return -ENOPROTOOPT;
1502 }
1503 release_sock(sk);
1504
1505 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1506 unsigned char ucval = (unsigned char)val;
1507 len = 1;
1508 if (put_user(len, optlen))
1509 return -EFAULT;
1510 if (copy_to_user(optval, &ucval, 1))
1511 return -EFAULT;
1512 } else {
1513 len = min_t(unsigned int, sizeof(int), len);
1514 if (put_user(len, optlen))
1515 return -EFAULT;
1516 if (copy_to_user(optval, &val, len))
1517 return -EFAULT;
1518 }
1519 return 0;
1520
1521 out:
1522 release_sock(sk);
1523 if (needs_rtnl)
1524 rtnl_unlock();
1525 return err;
1526 }
1527
1528 int ip_getsockopt(struct sock *sk, int level,
1529 int optname, char __user *optval, int __user *optlen)
1530 {
1531 int err;
1532
1533 err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
1534 #ifdef CONFIG_NETFILTER
1535 /* we need to exclude all possible ENOPROTOOPTs except default case */
1536 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1537 !ip_mroute_opt(optname)) {
1538 int len;
1539
1540 if (get_user(len, optlen))
1541 return -EFAULT;
1542
1543 lock_sock(sk);
1544 err = nf_getsockopt(sk, PF_INET, optname, optval,
1545 &len);
1546 release_sock(sk);
1547 if (err >= 0)
1548 err = put_user(len, optlen);
1549 return err;
1550 }
1551 #endif
1552 return err;
1553 }
1554 EXPORT_SYMBOL(ip_getsockopt);
1555
1556 #ifdef CONFIG_COMPAT
1557 int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1558 char __user *optval, int __user *optlen)
1559 {
1560 int err;
1561
1562 if (optname == MCAST_MSFILTER)
1563 return compat_mc_getsockopt(sk, level, optname, optval, optlen,
1564 ip_getsockopt);
1565
1566 err = do_ip_getsockopt(sk, level, optname, optval, optlen,
1567 MSG_CMSG_COMPAT);
1568
1569 #ifdef CONFIG_NETFILTER
1570 /* we need to exclude all possible ENOPROTOOPTs except default case */
1571 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1572 !ip_mroute_opt(optname)) {
1573 int len;
1574
1575 if (get_user(len, optlen))
1576 return -EFAULT;
1577
1578 lock_sock(sk);
1579 err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
1580 release_sock(sk);
1581 if (err >= 0)
1582 err = put_user(len, optlen);
1583 return err;
1584 }
1585 #endif
1586 return err;
1587 }
1588 EXPORT_SYMBOL(compat_ip_getsockopt);
1589 #endif