[deliverable/linux.git] / net / ipv4 / ip_sockglue.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * The IP to API glue.
7 *
8 * Authors: see ip.c
9 *
10 * Fixes:
11 * Many : Split from ip.c , see ip.c for history.
12 * Martin Mares : TOS setting fixed.
13 * Alan Cox : Fixed a couple of oopses in Martin's
14 * TOS tweaks.
15 * Mike McLagan : Routing by source
16 */
17
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/mm.h>
21 #include <linux/skbuff.h>
22 #include <linux/ip.h>
23 #include <linux/icmp.h>
24 #include <linux/inetdevice.h>
25 #include <linux/netdevice.h>
26 #include <linux/slab.h>
27 #include <net/sock.h>
28 #include <net/ip.h>
29 #include <net/icmp.h>
30 #include <net/tcp_states.h>
31 #include <linux/udp.h>
32 #include <linux/igmp.h>
33 #include <linux/netfilter.h>
34 #include <linux/route.h>
35 #include <linux/mroute.h>
36 #include <net/inet_ecn.h>
37 #include <net/route.h>
38 #include <net/xfrm.h>
39 #include <net/compat.h>
40 #include <net/checksum.h>
41 #if IS_ENABLED(CONFIG_IPV6)
42 #include <net/transp_v6.h>
43 #endif
44 #include <net/ip_fib.h>
45
46 #include <linux/errqueue.h>
47 #include <asm/uaccess.h>
48
49 /*
50 * SOL_IP control messages.
51 */
52
53 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
54 {
55 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
56
57 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
58
59 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
60 }
61
62 static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
63 {
64 int ttl = ip_hdr(skb)->ttl;
65 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
66 }
67
68 static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
69 {
70 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
71 }
72
73 static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
74 {
75 if (IPCB(skb)->opt.optlen == 0)
76 return;
77
78 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
79 ip_hdr(skb) + 1);
80 }
81
82
83 static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
84 {
85 unsigned char optbuf[sizeof(struct ip_options) + 40];
86 struct ip_options *opt = (struct ip_options *)optbuf;
87
88 if (IPCB(skb)->opt.optlen == 0)
89 return;
90
91 if (ip_options_echo(opt, skb)) {
92 msg->msg_flags |= MSG_CTRUNC;
93 return;
94 }
95 ip_options_undo(opt);
96
97 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
98 }
99
100 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
101 int offset)
102 {
103 __wsum csum = skb->csum;
104
105 if (skb->ip_summed != CHECKSUM_COMPLETE)
106 return;
107
108 if (offset != 0)
109 csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
110 offset, 0));
111
112 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
113 }
114
115 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
116 {
117 char *secdata;
118 u32 seclen, secid;
119 int err;
120
121 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
122 if (err)
123 return;
124
125 err = security_secid_to_secctx(secid, &secdata, &seclen);
126 if (err)
127 return;
128
129 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
130 security_release_secctx(secdata, seclen);
131 }
132
133 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
134 {
135 struct sockaddr_in sin;
136 const struct iphdr *iph = ip_hdr(skb);
137 __be16 *ports = (__be16 *)skb_transport_header(skb);
138
139 if (skb_transport_offset(skb) + 4 > skb->len)
140 return;
141
142 /* All current transport protocols have the port numbers in the
143 * first four bytes of the transport header and this function is
144 * written with this assumption in mind.
145 */
146
147 sin.sin_family = AF_INET;
148 sin.sin_addr.s_addr = iph->daddr;
149 sin.sin_port = ports[1];
150 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
151
152 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
153 }
154
155 void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
156 int offset)
157 {
158 struct inet_sock *inet = inet_sk(skb->sk);
159 unsigned int flags = inet->cmsg_flags;
160
161 /* Ordered by supposed usage frequency */
162 if (flags & IP_CMSG_PKTINFO) {
163 ip_cmsg_recv_pktinfo(msg, skb);
164
165 flags &= ~IP_CMSG_PKTINFO;
166 if (!flags)
167 return;
168 }
169
170 if (flags & IP_CMSG_TTL) {
171 ip_cmsg_recv_ttl(msg, skb);
172
173 flags &= ~IP_CMSG_TTL;
174 if (!flags)
175 return;
176 }
177
178 if (flags & IP_CMSG_TOS) {
179 ip_cmsg_recv_tos(msg, skb);
180
181 flags &= ~IP_CMSG_TOS;
182 if (!flags)
183 return;
184 }
185
186 if (flags & IP_CMSG_RECVOPTS) {
187 ip_cmsg_recv_opts(msg, skb);
188
189 flags &= ~IP_CMSG_RECVOPTS;
190 if (!flags)
191 return;
192 }
193
194 if (flags & IP_CMSG_RETOPTS) {
195 ip_cmsg_recv_retopts(msg, skb);
196
197 flags &= ~IP_CMSG_RETOPTS;
198 if (!flags)
199 return;
200 }
201
202 if (flags & IP_CMSG_PASSSEC) {
203 ip_cmsg_recv_security(msg, skb);
204
205 flags &= ~IP_CMSG_PASSSEC;
206 if (!flags)
207 return;
208 }
209
210 if (flags & IP_CMSG_ORIGDSTADDR) {
211 ip_cmsg_recv_dstaddr(msg, skb);
212
213 flags &= ~IP_CMSG_ORIGDSTADDR;
214 if (!flags)
215 return;
216 }
217
218 if (flags & IP_CMSG_CHECKSUM)
219 ip_cmsg_recv_checksum(msg, skb, offset);
220 }
221 EXPORT_SYMBOL(ip_cmsg_recv_offset);
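
As a reference for how the ip_cmsg_recv_* helpers above are consumed, here is a minimal userspace sketch (not part of this file; the function name, buffer sizes and printed fields are illustrative). It enables IP_PKTINFO and IP_RECVTTL on an already-bound UDP socket and walks the control messages attached to each received datagram:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void recv_with_cmsg(int fd)
{
        int one = 1;
        char data[2048];
        union { char buf[256]; struct cmsghdr align; } cbuf;
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = cbuf.buf, .msg_controllen = sizeof(cbuf.buf),
        };
        struct cmsghdr *cmsg;

        /* ask the kernel to attach IP_PKTINFO and IP_TTL ancillary data */
        setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));
        setsockopt(fd, IPPROTO_IP, IP_RECVTTL, &one, sizeof(one));

        if (recvmsg(fd, &msg, 0) < 0)
                return;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level != IPPROTO_IP)
                        continue;
                if (cmsg->cmsg_type == IP_PKTINFO) {
                        struct in_pktinfo info;

                        memcpy(&info, CMSG_DATA(cmsg), sizeof(info));
                        printf("ifindex=%d dst=%s\n", info.ipi_ifindex,
                               inet_ntoa(info.ipi_addr));
                } else if (cmsg->cmsg_type == IP_TTL) {
                        int ttl;

                        memcpy(&ttl, CMSG_DATA(cmsg), sizeof(ttl));
                        printf("ttl=%d\n", ttl);
                }
        }
}
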
222
223 int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
224 bool allow_ipv6)
225 {
226 int err, val;
227 struct cmsghdr *cmsg;
228 struct net *net = sock_net(sk);
229
230 for_each_cmsghdr(cmsg, msg) {
231 if (!CMSG_OK(msg, cmsg))
232 return -EINVAL;
233 #if IS_ENABLED(CONFIG_IPV6)
234 if (allow_ipv6 &&
235 cmsg->cmsg_level == SOL_IPV6 &&
236 cmsg->cmsg_type == IPV6_PKTINFO) {
237 struct in6_pktinfo *src_info;
238
239 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
240 return -EINVAL;
241 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
242 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
243 return -EINVAL;
244 ipc->oif = src_info->ipi6_ifindex;
245 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
246 continue;
247 }
248 #endif
249 if (cmsg->cmsg_level == SOL_SOCKET) {
250 err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
251 if (err)
252 return err;
253 continue;
254 }
255
256 if (cmsg->cmsg_level != SOL_IP)
257 continue;
258 switch (cmsg->cmsg_type) {
259 case IP_RETOPTS:
260 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
261
262 /* Our caller is responsible for freeing ipc->opt */
263 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
264 err < 40 ? err : 40);
265 if (err)
266 return err;
267 break;
268 case IP_PKTINFO:
269 {
270 struct in_pktinfo *info;
271 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
272 return -EINVAL;
273 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
274 ipc->oif = info->ipi_ifindex;
275 ipc->addr = info->ipi_spec_dst.s_addr;
276 break;
277 }
278 case IP_TTL:
279 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
280 return -EINVAL;
281 val = *(int *)CMSG_DATA(cmsg);
282 if (val < 1 || val > 255)
283 return -EINVAL;
284 ipc->ttl = val;
285 break;
286 case IP_TOS:
287 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
288 return -EINVAL;
289 val = *(int *)CMSG_DATA(cmsg);
290 if (val < 0 || val > 255)
291 return -EINVAL;
292 ipc->tos = val;
293 ipc->priority = rt_tos2priority(ipc->tos);
294 break;
295
296 default:
297 return -EINVAL;
298 }
299 }
300 return 0;
301 }
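
The per-datagram counterpart is sendmsg() ancillary data, which ip_cmsg_send() above parses. A hedged userspace sketch (the function name and the caller-supplied TTL are illustrative) that overrides the TTL for a single datagram:

#include <string.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_with_ttl(int fd, const struct sockaddr_in *dst,
                             const void *buf, size_t len, int ttl)
{
        union { char buf[CMSG_SPACE(sizeof(int))]; struct cmsghdr align; } cbuf;
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        struct msghdr msg = {
                .msg_name = (void *)dst, .msg_namelen = sizeof(*dst),
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = cbuf.buf, .msg_controllen = sizeof(cbuf.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        /* one SOL_IP/IP_TTL control message; ip_cmsg_send() checks 1..255 */
        cmsg->cmsg_level = IPPROTO_IP;
        cmsg->cmsg_type = IP_TTL;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &ttl, sizeof(int));

        return sendmsg(fd, &msg, 0);
}
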
302
303
304 /* Special input handler for packets caught by the router alert option.
305    They are selected only by protocol field and then processed like
306    local ones, but only if someone wants them!  Otherwise a router
307    not running rsvpd will kill RSVP.
308 
309    What user level does with them is a user level problem.
310    I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
311    but the receiver should be clever enough, e.g. to forward mtrace requests
312    sent to a multicast group so they reach the destination's designated router.
313  */
314 struct ip_ra_chain __rcu *ip_ra_chain;
315 static DEFINE_SPINLOCK(ip_ra_lock);
316
317
318 static void ip_ra_destroy_rcu(struct rcu_head *head)
319 {
320 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
321
322 sock_put(ra->saved_sk);
323 kfree(ra);
324 }
325
326 int ip_ra_control(struct sock *sk, unsigned char on,
327 void (*destructor)(struct sock *))
328 {
329 struct ip_ra_chain *ra, *new_ra;
330 struct ip_ra_chain __rcu **rap;
331
332 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
333 return -EINVAL;
334
335 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
336
337 spin_lock_bh(&ip_ra_lock);
338 for (rap = &ip_ra_chain;
339 (ra = rcu_dereference_protected(*rap,
340 lockdep_is_held(&ip_ra_lock))) != NULL;
341 rap = &ra->next) {
342 if (ra->sk == sk) {
343 if (on) {
344 spin_unlock_bh(&ip_ra_lock);
345 kfree(new_ra);
346 return -EADDRINUSE;
347 }
348 /* don't let ip_call_ra_chain() use sk again */
349 ra->sk = NULL;
350 RCU_INIT_POINTER(*rap, ra->next);
351 spin_unlock_bh(&ip_ra_lock);
352
353 if (ra->destructor)
354 ra->destructor(sk);
355 /*
356 * Delay sock_put(sk) and kfree(ra) until after one RCU grace
357 * period. This guarantees ip_call_ra_chain() doesn't need
358 * to mess with socket refcounts.
359 */
360 ra->saved_sk = sk;
361 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
362 return 0;
363 }
364 }
365 if (!new_ra) {
366 spin_unlock_bh(&ip_ra_lock);
367 return -ENOBUFS;
368 }
369 new_ra->sk = sk;
370 new_ra->destructor = destructor;
371
372 RCU_INIT_POINTER(new_ra->next, ra);
373 rcu_assign_pointer(*rap, new_ra);
374 sock_hold(sk);
375 spin_unlock_bh(&ip_ra_lock);
376
377 return 0;
378 }
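
As an illustration of the comment above, a hedged userspace sketch of how a routing daemon might register for router-alert packets (IPPROTO_RSVP and the function name are examples only; CAP_NET_RAW is required for the raw socket):

#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

static int open_router_alert_socket(void)
{
        int on = 1;
        int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);

        if (fd < 0)
                return -1;
        /* ask the kernel to divert router-alert packets of this protocol
         * to this socket; ip_ra_control() adds us to ip_ra_chain
         */
        if (setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on)) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}
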
379
380 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
381 __be16 port, u32 info, u8 *payload)
382 {
383 struct sock_exterr_skb *serr;
384
385 skb = skb_clone(skb, GFP_ATOMIC);
386 if (!skb)
387 return;
388
389 serr = SKB_EXT_ERR(skb);
390 serr->ee.ee_errno = err;
391 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
392 serr->ee.ee_type = icmp_hdr(skb)->type;
393 serr->ee.ee_code = icmp_hdr(skb)->code;
394 serr->ee.ee_pad = 0;
395 serr->ee.ee_info = info;
396 serr->ee.ee_data = 0;
397 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
398 skb_network_header(skb);
399 serr->port = port;
400
401 if (skb_pull(skb, payload - skb->data)) {
402 skb_reset_transport_header(skb);
403 if (sock_queue_err_skb(sk, skb) == 0)
404 return;
405 }
406 kfree_skb(skb);
407 }
408
409 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
410 {
411 struct inet_sock *inet = inet_sk(sk);
412 struct sock_exterr_skb *serr;
413 struct iphdr *iph;
414 struct sk_buff *skb;
415
416 if (!inet->recverr)
417 return;
418
419 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
420 if (!skb)
421 return;
422
423 skb_put(skb, sizeof(struct iphdr));
424 skb_reset_network_header(skb);
425 iph = ip_hdr(skb);
426 iph->daddr = daddr;
427
428 serr = SKB_EXT_ERR(skb);
429 serr->ee.ee_errno = err;
430 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
431 serr->ee.ee_type = 0;
432 serr->ee.ee_code = 0;
433 serr->ee.ee_pad = 0;
434 serr->ee.ee_info = info;
435 serr->ee.ee_data = 0;
436 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
437 serr->port = port;
438
439 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
440 skb_reset_transport_header(skb);
441
442 if (sock_queue_err_skb(sk, skb))
443 kfree_skb(skb);
444 }
445
446 /* For some errors we have valid addr_offset even with zero payload and
447 * zero port. Also, addr_offset should be supported if port is set.
448 */
449 static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
450 {
451 return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
452 serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
453 }
454
455 /* IPv4 supports cmsg on all icmp errors and some timestamps
456 *
457 * Timestamp code paths do not initialize the fields expected by cmsg:
458 * the PKTINFO fields in skb->cb[]. Fill those in here.
459 */
460 static bool ipv4_datagram_support_cmsg(const struct sock *sk,
461 struct sk_buff *skb,
462 int ee_origin)
463 {
464 struct in_pktinfo *info;
465
466 if (ee_origin == SO_EE_ORIGIN_ICMP)
467 return true;
468
469 if (ee_origin == SO_EE_ORIGIN_LOCAL)
470 return false;
471
472 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
473 * timestamp with egress dev. Not possible for packets without dev
474 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
475 */
476 if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
477 (!skb->dev))
478 return false;
479
480 info = PKTINFO_SKB_CB(skb);
481 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
482 info->ipi_ifindex = skb->dev->ifindex;
483 return true;
484 }
485
486 /*
487 * Handle MSG_ERRQUEUE
488 */
489 int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
490 {
491 struct sock_exterr_skb *serr;
492 struct sk_buff *skb;
493 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
494 struct {
495 struct sock_extended_err ee;
496 struct sockaddr_in offender;
497 } errhdr;
498 int err;
499 int copied;
500
501 WARN_ON_ONCE(sk->sk_family == AF_INET6);
502
503 err = -EAGAIN;
504 skb = sock_dequeue_err_skb(sk);
505 if (!skb)
506 goto out;
507
508 copied = skb->len;
509 if (copied > len) {
510 msg->msg_flags |= MSG_TRUNC;
511 copied = len;
512 }
513 err = skb_copy_datagram_msg(skb, 0, msg, copied);
514 if (unlikely(err)) {
515 kfree_skb(skb);
516 return err;
517 }
518 sock_recv_timestamp(msg, sk, skb);
519
520 serr = SKB_EXT_ERR(skb);
521
522 if (sin && ipv4_datagram_support_addr(serr)) {
523 sin->sin_family = AF_INET;
524 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
525 serr->addr_offset);
526 sin->sin_port = serr->port;
527 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
528 *addr_len = sizeof(*sin);
529 }
530
531 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
532 sin = &errhdr.offender;
533 memset(sin, 0, sizeof(*sin));
534
535 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
536 sin->sin_family = AF_INET;
537 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
538 if (inet_sk(sk)->cmsg_flags)
539 ip_cmsg_recv(msg, skb);
540 }
541
542 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
543
544 /* Now we could try to dump offended packet options */
545
546 msg->msg_flags |= MSG_ERRQUEUE;
547 err = copied;
548
549 consume_skb(skb);
550 out:
551 return err;
552 }
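
For completeness, a userspace sketch of the receive side of this error path (the function name, buffer sizes and printf are illustrative, not from this file). IP_RECVERR has to be enabled before the failing transmission; the MSG_ERRQUEUE read then returns the queued skb together with the IP_RECVERR control message built above:

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/errqueue.h>

static void drain_one_error(int fd)
{
        int on = 1;
        char data[512];
        union { char buf[512]; struct cmsghdr align; } cbuf;
        struct sockaddr_in addr;
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_name = &addr, .msg_namelen = sizeof(addr),
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = cbuf.buf, .msg_controllen = sizeof(cbuf.buf),
        };
        struct cmsghdr *cmsg;

        /* normally set right after socket creation, before any send */
        setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                return;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == IPPROTO_IP &&
                    cmsg->cmsg_type == IP_RECVERR) {
                        struct sock_extended_err *ee;

                        ee = (struct sock_extended_err *)CMSG_DATA(cmsg);
                        printf("errno=%u origin=%u type=%u code=%u\n",
                               ee->ee_errno, ee->ee_origin,
                               ee->ee_type, ee->ee_code);
                }
        }
}
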
553
554
555 /*
556 * Socket option code for IP. This is the end of the line after any
557 * TCP,UDP etc options on an IP socket.
558 */
559 static bool setsockopt_needs_rtnl(int optname)
560 {
561 switch (optname) {
562 case IP_ADD_MEMBERSHIP:
563 case IP_ADD_SOURCE_MEMBERSHIP:
564 case IP_BLOCK_SOURCE:
565 case IP_DROP_MEMBERSHIP:
566 case IP_DROP_SOURCE_MEMBERSHIP:
567 case IP_MSFILTER:
568 case IP_UNBLOCK_SOURCE:
569 case MCAST_BLOCK_SOURCE:
570 case MCAST_MSFILTER:
571 case MCAST_JOIN_GROUP:
572 case MCAST_JOIN_SOURCE_GROUP:
573 case MCAST_LEAVE_GROUP:
574 case MCAST_LEAVE_SOURCE_GROUP:
575 case MCAST_UNBLOCK_SOURCE:
576 return true;
577 }
578 return false;
579 }
580
581 static int do_ip_setsockopt(struct sock *sk, int level,
582 int optname, char __user *optval, unsigned int optlen)
583 {
584 struct inet_sock *inet = inet_sk(sk);
585 struct net *net = sock_net(sk);
586 int val = 0, err;
587 bool needs_rtnl = setsockopt_needs_rtnl(optname);
588
589 switch (optname) {
590 case IP_PKTINFO:
591 case IP_RECVTTL:
592 case IP_RECVOPTS:
593 case IP_RECVTOS:
594 case IP_RETOPTS:
595 case IP_TOS:
596 case IP_TTL:
597 case IP_HDRINCL:
598 case IP_MTU_DISCOVER:
599 case IP_RECVERR:
600 case IP_ROUTER_ALERT:
601 case IP_FREEBIND:
602 case IP_PASSSEC:
603 case IP_TRANSPARENT:
604 case IP_MINTTL:
605 case IP_NODEFRAG:
606 case IP_BIND_ADDRESS_NO_PORT:
607 case IP_UNICAST_IF:
608 case IP_MULTICAST_TTL:
609 case IP_MULTICAST_ALL:
610 case IP_MULTICAST_LOOP:
611 case IP_RECVORIGDSTADDR:
612 case IP_CHECKSUM:
613 if (optlen >= sizeof(int)) {
614 if (get_user(val, (int __user *) optval))
615 return -EFAULT;
616 } else if (optlen >= sizeof(char)) {
617 unsigned char ucval;
618
619 if (get_user(ucval, (unsigned char __user *) optval))
620 return -EFAULT;
621 val = (int) ucval;
622 }
623 }
624
625 /* If optlen==0, it is equivalent to val == 0 */
626
627 if (ip_mroute_opt(optname))
628 return ip_mroute_setsockopt(sk, optname, optval, optlen);
629
630 err = 0;
631 if (needs_rtnl)
632 rtnl_lock();
633 lock_sock(sk);
634
635 switch (optname) {
636 case IP_OPTIONS:
637 {
638 struct ip_options_rcu *old, *opt = NULL;
639
640 if (optlen > 40)
641 goto e_inval;
642 err = ip_options_get_from_user(sock_net(sk), &opt,
643 optval, optlen);
644 if (err)
645 break;
646 old = rcu_dereference_protected(inet->inet_opt,
647 lockdep_sock_is_held(sk));
648 if (inet->is_icsk) {
649 struct inet_connection_sock *icsk = inet_csk(sk);
650 #if IS_ENABLED(CONFIG_IPV6)
651 if (sk->sk_family == PF_INET ||
652 (!((1 << sk->sk_state) &
653 (TCPF_LISTEN | TCPF_CLOSE)) &&
654 inet->inet_daddr != LOOPBACK4_IPV6)) {
655 #endif
656 if (old)
657 icsk->icsk_ext_hdr_len -= old->opt.optlen;
658 if (opt)
659 icsk->icsk_ext_hdr_len += opt->opt.optlen;
660 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
661 #if IS_ENABLED(CONFIG_IPV6)
662 }
663 #endif
664 }
665 rcu_assign_pointer(inet->inet_opt, opt);
666 if (old)
667 kfree_rcu(old, rcu);
668 break;
669 }
670 case IP_PKTINFO:
671 if (val)
672 inet->cmsg_flags |= IP_CMSG_PKTINFO;
673 else
674 inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
675 break;
676 case IP_RECVTTL:
677 if (val)
678 inet->cmsg_flags |= IP_CMSG_TTL;
679 else
680 inet->cmsg_flags &= ~IP_CMSG_TTL;
681 break;
682 case IP_RECVTOS:
683 if (val)
684 inet->cmsg_flags |= IP_CMSG_TOS;
685 else
686 inet->cmsg_flags &= ~IP_CMSG_TOS;
687 break;
688 case IP_RECVOPTS:
689 if (val)
690 inet->cmsg_flags |= IP_CMSG_RECVOPTS;
691 else
692 inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
693 break;
694 case IP_RETOPTS:
695 if (val)
696 inet->cmsg_flags |= IP_CMSG_RETOPTS;
697 else
698 inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
699 break;
700 case IP_PASSSEC:
701 if (val)
702 inet->cmsg_flags |= IP_CMSG_PASSSEC;
703 else
704 inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
705 break;
706 case IP_RECVORIGDSTADDR:
707 if (val)
708 inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
709 else
710 inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
711 break;
712 case IP_CHECKSUM:
713 if (val) {
714 if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
715 inet_inc_convert_csum(sk);
716 inet->cmsg_flags |= IP_CMSG_CHECKSUM;
717 }
718 } else {
719 if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
720 inet_dec_convert_csum(sk);
721 inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
722 }
723 }
724 break;
725 case IP_TOS: /* This sets both TOS and Precedence */
726 if (sk->sk_type == SOCK_STREAM) {
727 val &= ~INET_ECN_MASK;
728 val |= inet->tos & INET_ECN_MASK;
729 }
730 if (inet->tos != val) {
731 inet->tos = val;
732 sk->sk_priority = rt_tos2priority(val);
733 sk_dst_reset(sk);
734 }
735 break;
736 case IP_TTL:
737 if (optlen < 1)
738 goto e_inval;
739 if (val != -1 && (val < 1 || val > 255))
740 goto e_inval;
741 inet->uc_ttl = val;
742 break;
743 case IP_HDRINCL:
744 if (sk->sk_type != SOCK_RAW) {
745 err = -ENOPROTOOPT;
746 break;
747 }
748 inet->hdrincl = val ? 1 : 0;
749 break;
750 case IP_NODEFRAG:
751 if (sk->sk_type != SOCK_RAW) {
752 err = -ENOPROTOOPT;
753 break;
754 }
755 inet->nodefrag = val ? 1 : 0;
756 break;
757 case IP_BIND_ADDRESS_NO_PORT:
758 inet->bind_address_no_port = val ? 1 : 0;
759 break;
760 case IP_MTU_DISCOVER:
761 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
762 goto e_inval;
763 inet->pmtudisc = val;
764 break;
765 case IP_RECVERR:
766 inet->recverr = !!val;
767 if (!val)
768 skb_queue_purge(&sk->sk_error_queue);
769 break;
770 case IP_MULTICAST_TTL:
771 if (sk->sk_type == SOCK_STREAM)
772 goto e_inval;
773 if (optlen < 1)
774 goto e_inval;
775 if (val == -1)
776 val = 1;
777 if (val < 0 || val > 255)
778 goto e_inval;
779 inet->mc_ttl = val;
780 break;
781 case IP_MULTICAST_LOOP:
782 if (optlen < 1)
783 goto e_inval;
784 inet->mc_loop = !!val;
785 break;
786 case IP_UNICAST_IF:
787 {
788 struct net_device *dev = NULL;
789 int ifindex;
790
791 if (optlen != sizeof(int))
792 goto e_inval;
793
794 ifindex = (__force int)ntohl((__force __be32)val);
795 if (ifindex == 0) {
796 inet->uc_index = 0;
797 err = 0;
798 break;
799 }
800
801 dev = dev_get_by_index(sock_net(sk), ifindex);
802 err = -EADDRNOTAVAIL;
803 if (!dev)
804 break;
805 dev_put(dev);
806
807 err = -EINVAL;
808 if (sk->sk_bound_dev_if)
809 break;
810
811 inet->uc_index = ifindex;
812 err = 0;
813 break;
814 }
815 case IP_MULTICAST_IF:
816 {
817 struct ip_mreqn mreq;
818 struct net_device *dev = NULL;
819
820 if (sk->sk_type == SOCK_STREAM)
821 goto e_inval;
822 /*
823 * Check the arguments are allowable
824 */
825
826 if (optlen < sizeof(struct in_addr))
827 goto e_inval;
828
829 err = -EFAULT;
830 if (optlen >= sizeof(struct ip_mreqn)) {
831 if (copy_from_user(&mreq, optval, sizeof(mreq)))
832 break;
833 } else {
834 memset(&mreq, 0, sizeof(mreq));
835 if (optlen >= sizeof(struct ip_mreq)) {
836 if (copy_from_user(&mreq, optval,
837 sizeof(struct ip_mreq)))
838 break;
839 } else if (optlen >= sizeof(struct in_addr)) {
840 if (copy_from_user(&mreq.imr_address, optval,
841 sizeof(struct in_addr)))
842 break;
843 }
844 }
845
846 if (!mreq.imr_ifindex) {
847 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
848 inet->mc_index = 0;
849 inet->mc_addr = 0;
850 err = 0;
851 break;
852 }
853 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
854 if (dev)
855 mreq.imr_ifindex = dev->ifindex;
856 } else
857 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
858
859
860 err = -EADDRNOTAVAIL;
861 if (!dev)
862 break;
863 dev_put(dev);
864
865 err = -EINVAL;
866 if (sk->sk_bound_dev_if &&
867 mreq.imr_ifindex != sk->sk_bound_dev_if)
868 break;
869
870 inet->mc_index = mreq.imr_ifindex;
871 inet->mc_addr = mreq.imr_address.s_addr;
872 err = 0;
873 break;
874 }
875
876 case IP_ADD_MEMBERSHIP:
877 case IP_DROP_MEMBERSHIP:
878 {
879 struct ip_mreqn mreq;
880
881 err = -EPROTO;
882 if (inet_sk(sk)->is_icsk)
883 break;
884
885 if (optlen < sizeof(struct ip_mreq))
886 goto e_inval;
887 err = -EFAULT;
888 if (optlen >= sizeof(struct ip_mreqn)) {
889 if (copy_from_user(&mreq, optval, sizeof(mreq)))
890 break;
891 } else {
892 memset(&mreq, 0, sizeof(mreq));
893 if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
894 break;
895 }
896
897 if (optname == IP_ADD_MEMBERSHIP)
898 err = ip_mc_join_group(sk, &mreq);
899 else
900 err = ip_mc_leave_group(sk, &mreq);
901 break;
902 }
903 case IP_MSFILTER:
904 {
905 struct ip_msfilter *msf;
906
907 if (optlen < IP_MSFILTER_SIZE(0))
908 goto e_inval;
909 if (optlen > sysctl_optmem_max) {
910 err = -ENOBUFS;
911 break;
912 }
913 msf = kmalloc(optlen, GFP_KERNEL);
914 if (!msf) {
915 err = -ENOBUFS;
916 break;
917 }
918 err = -EFAULT;
919 if (copy_from_user(msf, optval, optlen)) {
920 kfree(msf);
921 break;
922 }
923 /* numsrc >= (1G-4) overflow in 32 bits */
924 if (msf->imsf_numsrc >= 0x3ffffffcU ||
925 msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
926 kfree(msf);
927 err = -ENOBUFS;
928 break;
929 }
930 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
931 kfree(msf);
932 err = -EINVAL;
933 break;
934 }
935 err = ip_mc_msfilter(sk, msf, 0);
936 kfree(msf);
937 break;
938 }
939 case IP_BLOCK_SOURCE:
940 case IP_UNBLOCK_SOURCE:
941 case IP_ADD_SOURCE_MEMBERSHIP:
942 case IP_DROP_SOURCE_MEMBERSHIP:
943 {
944 struct ip_mreq_source mreqs;
945 int omode, add;
946
947 if (optlen != sizeof(struct ip_mreq_source))
948 goto e_inval;
949 if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
950 err = -EFAULT;
951 break;
952 }
953 if (optname == IP_BLOCK_SOURCE) {
954 omode = MCAST_EXCLUDE;
955 add = 1;
956 } else if (optname == IP_UNBLOCK_SOURCE) {
957 omode = MCAST_EXCLUDE;
958 add = 0;
959 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
960 struct ip_mreqn mreq;
961
962 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
963 mreq.imr_address.s_addr = mreqs.imr_interface;
964 mreq.imr_ifindex = 0;
965 err = ip_mc_join_group(sk, &mreq);
966 if (err && err != -EADDRINUSE)
967 break;
968 omode = MCAST_INCLUDE;
969 add = 1;
970 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
971 omode = MCAST_INCLUDE;
972 add = 0;
973 }
974 err = ip_mc_source(add, omode, sk, &mreqs, 0);
975 break;
976 }
977 case MCAST_JOIN_GROUP:
978 case MCAST_LEAVE_GROUP:
979 {
980 struct group_req greq;
981 struct sockaddr_in *psin;
982 struct ip_mreqn mreq;
983
984 if (optlen < sizeof(struct group_req))
985 goto e_inval;
986 err = -EFAULT;
987 if (copy_from_user(&greq, optval, sizeof(greq)))
988 break;
989 psin = (struct sockaddr_in *)&greq.gr_group;
990 if (psin->sin_family != AF_INET)
991 goto e_inval;
992 memset(&mreq, 0, sizeof(mreq));
993 mreq.imr_multiaddr = psin->sin_addr;
994 mreq.imr_ifindex = greq.gr_interface;
995
996 if (optname == MCAST_JOIN_GROUP)
997 err = ip_mc_join_group(sk, &mreq);
998 else
999 err = ip_mc_leave_group(sk, &mreq);
1000 break;
1001 }
1002 case MCAST_JOIN_SOURCE_GROUP:
1003 case MCAST_LEAVE_SOURCE_GROUP:
1004 case MCAST_BLOCK_SOURCE:
1005 case MCAST_UNBLOCK_SOURCE:
1006 {
1007 struct group_source_req greqs;
1008 struct ip_mreq_source mreqs;
1009 struct sockaddr_in *psin;
1010 int omode, add;
1011
1012 if (optlen != sizeof(struct group_source_req))
1013 goto e_inval;
1014 if (copy_from_user(&greqs, optval, sizeof(greqs))) {
1015 err = -EFAULT;
1016 break;
1017 }
1018 if (greqs.gsr_group.ss_family != AF_INET ||
1019 greqs.gsr_source.ss_family != AF_INET) {
1020 err = -EADDRNOTAVAIL;
1021 break;
1022 }
1023 psin = (struct sockaddr_in *)&greqs.gsr_group;
1024 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
1025 psin = (struct sockaddr_in *)&greqs.gsr_source;
1026 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
1027 mreqs.imr_interface = 0; /* use index for mc_source */
1028
1029 if (optname == MCAST_BLOCK_SOURCE) {
1030 omode = MCAST_EXCLUDE;
1031 add = 1;
1032 } else if (optname == MCAST_UNBLOCK_SOURCE) {
1033 omode = MCAST_EXCLUDE;
1034 add = 0;
1035 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
1036 struct ip_mreqn mreq;
1037
1038 psin = (struct sockaddr_in *)&greqs.gsr_group;
1039 mreq.imr_multiaddr = psin->sin_addr;
1040 mreq.imr_address.s_addr = 0;
1041 mreq.imr_ifindex = greqs.gsr_interface;
1042 err = ip_mc_join_group(sk, &mreq);
1043 if (err && err != -EADDRINUSE)
1044 break;
1045 greqs.gsr_interface = mreq.imr_ifindex;
1046 omode = MCAST_INCLUDE;
1047 add = 1;
1048 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
1049 omode = MCAST_INCLUDE;
1050 add = 0;
1051 }
1052 err = ip_mc_source(add, omode, sk, &mreqs,
1053 greqs.gsr_interface);
1054 break;
1055 }
1056 case MCAST_MSFILTER:
1057 {
1058 struct sockaddr_in *psin;
1059 struct ip_msfilter *msf = NULL;
1060 struct group_filter *gsf = NULL;
1061 int msize, i, ifindex;
1062
1063 if (optlen < GROUP_FILTER_SIZE(0))
1064 goto e_inval;
1065 if (optlen > sysctl_optmem_max) {
1066 err = -ENOBUFS;
1067 break;
1068 }
1069 gsf = kmalloc(optlen, GFP_KERNEL);
1070 if (!gsf) {
1071 err = -ENOBUFS;
1072 break;
1073 }
1074 err = -EFAULT;
1075 if (copy_from_user(gsf, optval, optlen))
1076 goto mc_msf_out;
1077
1078 /* numsrc >= (4G-140)/128 overflow in 32 bits */
1079 if (gsf->gf_numsrc >= 0x1ffffff ||
1080 gsf->gf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
1081 err = -ENOBUFS;
1082 goto mc_msf_out;
1083 }
1084 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
1085 err = -EINVAL;
1086 goto mc_msf_out;
1087 }
1088 msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
1089 msf = kmalloc(msize, GFP_KERNEL);
1090 if (!msf) {
1091 err = -ENOBUFS;
1092 goto mc_msf_out;
1093 }
1094 ifindex = gsf->gf_interface;
1095 psin = (struct sockaddr_in *)&gsf->gf_group;
1096 if (psin->sin_family != AF_INET) {
1097 err = -EADDRNOTAVAIL;
1098 goto mc_msf_out;
1099 }
1100 msf->imsf_multiaddr = psin->sin_addr.s_addr;
1101 msf->imsf_interface = 0;
1102 msf->imsf_fmode = gsf->gf_fmode;
1103 msf->imsf_numsrc = gsf->gf_numsrc;
1104 err = -EADDRNOTAVAIL;
1105 for (i = 0; i < gsf->gf_numsrc; ++i) {
1106 psin = (struct sockaddr_in *)&gsf->gf_slist[i];
1107
1108 if (psin->sin_family != AF_INET)
1109 goto mc_msf_out;
1110 msf->imsf_slist[i] = psin->sin_addr.s_addr;
1111 }
1112 kfree(gsf);
1113 gsf = NULL;
1114
1115 err = ip_mc_msfilter(sk, msf, ifindex);
1116 mc_msf_out:
1117 kfree(msf);
1118 kfree(gsf);
1119 break;
1120 }
1121 case IP_MULTICAST_ALL:
1122 if (optlen < 1)
1123 goto e_inval;
1124 if (val != 0 && val != 1)
1125 goto e_inval;
1126 inet->mc_all = val;
1127 break;
1128 case IP_ROUTER_ALERT:
1129 err = ip_ra_control(sk, val ? 1 : 0, NULL);
1130 break;
1131
1132 case IP_FREEBIND:
1133 if (optlen < 1)
1134 goto e_inval;
1135 inet->freebind = !!val;
1136 break;
1137
1138 case IP_IPSEC_POLICY:
1139 case IP_XFRM_POLICY:
1140 err = -EPERM;
1141 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1142 break;
1143 err = xfrm_user_policy(sk, optname, optval, optlen);
1144 break;
1145
1146 case IP_TRANSPARENT:
1147 if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1148 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1149 err = -EPERM;
1150 break;
1151 }
1152 if (optlen < 1)
1153 goto e_inval;
1154 inet->transparent = !!val;
1155 break;
1156
1157 case IP_MINTTL:
1158 if (optlen < 1)
1159 goto e_inval;
1160 if (val < 0 || val > 255)
1161 goto e_inval;
1162 inet->min_ttl = val;
1163 break;
1164
1165 default:
1166 err = -ENOPROTOOPT;
1167 break;
1168 }
1169 release_sock(sk);
1170 if (needs_rtnl)
1171 rtnl_unlock();
1172 return err;
1173
1174 e_inval:
1175 release_sock(sk);
1176 if (needs_rtnl)
1177 rtnl_unlock();
1178 return -EINVAL;
1179 }
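
A hedged userspace sketch of a few of the per-socket options handled by the switch in do_ip_setsockopt() above (the chosen values are examples only, not recommendations):

#include <netinet/in.h>
#include <sys/socket.h>

static int tune_ip_socket(int fd)
{
        int tos = 0x10;                 /* IPTOS_LOWDELAY */
        int ttl = 64;
        int pmtu = IP_PMTUDISC_DO;      /* always set DF, never fragment */

        if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) ||
            setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) ||
            setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &pmtu, sizeof(pmtu)))
                return -1;
        return 0;
}
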
1180
1181 /**
1182 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1183 * @sk: socket
1184 * @skb: buffer
1185 *
1186 * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
1187 * destination in skb->cb[] before the dst is dropped.
1188 * This way, the receiver doesn't take cache line misses to read the rtable.
1189 */
1190 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
1191 {
1192 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1193 bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
1194 ipv6_sk_rxinfo(sk);
1195
1196 if (prepare && skb_rtable(skb)) {
1197 /* skb->cb is overloaded: prior to this point it is IP{6}CB
1198 * which has interface index (iif) as the first member of the
1199 * underlying inet{6}_skb_parm struct. This code then overlays
1200 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
1201 * element so the iif is picked up from the prior IPCB
1202 */
1203 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1204 } else {
1205 pktinfo->ipi_ifindex = 0;
1206 pktinfo->ipi_spec_dst.s_addr = 0;
1207 }
1208 skb_dst_drop(skb);
1209 }
1210
1211 int ip_setsockopt(struct sock *sk, int level,
1212 int optname, char __user *optval, unsigned int optlen)
1213 {
1214 int err;
1215
1216 if (level != SOL_IP)
1217 return -ENOPROTOOPT;
1218
1219 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1220 #ifdef CONFIG_NETFILTER
1221 /* we need to exclude all possible ENOPROTOOPTs except default case */
1222 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1223 optname != IP_IPSEC_POLICY &&
1224 optname != IP_XFRM_POLICY &&
1225 !ip_mroute_opt(optname)) {
1226 lock_sock(sk);
1227 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1228 release_sock(sk);
1229 }
1230 #endif
1231 return err;
1232 }
1233 EXPORT_SYMBOL(ip_setsockopt);
1234
1235 #ifdef CONFIG_COMPAT
1236 int compat_ip_setsockopt(struct sock *sk, int level, int optname,
1237 char __user *optval, unsigned int optlen)
1238 {
1239 int err;
1240
1241 if (level != SOL_IP)
1242 return -ENOPROTOOPT;
1243
1244 if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
1245 return compat_mc_setsockopt(sk, level, optname, optval, optlen,
1246 ip_setsockopt);
1247
1248 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1249 #ifdef CONFIG_NETFILTER
1250 /* we need to exclude all possible ENOPROTOOPTs except default case */
1251 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1252 optname != IP_IPSEC_POLICY &&
1253 optname != IP_XFRM_POLICY &&
1254 !ip_mroute_opt(optname)) {
1255 lock_sock(sk);
1256 err = compat_nf_setsockopt(sk, PF_INET, optname,
1257 optval, optlen);
1258 release_sock(sk);
1259 }
1260 #endif
1261 return err;
1262 }
1263 EXPORT_SYMBOL(compat_ip_setsockopt);
1264 #endif
1265
1266 /*
1267 * Get the options. Note for future reference. The GET of IP options gets
1268 * the _received_ ones. The set sets the _sent_ ones.
1269 */
1270
1271 static bool getsockopt_needs_rtnl(int optname)
1272 {
1273 switch (optname) {
1274 case IP_MSFILTER:
1275 case MCAST_MSFILTER:
1276 return true;
1277 }
1278 return false;
1279 }
1280
1281 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1282 char __user *optval, int __user *optlen, unsigned int flags)
1283 {
1284 struct inet_sock *inet = inet_sk(sk);
1285 bool needs_rtnl = getsockopt_needs_rtnl(optname);
1286 int val, err = 0;
1287 int len;
1288
1289 if (level != SOL_IP)
1290 return -EOPNOTSUPP;
1291
1292 if (ip_mroute_opt(optname))
1293 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1294
1295 if (get_user(len, optlen))
1296 return -EFAULT;
1297 if (len < 0)
1298 return -EINVAL;
1299
1300 if (needs_rtnl)
1301 rtnl_lock();
1302 lock_sock(sk);
1303
1304 switch (optname) {
1305 case IP_OPTIONS:
1306 {
1307 unsigned char optbuf[sizeof(struct ip_options)+40];
1308 struct ip_options *opt = (struct ip_options *)optbuf;
1309 struct ip_options_rcu *inet_opt;
1310
1311 inet_opt = rcu_dereference_protected(inet->inet_opt,
1312 lockdep_sock_is_held(sk));
1313 opt->optlen = 0;
1314 if (inet_opt)
1315 memcpy(optbuf, &inet_opt->opt,
1316 sizeof(struct ip_options) +
1317 inet_opt->opt.optlen);
1318 release_sock(sk);
1319
1320 if (opt->optlen == 0)
1321 return put_user(0, optlen);
1322
1323 ip_options_undo(opt);
1324
1325 len = min_t(unsigned int, len, opt->optlen);
1326 if (put_user(len, optlen))
1327 return -EFAULT;
1328 if (copy_to_user(optval, opt->__data, len))
1329 return -EFAULT;
1330 return 0;
1331 }
1332 case IP_PKTINFO:
1333 val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
1334 break;
1335 case IP_RECVTTL:
1336 val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
1337 break;
1338 case IP_RECVTOS:
1339 val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
1340 break;
1341 case IP_RECVOPTS:
1342 val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
1343 break;
1344 case IP_RETOPTS:
1345 val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
1346 break;
1347 case IP_PASSSEC:
1348 val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
1349 break;
1350 case IP_RECVORIGDSTADDR:
1351 val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
1352 break;
1353 case IP_CHECKSUM:
1354 val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
1355 break;
1356 case IP_TOS:
1357 val = inet->tos;
1358 break;
1359 case IP_TTL:
1360 {
1361 struct net *net = sock_net(sk);
1362 val = (inet->uc_ttl == -1 ?
1363 net->ipv4.sysctl_ip_default_ttl :
1364 inet->uc_ttl);
1365 break;
1366 }
1367 case IP_HDRINCL:
1368 val = inet->hdrincl;
1369 break;
1370 case IP_NODEFRAG:
1371 val = inet->nodefrag;
1372 break;
1373 case IP_BIND_ADDRESS_NO_PORT:
1374 val = inet->bind_address_no_port;
1375 break;
1376 case IP_MTU_DISCOVER:
1377 val = inet->pmtudisc;
1378 break;
1379 case IP_MTU:
1380 {
1381 struct dst_entry *dst;
1382 val = 0;
1383 dst = sk_dst_get(sk);
1384 if (dst) {
1385 val = dst_mtu(dst);
1386 dst_release(dst);
1387 }
1388 if (!val) {
1389 release_sock(sk);
1390 return -ENOTCONN;
1391 }
1392 break;
1393 }
1394 case IP_RECVERR:
1395 val = inet->recverr;
1396 break;
1397 case IP_MULTICAST_TTL:
1398 val = inet->mc_ttl;
1399 break;
1400 case IP_MULTICAST_LOOP:
1401 val = inet->mc_loop;
1402 break;
1403 case IP_UNICAST_IF:
1404 val = (__force int)htonl((__u32) inet->uc_index);
1405 break;
1406 case IP_MULTICAST_IF:
1407 {
1408 struct in_addr addr;
1409 len = min_t(unsigned int, len, sizeof(struct in_addr));
1410 addr.s_addr = inet->mc_addr;
1411 release_sock(sk);
1412
1413 if (put_user(len, optlen))
1414 return -EFAULT;
1415 if (copy_to_user(optval, &addr, len))
1416 return -EFAULT;
1417 return 0;
1418 }
1419 case IP_MSFILTER:
1420 {
1421 struct ip_msfilter msf;
1422
1423 if (len < IP_MSFILTER_SIZE(0)) {
1424 err = -EINVAL;
1425 goto out;
1426 }
1427 if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
1428 err = -EFAULT;
1429 goto out;
1430 }
1431 err = ip_mc_msfget(sk, &msf,
1432 (struct ip_msfilter __user *)optval, optlen);
1433 goto out;
1434 }
1435 case MCAST_MSFILTER:
1436 {
1437 struct group_filter gsf;
1438
1439 if (len < GROUP_FILTER_SIZE(0)) {
1440 err = -EINVAL;
1441 goto out;
1442 }
1443 if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
1444 err = -EFAULT;
1445 goto out;
1446 }
1447 err = ip_mc_gsfget(sk, &gsf,
1448 (struct group_filter __user *)optval,
1449 optlen);
1450 goto out;
1451 }
1452 case IP_MULTICAST_ALL:
1453 val = inet->mc_all;
1454 break;
1455 case IP_PKTOPTIONS:
1456 {
1457 struct msghdr msg;
1458
1459 release_sock(sk);
1460
1461 if (sk->sk_type != SOCK_STREAM)
1462 return -ENOPROTOOPT;
1463
1464 msg.msg_control = (__force void *) optval;
1465 msg.msg_controllen = len;
1466 msg.msg_flags = flags;
1467
1468 if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
1469 struct in_pktinfo info;
1470
1471 info.ipi_addr.s_addr = inet->inet_rcv_saddr;
1472 info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
1473 info.ipi_ifindex = inet->mc_index;
1474 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1475 }
1476 if (inet->cmsg_flags & IP_CMSG_TTL) {
1477 int hlim = inet->mc_ttl;
1478 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1479 }
1480 if (inet->cmsg_flags & IP_CMSG_TOS) {
1481 int tos = inet->rcv_tos;
1482 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1483 }
1484 len -= msg.msg_controllen;
1485 return put_user(len, optlen);
1486 }
1487 case IP_FREEBIND:
1488 val = inet->freebind;
1489 break;
1490 case IP_TRANSPARENT:
1491 val = inet->transparent;
1492 break;
1493 case IP_MINTTL:
1494 val = inet->min_ttl;
1495 break;
1496 default:
1497 release_sock(sk);
1498 return -ENOPROTOOPT;
1499 }
1500 release_sock(sk);
1501
1502 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1503 unsigned char ucval = (unsigned char)val;
1504 len = 1;
1505 if (put_user(len, optlen))
1506 return -EFAULT;
1507 if (copy_to_user(optval, &ucval, 1))
1508 return -EFAULT;
1509 } else {
1510 len = min_t(unsigned int, sizeof(int), len);
1511 if (put_user(len, optlen))
1512 return -EFAULT;
1513 if (copy_to_user(optval, &val, len))
1514 return -EFAULT;
1515 }
1516 return 0;
1517
1518 out:
1519 release_sock(sk);
1520 if (needs_rtnl)
1521 rtnl_unlock();
1522 return err;
1523 }
1524
1525 int ip_getsockopt(struct sock *sk, int level,
1526 int optname, char __user *optval, int __user *optlen)
1527 {
1528 int err;
1529
1530 err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
1531 #ifdef CONFIG_NETFILTER
1532 /* we need to exclude all possible ENOPROTOOPTs except default case */
1533 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1534 !ip_mroute_opt(optname)) {
1535 int len;
1536
1537 if (get_user(len, optlen))
1538 return -EFAULT;
1539
1540 lock_sock(sk);
1541 err = nf_getsockopt(sk, PF_INET, optname, optval,
1542 &len);
1543 release_sock(sk);
1544 if (err >= 0)
1545 err = put_user(len, optlen);
1546 return err;
1547 }
1548 #endif
1549 return err;
1550 }
1551 EXPORT_SYMBOL(ip_getsockopt);
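
As a usage note, IP_MTU is only meaningful on a socket that has a cached route; a minimal sketch of querying it (the function name is illustrative):

#include <netinet/in.h>
#include <sys/socket.h>

static int query_path_mtu(int fd)
{
        int mtu = 0;
        socklen_t len = sizeof(mtu);

        if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
                return -1;      /* e.g. socket not connected: -ENOTCONN above */
        return mtu;
}
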
1552
1553 #ifdef CONFIG_COMPAT
1554 int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1555 char __user *optval, int __user *optlen)
1556 {
1557 int err;
1558
1559 if (optname == MCAST_MSFILTER)
1560 return compat_mc_getsockopt(sk, level, optname, optval, optlen,
1561 ip_getsockopt);
1562
1563 err = do_ip_getsockopt(sk, level, optname, optval, optlen,
1564 MSG_CMSG_COMPAT);
1565
1566 #ifdef CONFIG_NETFILTER
1567 /* we need to exclude all possible ENOPROTOOPTs except default case */
1568 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1569 !ip_mroute_opt(optname)) {
1570 int len;
1571
1572 if (get_user(len, optlen))
1573 return -EFAULT;
1574
1575 lock_sock(sk);
1576 err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
1577 release_sock(sk);
1578 if (err >= 0)
1579 err = put_user(len, optlen);
1580 return err;
1581 }
1582 #endif
1583 return err;
1584 }
1585 EXPORT_SYMBOL(compat_ip_getsockopt);
1586 #endif