net/unix/af_unix.c
1 /*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko Eißfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by the above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listening socket
36 * has been reached. This won't break
37 * old apps and it avoids a huge number
38 * of hashed socks (for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lots of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, give the blksize as a high water mark,
59 * or a fake inode identifier (nor does it have the BSD first-socket-fstat-twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns a 0-length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for getsockname/getpeername - a BSD bug??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero-terminated)
79 * starting with a 0 byte, so that this name space does not intersect
80 * with BSD names.
81 */
82
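
/*
 * Illustrative userspace sketch (not part of this file; the name
 * "demo" is made up): binding a socket in the abstract namespace.
 * The address length counts only the bytes actually used, and the
 * name is not zero-terminated.
 *
 *	struct sockaddr_un sun;
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_UNIX;
 *	sun.sun_path[0] = '\0';			// abstract namespace marker
 *	memcpy(sun.sun_path + 1, "demo", 4);	// raw bytes, no trailing NUL
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */
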
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/signal.h>
86 #include <linux/sched.h>
87 #include <linux/errno.h>
88 #include <linux/string.h>
89 #include <linux/stat.h>
90 #include <linux/dcache.h>
91 #include <linux/namei.h>
92 #include <linux/socket.h>
93 #include <linux/un.h>
94 #include <linux/fcntl.h>
95 #include <linux/termios.h>
96 #include <linux/sockios.h>
97 #include <linux/net.h>
98 #include <linux/in.h>
99 #include <linux/fs.h>
100 #include <linux/slab.h>
101 #include <asm/uaccess.h>
102 #include <linux/skbuff.h>
103 #include <linux/netdevice.h>
104 #include <net/net_namespace.h>
105 #include <net/sock.h>
106 #include <net/tcp_states.h>
107 #include <net/af_unix.h>
108 #include <linux/proc_fs.h>
109 #include <linux/seq_file.h>
110 #include <net/scm.h>
111 #include <linux/init.h>
112 #include <linux/poll.h>
113 #include <linux/rtnetlink.h>
114 #include <linux/mount.h>
115 #include <net/checksum.h>
116 #include <linux/security.h>
117
118 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
119 static DEFINE_SPINLOCK(unix_table_lock);
120 static atomic_long_t unix_nr_socks;
121
122 #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
123
124 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
125
126 #ifdef CONFIG_SECURITY_NETWORK
127 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
128 {
129 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
130 }
131
132 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
133 {
134 scm->secid = *UNIXSID(skb);
135 }
136 #else
137 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
138 { }
139
140 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
141 { }
142 #endif /* CONFIG_SECURITY_NETWORK */
143
144 /*
145 * SMP locking strategy:
146 * the hash table is protected by the unix_table_lock spinlock;
147 * each socket's state is protected by its own spinlock.
148 */
149
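/*
 * Fold a 32-bit checksum into a hash-table index: xor the top half
 * into the bottom half, then the next byte down, and mask to
 * UNIX_HASH_SIZE buckets.
 */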
150 static inline unsigned unix_hash_fold(__wsum n)
151 {
152 unsigned hash = (__force unsigned)n;
153 hash ^= hash>>16;
154 hash ^= hash>>8;
155 return hash&(UNIX_HASH_SIZE-1);
156 }
157
158 #define unix_peer(sk) (unix_sk(sk)->peer)
159
160 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
161 {
162 return unix_peer(osk) == sk;
163 }
164
165 static inline int unix_may_send(struct sock *sk, struct sock *osk)
166 {
167 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
168 }
169
170 static inline int unix_recvq_full(struct sock const *sk)
171 {
172 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
173 }
174
175 static struct sock *unix_peer_get(struct sock *s)
176 {
177 struct sock *peer;
178
179 unix_state_lock(s);
180 peer = unix_peer(s);
181 if (peer)
182 sock_hold(peer);
183 unix_state_unlock(s);
184 return peer;
185 }
186
187 static inline void unix_release_addr(struct unix_address *addr)
188 {
189 if (atomic_dec_and_test(&addr->refcnt))
190 kfree(addr);
191 }
192
193 /*
194 * Check unix socket name:
195 * - should not be zero length.
196 * - if it does not start with a 0 byte, it is NUL-terminated (an FS object)
197 * - if it starts with a 0 byte, it is an abstract name.
198 */
199
200 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
201 {
202 if (len <= sizeof(short) || len > sizeof(*sunaddr))
203 return -EINVAL;
204 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
205 return -EINVAL;
206 if (sunaddr->sun_path[0]) {
207 /*
208 * This may look like an off-by-one error but it is a bit more
209 * subtle. 108 is the longest valid AF_UNIX path for a binding.
210 * sun_path[108] doesn't as such exist. However in kernel space
211 * we are guaranteed that it is a valid memory location in our
212 * kernel address buffer.
213 */
214 ((char *)sunaddr)[len] = 0;
215 len = strlen(sunaddr->sun_path)+1+sizeof(short);
216 return len;
217 }
218
219 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
220 return len;
221 }
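
/*
 * Illustrative userspace view (the path "/tmp/demo.sock" and "fd" are
 * made up): for a filesystem name, the effective length computed above
 * covers the family field plus the NUL-terminated path.
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *
 *	strcpy(sun.sun_path, "/tmp/demo.sock");
 *	socklen_t len = offsetof(struct sockaddr_un, sun_path)
 *			+ strlen(sun.sun_path) + 1;
 *	bind(fd, (struct sockaddr *)&sun, len);
 */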
222
223 static void __unix_remove_socket(struct sock *sk)
224 {
225 sk_del_node_init(sk);
226 }
227
228 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
229 {
230 WARN_ON(!sk_unhashed(sk));
231 sk_add_node(sk, list);
232 }
233
234 static inline void unix_remove_socket(struct sock *sk)
235 {
236 spin_lock(&unix_table_lock);
237 __unix_remove_socket(sk);
238 spin_unlock(&unix_table_lock);
239 }
240
241 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
242 {
243 spin_lock(&unix_table_lock);
244 __unix_insert_socket(list, sk);
245 spin_unlock(&unix_table_lock);
246 }
247
248 static struct sock *__unix_find_socket_byname(struct net *net,
249 struct sockaddr_un *sunname,
250 int len, int type, unsigned hash)
251 {
252 struct sock *s;
253 struct hlist_node *node;
254
255 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256 struct unix_sock *u = unix_sk(s);
257
258 if (!net_eq(sock_net(s), net))
259 continue;
260
261 if (u->addr->len == len &&
262 !memcmp(u->addr->name, sunname, len))
263 goto found;
264 }
265 s = NULL;
266 found:
267 return s;
268 }
269
270 static inline struct sock *unix_find_socket_byname(struct net *net,
271 struct sockaddr_un *sunname,
272 int len, int type,
273 unsigned hash)
274 {
275 struct sock *s;
276
277 spin_lock(&unix_table_lock);
278 s = __unix_find_socket_byname(net, sunname, len, type, hash);
279 if (s)
280 sock_hold(s);
281 spin_unlock(&unix_table_lock);
282 return s;
283 }
284
285 static struct sock *unix_find_socket_byinode(struct inode *i)
286 {
287 struct sock *s;
288 struct hlist_node *node;
289
290 spin_lock(&unix_table_lock);
291 sk_for_each(s, node,
292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293 struct dentry *dentry = unix_sk(s)->dentry;
294
295 if (dentry && dentry->d_inode == i) {
296 sock_hold(s);
297 goto found;
298 }
299 }
300 s = NULL;
301 found:
302 spin_unlock(&unix_table_lock);
303 return s;
304 }
305
306 static inline int unix_writable(struct sock *sk)
307 {
308 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
309 }
310
311 static void unix_write_space(struct sock *sk)
312 {
313 struct socket_wq *wq;
314
315 rcu_read_lock();
316 if (unix_writable(sk)) {
317 wq = rcu_dereference(sk->sk_wq);
318 if (wq_has_sleeper(wq))
319 wake_up_interruptible_sync_poll(&wq->wait,
320 POLLOUT | POLLWRNORM | POLLWRBAND);
321 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
322 }
323 rcu_read_unlock();
324 }
325
326 /* When a dgram socket disconnects (or changes its peer), we clear its receive
327 * queue of packets that arrived from the previous peer. First, this allows
328 * flow control based only on wmem_alloc; second, a sk connected to a peer
329 * may receive messages only from that peer. */
330 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
331 {
332 if (!skb_queue_empty(&sk->sk_receive_queue)) {
333 skb_queue_purge(&sk->sk_receive_queue);
334 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
335
336 /* If one link of a bidirectional dgram pipe is disconnected,
337 * we signal an error. Messages are lost. Do not do this
338 * when the peer was not connected to us.
339 */
340 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
341 other->sk_err = ECONNRESET;
342 other->sk_error_report(other);
343 }
344 }
345 }
346
347 static void unix_sock_destructor(struct sock *sk)
348 {
349 struct unix_sock *u = unix_sk(sk);
350
351 skb_queue_purge(&sk->sk_receive_queue);
352
353 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
354 WARN_ON(!sk_unhashed(sk));
355 WARN_ON(sk->sk_socket);
356 if (!sock_flag(sk, SOCK_DEAD)) {
357 printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
358 return;
359 }
360
361 if (u->addr)
362 unix_release_addr(u->addr);
363
364 atomic_long_dec(&unix_nr_socks);
365 local_bh_disable();
366 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
367 local_bh_enable();
368 #ifdef UNIX_REFCNT_DEBUG
369 printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
370 atomic_long_read(&unix_nr_socks));
371 #endif
372 }
373
374 static int unix_release_sock(struct sock *sk, int embrion)
375 {
376 struct unix_sock *u = unix_sk(sk);
377 struct dentry *dentry;
378 struct vfsmount *mnt;
379 struct sock *skpair;
380 struct sk_buff *skb;
381 int state;
382
383 unix_remove_socket(sk);
384
385 /* Clear state */
386 unix_state_lock(sk);
387 sock_orphan(sk);
388 sk->sk_shutdown = SHUTDOWN_MASK;
389 dentry = u->dentry;
390 u->dentry = NULL;
391 mnt = u->mnt;
392 u->mnt = NULL;
393 state = sk->sk_state;
394 sk->sk_state = TCP_CLOSE;
395 unix_state_unlock(sk);
396
397 wake_up_interruptible_all(&u->peer_wait);
398
399 skpair = unix_peer(sk);
400
401 if (skpair != NULL) {
402 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
403 unix_state_lock(skpair);
404 /* No more writes */
405 skpair->sk_shutdown = SHUTDOWN_MASK;
406 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
407 skpair->sk_err = ECONNRESET;
408 unix_state_unlock(skpair);
409 skpair->sk_state_change(skpair);
410 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
411 }
412 sock_put(skpair); /* It may now die */
413 unix_peer(sk) = NULL;
414 }
415
416 /* Try to flush out this socket. Throw out buffers at least */
417
418 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
419 if (state == TCP_LISTEN)
420 unix_release_sock(skb->sk, 1);
421 /* passed fds are erased in the kfree_skb hook */
422 kfree_skb(skb);
423 }
424
425 if (dentry) {
426 dput(dentry);
427 mntput(mnt);
428 }
429
430 sock_put(sk);
431
432 /* ---- Socket is dead now and most probably destroyed ---- */
433
434 /*
435 * Fixme: BSD difference: In BSD all sockets connected to us get
436 * ECONNRESET and we die on the spot. In Linux we behave
437 * like files and pipes do and wait for the last
438 * dereference.
439 *
440 * Can't we simply set sock->err?
441 *
442 * What is the above comment talking about? --ANK(980817)
443 */
444
445 if (unix_tot_inflight)
446 unix_gc(); /* Garbage collect fds */
447
448 return 0;
449 }
450
451 static void init_peercred(struct sock *sk)
452 {
453 put_pid(sk->sk_peer_pid);
454 if (sk->sk_peer_cred)
455 put_cred(sk->sk_peer_cred);
456 sk->sk_peer_pid = get_pid(task_tgid(current));
457 sk->sk_peer_cred = get_current_cred();
458 }
459
460 static void copy_peercred(struct sock *sk, struct sock *peersk)
461 {
462 put_pid(sk->sk_peer_pid);
463 if (sk->sk_peer_cred)
464 put_cred(sk->sk_peer_cred);
465 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
466 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
467 }
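
/*
 * Illustrative userspace sketch (not part of this file; "fd" is
 * assumed to exist): the peer credentials set up above are what
 * SO_PEERCRED reports on a connected or socketpair()ed socket.
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("pid=%d uid=%d gid=%d\n",
 *		       (int)peer.pid, (int)peer.uid, (int)peer.gid);
 */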
468
469 static int unix_listen(struct socket *sock, int backlog)
470 {
471 int err;
472 struct sock *sk = sock->sk;
473 struct unix_sock *u = unix_sk(sk);
474 struct pid *old_pid = NULL;
475 const struct cred *old_cred = NULL;
476
477 err = -EOPNOTSUPP;
478 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
479 goto out; /* Only stream/seqpacket sockets accept */
480 err = -EINVAL;
481 if (!u->addr)
482 goto out; /* No listens on an unbound socket */
483 unix_state_lock(sk);
484 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
485 goto out_unlock;
486 if (backlog > sk->sk_max_ack_backlog)
487 wake_up_interruptible_all(&u->peer_wait);
488 sk->sk_max_ack_backlog = backlog;
489 sk->sk_state = TCP_LISTEN;
490 /* set credentials so connect can copy them */
491 init_peercred(sk);
492 err = 0;
493
494 out_unlock:
495 unix_state_unlock(sk);
496 put_pid(old_pid);
497 if (old_cred)
498 put_cred(old_cred);
499 out:
500 return err;
501 }
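
/*
 * Illustrative userspace sketch (the path "/run/demo.sock" is made
 * up): the usual server-side sequence that ends up here.
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	strcpy(sun.sun_path, "/run/demo.sock");
 *	bind(fd, (struct sockaddr *)&sun, sizeof(sun));
 *	listen(fd, 16);			// 16 -> sk_max_ack_backlog
 *	int conn = accept(fd, NULL, NULL);
 */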
502
503 static int unix_release(struct socket *);
504 static int unix_bind(struct socket *, struct sockaddr *, int);
505 static int unix_stream_connect(struct socket *, struct sockaddr *,
506 int addr_len, int flags);
507 static int unix_socketpair(struct socket *, struct socket *);
508 static int unix_accept(struct socket *, struct socket *, int);
509 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
510 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
511 static unsigned int unix_dgram_poll(struct file *, struct socket *,
512 poll_table *);
513 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
514 static int unix_shutdown(struct socket *, int);
515 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
516 struct msghdr *, size_t);
517 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
518 struct msghdr *, size_t, int);
519 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
520 struct msghdr *, size_t);
521 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
522 struct msghdr *, size_t, int);
523 static int unix_dgram_connect(struct socket *, struct sockaddr *,
524 int, int);
525 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
526 struct msghdr *, size_t);
527
528 static const struct proto_ops unix_stream_ops = {
529 .family = PF_UNIX,
530 .owner = THIS_MODULE,
531 .release = unix_release,
532 .bind = unix_bind,
533 .connect = unix_stream_connect,
534 .socketpair = unix_socketpair,
535 .accept = unix_accept,
536 .getname = unix_getname,
537 .poll = unix_poll,
538 .ioctl = unix_ioctl,
539 .listen = unix_listen,
540 .shutdown = unix_shutdown,
541 .setsockopt = sock_no_setsockopt,
542 .getsockopt = sock_no_getsockopt,
543 .sendmsg = unix_stream_sendmsg,
544 .recvmsg = unix_stream_recvmsg,
545 .mmap = sock_no_mmap,
546 .sendpage = sock_no_sendpage,
547 };
548
549 static const struct proto_ops unix_dgram_ops = {
550 .family = PF_UNIX,
551 .owner = THIS_MODULE,
552 .release = unix_release,
553 .bind = unix_bind,
554 .connect = unix_dgram_connect,
555 .socketpair = unix_socketpair,
556 .accept = sock_no_accept,
557 .getname = unix_getname,
558 .poll = unix_dgram_poll,
559 .ioctl = unix_ioctl,
560 .listen = sock_no_listen,
561 .shutdown = unix_shutdown,
562 .setsockopt = sock_no_setsockopt,
563 .getsockopt = sock_no_getsockopt,
564 .sendmsg = unix_dgram_sendmsg,
565 .recvmsg = unix_dgram_recvmsg,
566 .mmap = sock_no_mmap,
567 .sendpage = sock_no_sendpage,
568 };
569
570 static const struct proto_ops unix_seqpacket_ops = {
571 .family = PF_UNIX,
572 .owner = THIS_MODULE,
573 .release = unix_release,
574 .bind = unix_bind,
575 .connect = unix_stream_connect,
576 .socketpair = unix_socketpair,
577 .accept = unix_accept,
578 .getname = unix_getname,
579 .poll = unix_dgram_poll,
580 .ioctl = unix_ioctl,
581 .listen = unix_listen,
582 .shutdown = unix_shutdown,
583 .setsockopt = sock_no_setsockopt,
584 .getsockopt = sock_no_getsockopt,
585 .sendmsg = unix_seqpacket_sendmsg,
586 .recvmsg = unix_dgram_recvmsg,
587 .mmap = sock_no_mmap,
588 .sendpage = sock_no_sendpage,
589 };
590
591 static struct proto unix_proto = {
592 .name = "UNIX",
593 .owner = THIS_MODULE,
594 .obj_size = sizeof(struct unix_sock),
595 };
596
597 /*
598 * AF_UNIX sockets do not interact with hardware, hence they
599 * don't trigger interrupts - so it's safe for them to have
600 * bh-unsafe locking for their sk_receive_queue.lock. Split off
601 * this special lock-class by reinitializing the spinlock key:
602 */
603 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
604
605 static struct sock *unix_create1(struct net *net, struct socket *sock)
606 {
607 struct sock *sk = NULL;
608 struct unix_sock *u;
609
610 atomic_long_inc(&unix_nr_socks);
611 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
612 goto out;
613
614 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
615 if (!sk)
616 goto out;
617
618 sock_init_data(sock, sk);
619 lockdep_set_class(&sk->sk_receive_queue.lock,
620 &af_unix_sk_receive_queue_lock_key);
621
622 sk->sk_write_space = unix_write_space;
623 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
624 sk->sk_destruct = unix_sock_destructor;
625 u = unix_sk(sk);
626 u->dentry = NULL;
627 u->mnt = NULL;
628 spin_lock_init(&u->lock);
629 atomic_long_set(&u->inflight, 0);
630 INIT_LIST_HEAD(&u->link);
631 mutex_init(&u->readlock); /* single task reading lock */
632 init_waitqueue_head(&u->peer_wait);
633 unix_insert_socket(unix_sockets_unbound, sk);
634 out:
635 if (sk == NULL)
636 atomic_long_dec(&unix_nr_socks);
637 else {
638 local_bh_disable();
639 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
640 local_bh_enable();
641 }
642 return sk;
643 }
644
645 static int unix_create(struct net *net, struct socket *sock, int protocol,
646 int kern)
647 {
648 if (protocol && protocol != PF_UNIX)
649 return -EPROTONOSUPPORT;
650
651 sock->state = SS_UNCONNECTED;
652
653 switch (sock->type) {
654 case SOCK_STREAM:
655 sock->ops = &unix_stream_ops;
656 break;
657 /*
658 * Believe it or not, BSD has AF_UNIX, SOCK_RAW, though
659 * nothing uses it.
660 */
661 case SOCK_RAW:
662 sock->type = SOCK_DGRAM;
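/* fall through */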
663 case SOCK_DGRAM:
664 sock->ops = &unix_dgram_ops;
665 break;
666 case SOCK_SEQPACKET:
667 sock->ops = &unix_seqpacket_ops;
668 break;
669 default:
670 return -ESOCKTNOSUPPORT;
671 }
672
673 return unix_create1(net, sock) ? 0 : -ENOMEM;
674 }
675
676 static int unix_release(struct socket *sock)
677 {
678 struct sock *sk = sock->sk;
679
680 if (!sk)
681 return 0;
682
683 sock->sk = NULL;
684
685 return unix_release_sock(sk, 0);
686 }
687
688 static int unix_autobind(struct socket *sock)
689 {
690 struct sock *sk = sock->sk;
691 struct net *net = sock_net(sk);
692 struct unix_sock *u = unix_sk(sk);
693 static u32 ordernum = 1;
694 struct unix_address *addr;
695 int err;
696 unsigned int retries = 0;
697
698 mutex_lock(&u->readlock);
699
700 err = 0;
701 if (u->addr)
702 goto out;
703
704 err = -ENOMEM;
705 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
706 if (!addr)
707 goto out;
708
709 addr->name->sun_family = AF_UNIX;
710 atomic_set(&addr->refcnt, 1);
711
712 retry:
713 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
714 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
715
716 spin_lock(&unix_table_lock);
717 ordernum = (ordernum+1)&0xFFFFF;
718
719 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
720 addr->hash)) {
721 spin_unlock(&unix_table_lock);
722 /*
723 * __unix_find_socket_byname() may take a long time if many names
724 * are already in use.
725 */
726 cond_resched();
727 /* Give up if all names seem to be in use. */
728 if (retries++ == 0xFFFFF) {
729 err = -ENOSPC;
730 kfree(addr);
731 goto out;
732 }
733 goto retry;
734 }
735 addr->hash ^= sk->sk_type;
736
737 __unix_remove_socket(sk);
738 u->addr = addr;
739 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
740 spin_unlock(&unix_table_lock);
741 err = 0;
742
743 out: mutex_unlock(&u->readlock);
744 return err;
745 }
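
/*
 * Illustrative userspace sketch (not part of this file; "fd" is
 * assumed to exist): passing an address length of just
 * sizeof(sa_family_t) requests autobind, which assigns a
 * five-hex-digit abstract name as generated above.
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *
 *	bind(fd, (struct sockaddr *)&sun, sizeof(sa_family_t));
 */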
746
747 static struct sock *unix_find_other(struct net *net,
748 struct sockaddr_un *sunname, int len,
749 int type, unsigned hash, int *error)
750 {
751 struct sock *u;
752 struct path path;
753 int err = 0;
754
755 if (sunname->sun_path[0]) {
756 struct inode *inode;
757 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
758 if (err)
759 goto fail;
760 inode = path.dentry->d_inode;
761 err = inode_permission(inode, MAY_WRITE);
762 if (err)
763 goto put_fail;
764
765 err = -ECONNREFUSED;
766 if (!S_ISSOCK(inode->i_mode))
767 goto put_fail;
768 u = unix_find_socket_byinode(inode);
769 if (!u)
770 goto put_fail;
771
772 if (u->sk_type == type)
773 touch_atime(path.mnt, path.dentry);
774
775 path_put(&path);
776
777 err = -EPROTOTYPE;
778 if (u->sk_type != type) {
779 sock_put(u);
780 goto fail;
781 }
782 } else {
783 err = -ECONNREFUSED;
784 u = unix_find_socket_byname(net, sunname, len, type, hash);
785 if (u) {
786 struct dentry *dentry;
787 dentry = unix_sk(u)->dentry;
788 if (dentry)
789 touch_atime(unix_sk(u)->mnt, dentry);
790 } else
791 goto fail;
792 }
793 return u;
794
795 put_fail:
796 path_put(&path);
797 fail:
798 *error = err;
799 return NULL;
800 }
801
802
803 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
804 {
805 struct sock *sk = sock->sk;
806 struct net *net = sock_net(sk);
807 struct unix_sock *u = unix_sk(sk);
808 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
809 struct dentry *dentry = NULL;
810 struct nameidata nd;
811 int err;
812 unsigned hash;
813 struct unix_address *addr;
814 struct hlist_head *list;
815
816 err = -EINVAL;
817 if (sunaddr->sun_family != AF_UNIX)
818 goto out;
819
820 if (addr_len == sizeof(short)) {
821 err = unix_autobind(sock);
822 goto out;
823 }
824
825 err = unix_mkname(sunaddr, addr_len, &hash);
826 if (err < 0)
827 goto out;
828 addr_len = err;
829
830 mutex_lock(&u->readlock);
831
832 err = -EINVAL;
833 if (u->addr)
834 goto out_up;
835
836 err = -ENOMEM;
837 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
838 if (!addr)
839 goto out_up;
840
841 memcpy(addr->name, sunaddr, addr_len);
842 addr->len = addr_len;
843 addr->hash = hash ^ sk->sk_type;
844 atomic_set(&addr->refcnt, 1);
845
846 if (sunaddr->sun_path[0]) {
847 unsigned int mode;
848 err = 0;
849 /*
850 * Get the parent directory, calculate the hash for the last
851 * component.
852 */
853 err = kern_path_parent(sunaddr->sun_path, &nd);
854 if (err)
855 goto out_mknod_parent;
856
857 dentry = lookup_create(&nd, 0);
858 err = PTR_ERR(dentry);
859 if (IS_ERR(dentry))
860 goto out_mknod_unlock;
861
862 /*
863 * All right, let's create it.
864 */
865 mode = S_IFSOCK |
866 (SOCK_INODE(sock)->i_mode & ~current_umask());
867 err = mnt_want_write(nd.path.mnt);
868 if (err)
869 goto out_mknod_dput;
870 err = security_path_mknod(&nd.path, dentry, mode, 0);
871 if (err)
872 goto out_mknod_drop_write;
873 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
874 out_mknod_drop_write:
875 mnt_drop_write(nd.path.mnt);
876 if (err)
877 goto out_mknod_dput;
878 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
879 dput(nd.path.dentry);
880 nd.path.dentry = dentry;
881
882 addr->hash = UNIX_HASH_SIZE;
883 }
884
885 spin_lock(&unix_table_lock);
886
887 if (!sunaddr->sun_path[0]) {
888 err = -EADDRINUSE;
889 if (__unix_find_socket_byname(net, sunaddr, addr_len,
890 sk->sk_type, hash)) {
891 unix_release_addr(addr);
892 goto out_unlock;
893 }
894
895 list = &unix_socket_table[addr->hash];
896 } else {
897 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
898 u->dentry = nd.path.dentry;
899 u->mnt = nd.path.mnt;
900 }
901
902 err = 0;
903 __unix_remove_socket(sk);
904 u->addr = addr;
905 __unix_insert_socket(list, sk);
906
907 out_unlock:
908 spin_unlock(&unix_table_lock);
909 out_up:
910 mutex_unlock(&u->readlock);
911 out:
912 return err;
913
914 out_mknod_dput:
915 dput(dentry);
916 out_mknod_unlock:
917 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
918 path_put(&nd.path);
919 out_mknod_parent:
920 if (err == -EEXIST)
921 err = -EADDRINUSE;
922 unix_release_addr(addr);
923 goto out_up;
924 }
925
926 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
927 {
928 if (unlikely(sk1 == sk2) || !sk2) {
929 unix_state_lock(sk1);
930 return;
931 }
932 if (sk1 < sk2) {
933 unix_state_lock(sk1);
934 unix_state_lock_nested(sk2);
935 } else {
936 unix_state_lock(sk2);
937 unix_state_lock_nested(sk1);
938 }
939 }
940
941 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
942 {
943 if (unlikely(sk1 == sk2) || !sk2) {
944 unix_state_unlock(sk1);
945 return;
946 }
947 unix_state_unlock(sk1);
948 unix_state_unlock(sk2);
949 }
950
951 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
952 int alen, int flags)
953 {
954 struct sock *sk = sock->sk;
955 struct net *net = sock_net(sk);
956 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
957 struct sock *other;
958 unsigned hash;
959 int err;
960
961 if (addr->sa_family != AF_UNSPEC) {
962 err = unix_mkname(sunaddr, alen, &hash);
963 if (err < 0)
964 goto out;
965 alen = err;
966
967 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
968 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
969 goto out;
970
971 restart:
972 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
973 if (!other)
974 goto out;
975
976 unix_state_double_lock(sk, other);
977
978 /* Apparently VFS overslept socket death. Retry. */
979 if (sock_flag(other, SOCK_DEAD)) {
980 unix_state_double_unlock(sk, other);
981 sock_put(other);
982 goto restart;
983 }
984
985 err = -EPERM;
986 if (!unix_may_send(sk, other))
987 goto out_unlock;
988
989 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
990 if (err)
991 goto out_unlock;
992
993 } else {
994 /*
995 * 1003.1g breaking connected state with AF_UNSPEC
996 */
997 other = NULL;
998 unix_state_double_lock(sk, other);
999 }
1000
1001 /*
1002 * If it was connected, reconnect.
1003 */
1004 if (unix_peer(sk)) {
1005 struct sock *old_peer = unix_peer(sk);
1006 unix_peer(sk) = other;
1007 unix_state_double_unlock(sk, other);
1008
1009 if (other != old_peer)
1010 unix_dgram_disconnected(sk, old_peer);
1011 sock_put(old_peer);
1012 } else {
1013 unix_peer(sk) = other;
1014 unix_state_double_unlock(sk, other);
1015 }
1016 return 0;
1017
1018 out_unlock:
1019 unix_state_double_unlock(sk, other);
1020 sock_put(other);
1021 out:
1022 return err;
1023 }
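
/*
 * Illustrative userspace sketch (not part of this file; "fd" is
 * assumed to exist): per 1003.1g, connecting a datagram socket to
 * AF_UNSPEC dissolves the existing association - the else branch
 * above.
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	// drops the current peer
 */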
1024
1025 static long unix_wait_for_peer(struct sock *other, long timeo)
1026 {
1027 struct unix_sock *u = unix_sk(other);
1028 int sched;
1029 DEFINE_WAIT(wait);
1030
1031 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1032
1033 sched = !sock_flag(other, SOCK_DEAD) &&
1034 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1035 unix_recvq_full(other);
1036
1037 unix_state_unlock(other);
1038
1039 if (sched)
1040 timeo = schedule_timeout(timeo);
1041
1042 finish_wait(&u->peer_wait, &wait);
1043 return timeo;
1044 }
1045
1046 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1047 int addr_len, int flags)
1048 {
1049 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1050 struct sock *sk = sock->sk;
1051 struct net *net = sock_net(sk);
1052 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1053 struct sock *newsk = NULL;
1054 struct sock *other = NULL;
1055 struct sk_buff *skb = NULL;
1056 unsigned hash;
1057 int st;
1058 int err;
1059 long timeo;
1060
1061 err = unix_mkname(sunaddr, addr_len, &hash);
1062 if (err < 0)
1063 goto out;
1064 addr_len = err;
1065
1066 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1067 (err = unix_autobind(sock)) != 0)
1068 goto out;
1069
1070 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1071
1072 /* First of all, allocate resources.
1073 If we do it after the state is locked,
1074 we will have to recheck everything again in any case.
1075 */
1076
1077 err = -ENOMEM;
1078
1079 /* create new sock for complete connection */
1080 newsk = unix_create1(sock_net(sk), NULL);
1081 if (newsk == NULL)
1082 goto out;
1083
1084 /* Allocate skb for sending to listening sock */
1085 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1086 if (skb == NULL)
1087 goto out;
1088
1089 restart:
1090 /* Find listening sock. */
1091 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1092 if (!other)
1093 goto out;
1094
1095 /* Latch state of peer */
1096 unix_state_lock(other);
1097
1098 /* Apparently VFS overslept socket death. Retry. */
1099 if (sock_flag(other, SOCK_DEAD)) {
1100 unix_state_unlock(other);
1101 sock_put(other);
1102 goto restart;
1103 }
1104
1105 err = -ECONNREFUSED;
1106 if (other->sk_state != TCP_LISTEN)
1107 goto out_unlock;
1108 if (other->sk_shutdown & RCV_SHUTDOWN)
1109 goto out_unlock;
1110
1111 if (unix_recvq_full(other)) {
1112 err = -EAGAIN;
1113 if (!timeo)
1114 goto out_unlock;
1115
1116 timeo = unix_wait_for_peer(other, timeo);
1117
1118 err = sock_intr_errno(timeo);
1119 if (signal_pending(current))
1120 goto out;
1121 sock_put(other);
1122 goto restart;
1123 }
1124
1125 /* Latch our state.
1126
1127 This is a tricky place. We need to grab our state lock and cannot
1128 drop the lock on the peer. It is dangerous because deadlock is
1129 possible. The connect-to-self case and simultaneous
1130 attempts to connect are eliminated by checking the socket
1131 state: other is TCP_LISTEN, and if sk were TCP_LISTEN we
1132 would have checked that before attempting to grab the lock.
1133
1134 Well, and we have to recheck the state after the socket is locked.
1135 */
1136 st = sk->sk_state;
1137
1138 switch (st) {
1139 case TCP_CLOSE:
1140 /* This is ok... continue with connect */
1141 break;
1142 case TCP_ESTABLISHED:
1143 /* Socket is already connected */
1144 err = -EISCONN;
1145 goto out_unlock;
1146 default:
1147 err = -EINVAL;
1148 goto out_unlock;
1149 }
1150
1151 unix_state_lock_nested(sk);
1152
1153 if (sk->sk_state != st) {
1154 unix_state_unlock(sk);
1155 unix_state_unlock(other);
1156 sock_put(other);
1157 goto restart;
1158 }
1159
1160 err = security_unix_stream_connect(sk, other, newsk);
1161 if (err) {
1162 unix_state_unlock(sk);
1163 goto out_unlock;
1164 }
1165
1166 /* The way is open! Quickly set all the necessary fields... */
1167
1168 sock_hold(sk);
1169 unix_peer(newsk) = sk;
1170 newsk->sk_state = TCP_ESTABLISHED;
1171 newsk->sk_type = sk->sk_type;
1172 init_peercred(newsk);
1173 newu = unix_sk(newsk);
1174 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1175 otheru = unix_sk(other);
1176
1177 /* copy address information from listening to new sock */
1178 if (otheru->addr) {
1179 atomic_inc(&otheru->addr->refcnt);
1180 newu->addr = otheru->addr;
1181 }
1182 if (otheru->dentry) {
1183 newu->dentry = dget(otheru->dentry);
1184 newu->mnt = mntget(otheru->mnt);
1185 }
1186
1187 /* Set credentials */
1188 copy_peercred(sk, other);
1189
1190 sock->state = SS_CONNECTED;
1191 sk->sk_state = TCP_ESTABLISHED;
1192 sock_hold(newsk);
1193
1194 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1195 unix_peer(sk) = newsk;
1196
1197 unix_state_unlock(sk);
1198
1199 /* take ten and send info to listening sock */
1200 spin_lock(&other->sk_receive_queue.lock);
1201 __skb_queue_tail(&other->sk_receive_queue, skb);
1202 spin_unlock(&other->sk_receive_queue.lock);
1203 unix_state_unlock(other);
1204 other->sk_data_ready(other, 0);
1205 sock_put(other);
1206 return 0;
1207
1208 out_unlock:
1209 if (other)
1210 unix_state_unlock(other);
1211
1212 out:
1213 kfree_skb(skb);
1214 if (newsk)
1215 unix_release_sock(newsk, 0);
1216 if (other)
1217 sock_put(other);
1218 return err;
1219 }
1220
1221 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1222 {
1223 struct sock *ska = socka->sk, *skb = sockb->sk;
1224
1225 /* Join our sockets back to back */
1226 sock_hold(ska);
1227 sock_hold(skb);
1228 unix_peer(ska) = skb;
1229 unix_peer(skb) = ska;
1230 init_peercred(ska);
1231 init_peercred(skb);
1232
1233 if (ska->sk_type != SOCK_DGRAM) {
1234 ska->sk_state = TCP_ESTABLISHED;
1235 skb->sk_state = TCP_ESTABLISHED;
1236 socka->state = SS_CONNECTED;
1237 sockb->state = SS_CONNECTED;
1238 }
1239 return 0;
1240 }
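
/*
 * Illustrative userspace sketch (not part of this file): socketpair()
 * is the caller of the back-to-back join performed above.
 *
 *	int sv[2];
 *	char buf[4];
 *
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
 *		write(sv[0], "ping", 4);
 *		read(sv[1], buf, 4);	// receives "ping"
 *	}
 */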
1241
1242 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1243 {
1244 struct sock *sk = sock->sk;
1245 struct sock *tsk;
1246 struct sk_buff *skb;
1247 int err;
1248
1249 err = -EOPNOTSUPP;
1250 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1251 goto out;
1252
1253 err = -EINVAL;
1254 if (sk->sk_state != TCP_LISTEN)
1255 goto out;
1256
1257 /* If socket state is TCP_LISTEN it cannot change (for now...),
1258 * so no locks are necessary.
1259 */
1260
1261 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1262 if (!skb) {
1263 /* This means receive shutdown. */
1264 if (err == 0)
1265 err = -EINVAL;
1266 goto out;
1267 }
1268
1269 tsk = skb->sk;
1270 skb_free_datagram(sk, skb);
1271 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1272
1273 /* attach accepted sock to socket */
1274 unix_state_lock(tsk);
1275 newsock->state = SS_CONNECTED;
1276 sock_graft(tsk, newsock);
1277 unix_state_unlock(tsk);
1278 return 0;
1279
1280 out:
1281 return err;
1282 }
1283
1284
1285 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1286 {
1287 struct sock *sk = sock->sk;
1288 struct unix_sock *u;
1289 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1290 int err = 0;
1291
1292 if (peer) {
1293 sk = unix_peer_get(sk);
1294
1295 err = -ENOTCONN;
1296 if (!sk)
1297 goto out;
1298 err = 0;
1299 } else {
1300 sock_hold(sk);
1301 }
1302
1303 u = unix_sk(sk);
1304 unix_state_lock(sk);
1305 if (!u->addr) {
1306 sunaddr->sun_family = AF_UNIX;
1307 sunaddr->sun_path[0] = 0;
1308 *uaddr_len = sizeof(short);
1309 } else {
1310 struct unix_address *addr = u->addr;
1311
1312 *uaddr_len = addr->len;
1313 memcpy(sunaddr, addr->name, *uaddr_len);
1314 }
1315 unix_state_unlock(sk);
1316 sock_put(sk);
1317 out:
1318 return err;
1319 }
1320
1321 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1322 {
1323 int i;
1324
1325 scm->fp = UNIXCB(skb).fp;
1326 UNIXCB(skb).fp = NULL;
1327
1328 for (i = scm->fp->count-1; i >= 0; i--)
1329 unix_notinflight(scm->fp->fp[i]);
1330 }
1331
1332 static void unix_destruct_scm(struct sk_buff *skb)
1333 {
1334 struct scm_cookie scm;
1335 memset(&scm, 0, sizeof(scm));
1336 scm.pid = UNIXCB(skb).pid;
1337 scm.cred = UNIXCB(skb).cred;
1338 if (UNIXCB(skb).fp)
1339 unix_detach_fds(&scm, skb);
1340
1341 /* Alas, it calls VFS */
1342 /* So fscking what? fput() had been SMP-safe since the last Summer */
1343 scm_destroy(&scm);
1344 sock_wfree(skb);
1345 }
1346
1347 #define MAX_RECURSION_LEVEL 4
1348
1349 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1350 {
1351 int i;
1352 unsigned char max_level = 0;
1353 int unix_sock_count = 0;
1354
1355 for (i = scm->fp->count - 1; i >= 0; i--) {
1356 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1357
1358 if (sk) {
1359 unix_sock_count++;
1360 max_level = max(max_level,
1361 unix_sk(sk)->recursion_level);
1362 }
1363 }
1364 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1365 return -ETOOMANYREFS;
1366
1367 /*
1368 * Need to duplicate file references for the sake of garbage
1369 * collection. Otherwise a socket in the fps might become a
1370 * candidate for GC while the skb is not yet queued.
1371 */
1372 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1373 if (!UNIXCB(skb).fp)
1374 return -ENOMEM;
1375
1376 if (unix_sock_count) {
1377 for (i = scm->fp->count - 1; i >= 0; i--)
1378 unix_inflight(scm->fp->fp[i]);
1379 }
1380 return max_level;
1381 }
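
/*
 * Illustrative userspace sketch (not part of this file; "sockfd" and
 * "fd_to_pass" are assumed to exist): sending one file descriptor
 * with SCM_RIGHTS, which arrives here as scm->fp.
 *
 *	char dummy = '*';
 *	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *	union {
 *		struct cmsghdr align;
 *		char buf[CMSG_SPACE(sizeof(int))];
 *	} u;
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	sendmsg(sockfd, &msg, 0);
 */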
1382
1383 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1384 {
1385 int err = 0;
1386 UNIXCB(skb).pid = get_pid(scm->pid);
1387 UNIXCB(skb).cred = get_cred(scm->cred);
1388 UNIXCB(skb).fp = NULL;
1389 if (scm->fp && send_fds)
1390 err = unix_attach_fds(scm, skb);
1391
1392 skb->destructor = unix_destruct_scm;
1393 return err;
1394 }
1395
1396 /*
1397 * Send AF_UNIX data.
1398 */
1399
1400 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1401 struct msghdr *msg, size_t len)
1402 {
1403 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1404 struct sock *sk = sock->sk;
1405 struct net *net = sock_net(sk);
1406 struct unix_sock *u = unix_sk(sk);
1407 struct sockaddr_un *sunaddr = msg->msg_name;
1408 struct sock *other = NULL;
1409 int namelen = 0; /* fake GCC */
1410 int err;
1411 unsigned hash;
1412 struct sk_buff *skb;
1413 long timeo;
1414 struct scm_cookie tmp_scm;
1415 int max_level;
1416
1417 if (NULL == siocb->scm)
1418 siocb->scm = &tmp_scm;
1419 wait_for_unix_gc();
1420 err = scm_send(sock, msg, siocb->scm);
1421 if (err < 0)
1422 return err;
1423
1424 err = -EOPNOTSUPP;
1425 if (msg->msg_flags&MSG_OOB)
1426 goto out;
1427
1428 if (msg->msg_namelen) {
1429 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1430 if (err < 0)
1431 goto out;
1432 namelen = err;
1433 } else {
1434 sunaddr = NULL;
1435 err = -ENOTCONN;
1436 other = unix_peer_get(sk);
1437 if (!other)
1438 goto out;
1439 }
1440
1441 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1442 && (err = unix_autobind(sock)) != 0)
1443 goto out;
1444
1445 err = -EMSGSIZE;
1446 if (len > sk->sk_sndbuf - 32)
1447 goto out;
1448
1449 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1450 if (skb == NULL)
1451 goto out;
1452
1453 err = unix_scm_to_skb(siocb->scm, skb, true);
1454 if (err < 0)
1455 goto out_free;
1456 max_level = err + 1;
1457 unix_get_secdata(siocb->scm, skb);
1458
1459 skb_reset_transport_header(skb);
1460 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1461 if (err)
1462 goto out_free;
1463
1464 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1465
1466 restart:
1467 if (!other) {
1468 err = -ECONNRESET;
1469 if (sunaddr == NULL)
1470 goto out_free;
1471
1472 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1473 hash, &err);
1474 if (other == NULL)
1475 goto out_free;
1476 }
1477
1478 if (sk_filter(other, skb) < 0) {
1479 /* Toss the packet but do not return any error to the sender */
1480 err = len;
1481 goto out_free;
1482 }
1483
1484 unix_state_lock(other);
1485 err = -EPERM;
1486 if (!unix_may_send(sk, other))
1487 goto out_unlock;
1488
1489 if (sock_flag(other, SOCK_DEAD)) {
1490 /*
1491 * Check with 1003.1g - what should
1492 * a datagram error be here?
1493 */
1494 unix_state_unlock(other);
1495 sock_put(other);
1496
1497 err = 0;
1498 unix_state_lock(sk);
1499 if (unix_peer(sk) == other) {
1500 unix_peer(sk) = NULL;
1501 unix_state_unlock(sk);
1502
1503 unix_dgram_disconnected(sk, other);
1504 sock_put(other);
1505 err = -ECONNREFUSED;
1506 } else {
1507 unix_state_unlock(sk);
1508 }
1509
1510 other = NULL;
1511 if (err)
1512 goto out_free;
1513 goto restart;
1514 }
1515
1516 err = -EPIPE;
1517 if (other->sk_shutdown & RCV_SHUTDOWN)
1518 goto out_unlock;
1519
1520 if (sk->sk_type != SOCK_SEQPACKET) {
1521 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1522 if (err)
1523 goto out_unlock;
1524 }
1525
1526 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1527 if (!timeo) {
1528 err = -EAGAIN;
1529 goto out_unlock;
1530 }
1531
1532 timeo = unix_wait_for_peer(other, timeo);
1533
1534 err = sock_intr_errno(timeo);
1535 if (signal_pending(current))
1536 goto out_free;
1537
1538 goto restart;
1539 }
1540
1541 if (sock_flag(other, SOCK_RCVTSTAMP))
1542 __net_timestamp(skb);
1543 skb_queue_tail(&other->sk_receive_queue, skb);
1544 if (max_level > unix_sk(other)->recursion_level)
1545 unix_sk(other)->recursion_level = max_level;
1546 unix_state_unlock(other);
1547 other->sk_data_ready(other, len);
1548 sock_put(other);
1549 scm_destroy(siocb->scm);
1550 return len;
1551
1552 out_unlock:
1553 unix_state_unlock(other);
1554 out_free:
1555 kfree_skb(skb);
1556 out:
1557 if (other)
1558 sock_put(other);
1559 scm_destroy(siocb->scm);
1560 return err;
1561 }
1562
1563
1564 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1565 struct msghdr *msg, size_t len)
1566 {
1567 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1568 struct sock *sk = sock->sk;
1569 struct sock *other = NULL;
1570 int err, size;
1571 struct sk_buff *skb;
1572 int sent = 0;
1573 struct scm_cookie tmp_scm;
1574 bool fds_sent = false;
1575 int max_level;
1576
1577 if (NULL == siocb->scm)
1578 siocb->scm = &tmp_scm;
1579 wait_for_unix_gc();
1580 err = scm_send(sock, msg, siocb->scm);
1581 if (err < 0)
1582 return err;
1583
1584 err = -EOPNOTSUPP;
1585 if (msg->msg_flags&MSG_OOB)
1586 goto out_err;
1587
1588 if (msg->msg_namelen) {
1589 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1590 goto out_err;
1591 } else {
1592 err = -ENOTCONN;
1593 other = unix_peer(sk);
1594 if (!other)
1595 goto out_err;
1596 }
1597
1598 if (sk->sk_shutdown & SEND_SHUTDOWN)
1599 goto pipe_err;
1600
1601 while (sent < len) {
1602 /*
1603 * Optimisation for the fact that under 0.01% of X
1604 * messages typically need breaking up.
1605 */
1606
1607 size = len-sent;
1608
1609 /* Keep two messages in the pipe so it schedules better */
1610 if (size > ((sk->sk_sndbuf >> 1) - 64))
1611 size = (sk->sk_sndbuf >> 1) - 64;
1612
1613 if (size > SKB_MAX_ALLOC)
1614 size = SKB_MAX_ALLOC;
1615
1616 /*
1617 * Grab a buffer
1618 */
1619
1620 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1621 &err);
1622
1623 if (skb == NULL)
1624 goto out_err;
1625
1626 /*
1627 * If you pass two values to sock_alloc_send_skb
1628 * it tries to grab the large buffer with GFP_NOFS
1629 * (which can fail easily), and if that fails it grabs the
1630 * fallback-size buffer, which is under a page and will
1631 * succeed. [Alan]
1632 */
1633 size = min_t(int, size, skb_tailroom(skb));
1634
1635
1636 /* Only send the fds in the first buffer */
1637 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1638 if (err < 0) {
1639 kfree_skb(skb);
1640 goto out_err;
1641 }
1642 max_level = err + 1;
1643 fds_sent = true;
1644
1645 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1646 if (err) {
1647 kfree_skb(skb);
1648 goto out_err;
1649 }
1650
1651 unix_state_lock(other);
1652
1653 if (sock_flag(other, SOCK_DEAD) ||
1654 (other->sk_shutdown & RCV_SHUTDOWN))
1655 goto pipe_err_free;
1656
1657 skb_queue_tail(&other->sk_receive_queue, skb);
1658 if (max_level > unix_sk(other)->recursion_level)
1659 unix_sk(other)->recursion_level = max_level;
1660 unix_state_unlock(other);
1661 other->sk_data_ready(other, size);
1662 sent += size;
1663 }
1664
1665 scm_destroy(siocb->scm);
1666 siocb->scm = NULL;
1667
1668 return sent;
1669
1670 pipe_err_free:
1671 unix_state_unlock(other);
1672 kfree_skb(skb);
1673 pipe_err:
1674 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1675 send_sig(SIGPIPE, current, 0);
1676 err = -EPIPE;
1677 out_err:
1678 scm_destroy(siocb->scm);
1679 siocb->scm = NULL;
1680 return sent ? : err;
1681 }
1682
1683 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1684 struct msghdr *msg, size_t len)
1685 {
1686 int err;
1687 struct sock *sk = sock->sk;
1688
1689 err = sock_error(sk);
1690 if (err)
1691 return err;
1692
1693 if (sk->sk_state != TCP_ESTABLISHED)
1694 return -ENOTCONN;
1695
1696 if (msg->msg_namelen)
1697 msg->msg_namelen = 0;
1698
1699 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1700 }
1701
1702 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1703 {
1704 struct unix_sock *u = unix_sk(sk);
1705
1706 msg->msg_namelen = 0;
1707 if (u->addr) {
1708 msg->msg_namelen = u->addr->len;
1709 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1710 }
1711 }
1712
1713 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1714 struct msghdr *msg, size_t size,
1715 int flags)
1716 {
1717 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1718 struct scm_cookie tmp_scm;
1719 struct sock *sk = sock->sk;
1720 struct unix_sock *u = unix_sk(sk);
1721 int noblock = flags & MSG_DONTWAIT;
1722 struct sk_buff *skb;
1723 int err;
1724
1725 err = -EOPNOTSUPP;
1726 if (flags&MSG_OOB)
1727 goto out;
1728
1729 msg->msg_namelen = 0;
1730
1731 err = mutex_lock_interruptible(&u->readlock);
1732 if (err) {
1733 err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
1734 goto out;
1735 }
1736
1737 skb = skb_recv_datagram(sk, flags, noblock, &err);
1738 if (!skb) {
1739 unix_state_lock(sk);
1740 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1741 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1742 (sk->sk_shutdown & RCV_SHUTDOWN))
1743 err = 0;
1744 unix_state_unlock(sk);
1745 goto out_unlock;
1746 }
1747
1748 wake_up_interruptible_sync_poll(&u->peer_wait,
1749 POLLOUT | POLLWRNORM | POLLWRBAND);
1750
1751 if (msg->msg_name)
1752 unix_copy_addr(msg, skb->sk);
1753
1754 if (size > skb->len)
1755 size = skb->len;
1756 else if (size < skb->len)
1757 msg->msg_flags |= MSG_TRUNC;
1758
1759 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1760 if (err)
1761 goto out_free;
1762
1763 if (sock_flag(sk, SOCK_RCVTSTAMP))
1764 __sock_recv_timestamp(msg, sk, skb);
1765
1766 if (!siocb->scm) {
1767 siocb->scm = &tmp_scm;
1768 memset(&tmp_scm, 0, sizeof(tmp_scm));
1769 }
1770 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1771 unix_set_secdata(siocb->scm, skb);
1772
1773 if (!(flags & MSG_PEEK)) {
1774 if (UNIXCB(skb).fp)
1775 unix_detach_fds(siocb->scm, skb);
1776 } else {
1777 /* It is questionable: on PEEK we could:
1778 - not return fds - good, but too simple 8)
1779 - return fds, and not return them on read (old strategy,
1780 apparently wrong)
1781 - clone fds (I chose this for now, it is the most universal
1782 solution)
1783
1784 POSIX 1003.1g does not actually define this clearly
1785 at all. POSIX 1003.1g doesn't define a lot of things
1786 clearly however!
1787
1788 */
1789 if (UNIXCB(skb).fp)
1790 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1791 }
1792 err = size;
1793
1794 scm_recv(sock, msg, siocb->scm, flags);
1795
1796 out_free:
1797 skb_free_datagram(sk, skb);
1798 out_unlock:
1799 mutex_unlock(&u->readlock);
1800 out:
1801 return err;
1802 }
1803
1804 /*
1805 * Sleep until data has arrived. But check for races.
1806 */
1807
1808 static long unix_stream_data_wait(struct sock *sk, long timeo)
1809 {
1810 DEFINE_WAIT(wait);
1811
1812 unix_state_lock(sk);
1813
1814 for (;;) {
1815 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1816
1817 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1818 sk->sk_err ||
1819 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1820 signal_pending(current) ||
1821 !timeo)
1822 break;
1823
1824 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1825 unix_state_unlock(sk);
1826 timeo = schedule_timeout(timeo);
1827 unix_state_lock(sk);
1828 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1829 }
1830
1831 finish_wait(sk_sleep(sk), &wait);
1832 unix_state_unlock(sk);
1833 return timeo;
1834 }
1835
1836
1837
1838 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1839 struct msghdr *msg, size_t size,
1840 int flags)
1841 {
1842 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1843 struct scm_cookie tmp_scm;
1844 struct sock *sk = sock->sk;
1845 struct unix_sock *u = unix_sk(sk);
1846 struct sockaddr_un *sunaddr = msg->msg_name;
1847 int copied = 0;
1848 int check_creds = 0;
1849 int target;
1850 int err = 0;
1851 long timeo;
1852
1853 err = -EINVAL;
1854 if (sk->sk_state != TCP_ESTABLISHED)
1855 goto out;
1856
1857 err = -EOPNOTSUPP;
1858 if (flags&MSG_OOB)
1859 goto out;
1860
1861 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1862 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1863
1864 msg->msg_namelen = 0;
1865
1866 /* Lock the socket to prevent queue disordering
1867 * while we sleep in memcpy_toiovec
1868 */
1869
1870 if (!siocb->scm) {
1871 siocb->scm = &tmp_scm;
1872 memset(&tmp_scm, 0, sizeof(tmp_scm));
1873 }
1874
1875 err = mutex_lock_interruptible(&u->readlock);
1876 if (err) {
1877 err = sock_intr_errno(timeo);
1878 goto out;
1879 }
1880
1881 do {
1882 int chunk;
1883 struct sk_buff *skb;
1884
1885 unix_state_lock(sk);
1886 skb = skb_dequeue(&sk->sk_receive_queue);
1887 if (skb == NULL) {
1888 unix_sk(sk)->recursion_level = 0;
1889 if (copied >= target)
1890 goto unlock;
1891
1892 /*
1893 * POSIX 1003.1g mandates this order.
1894 */
1895
1896 err = sock_error(sk);
1897 if (err)
1898 goto unlock;
1899 if (sk->sk_shutdown & RCV_SHUTDOWN)
1900 goto unlock;
1901
1902 unix_state_unlock(sk);
1903 err = -EAGAIN;
1904 if (!timeo)
1905 break;
1906 mutex_unlock(&u->readlock);
1907
1908 timeo = unix_stream_data_wait(sk, timeo);
1909
1910 if (signal_pending(current)
1911 || mutex_lock_interruptible(&u->readlock)) {
1912 err = sock_intr_errno(timeo);
1913 goto out;
1914 }
1915
1916 continue;
1917 unlock:
1918 unix_state_unlock(sk);
1919 break;
1920 }
1921 unix_state_unlock(sk);
1922
1923 if (check_creds) {
1924 /* Never glue messages from different writers */
1925 if ((UNIXCB(skb).pid != siocb->scm->pid) ||
1926 (UNIXCB(skb).cred != siocb->scm->cred)) {
1927 skb_queue_head(&sk->sk_receive_queue, skb);
1928 break;
1929 }
1930 } else {
1931 /* Copy credentials */
1932 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1933 check_creds = 1;
1934 }
1935
1936 /* Copy address just once */
1937 if (sunaddr) {
1938 unix_copy_addr(msg, skb->sk);
1939 sunaddr = NULL;
1940 }
1941
1942 chunk = min_t(unsigned int, skb->len, size);
1943 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1944 skb_queue_head(&sk->sk_receive_queue, skb);
1945 if (copied == 0)
1946 copied = -EFAULT;
1947 break;
1948 }
1949 copied += chunk;
1950 size -= chunk;
1951
1952 /* Mark read part of skb as used */
1953 if (!(flags & MSG_PEEK)) {
1954 skb_pull(skb, chunk);
1955
1956 if (UNIXCB(skb).fp)
1957 unix_detach_fds(siocb->scm, skb);
1958
1959 /* put the skb back if we didn't use it up.. */
1960 if (skb->len) {
1961 skb_queue_head(&sk->sk_receive_queue, skb);
1962 break;
1963 }
1964
1965 consume_skb(skb);
1966
1967 if (siocb->scm->fp)
1968 break;
1969 } else {
1970 /* It is questionable, see note in unix_dgram_recvmsg.
1971 */
1972 if (UNIXCB(skb).fp)
1973 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1974
1975 /* put message back and return */
1976 skb_queue_head(&sk->sk_receive_queue, skb);
1977 break;
1978 }
1979 } while (size);
1980
1981 mutex_unlock(&u->readlock);
1982 scm_recv(sock, msg, siocb->scm, flags);
1983 out:
1984 return copied ? : err;
1985 }
1986
1987 static int unix_shutdown(struct socket *sock, int mode)
1988 {
1989 struct sock *sk = sock->sk;
1990 struct sock *other;
1991
1992 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1993
1994 if (!mode)
1995 return 0;
1996
1997 unix_state_lock(sk);
1998 sk->sk_shutdown |= mode;
1999 other = unix_peer(sk);
2000 if (other)
2001 sock_hold(other);
2002 unix_state_unlock(sk);
2003 sk->sk_state_change(sk);
2004
2005 if (other &&
2006 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2007
2008 int peer_mode = 0;
2009
2010 if (mode&RCV_SHUTDOWN)
2011 peer_mode |= SEND_SHUTDOWN;
2012 if (mode&SEND_SHUTDOWN)
2013 peer_mode |= RCV_SHUTDOWN;
2014 unix_state_lock(other);
2015 other->sk_shutdown |= peer_mode;
2016 unix_state_unlock(other);
2017 other->sk_state_change(other);
2018 if (peer_mode == SHUTDOWN_MASK)
2019 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2020 else if (peer_mode & RCV_SHUTDOWN)
2021 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2022 }
2023 if (other)
2024 sock_put(other);
2025
2026 return 0;
2027 }
2028
2029 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2030 {
2031 struct sock *sk = sock->sk;
2032 long amount = 0;
2033 int err;
2034
2035 switch (cmd) {
2036 case SIOCOUTQ:
2037 amount = sk_wmem_alloc_get(sk);
2038 err = put_user(amount, (int __user *)arg);
2039 break;
2040 case SIOCINQ:
2041 {
2042 struct sk_buff *skb;
2043
2044 if (sk->sk_state == TCP_LISTEN) {
2045 err = -EINVAL;
2046 break;
2047 }
2048
2049 spin_lock(&sk->sk_receive_queue.lock);
2050 if (sk->sk_type == SOCK_STREAM ||
2051 sk->sk_type == SOCK_SEQPACKET) {
2052 skb_queue_walk(&sk->sk_receive_queue, skb)
2053 amount += skb->len;
2054 } else {
2055 skb = skb_peek(&sk->sk_receive_queue);
2056 if (skb)
2057 amount = skb->len;
2058 }
2059 spin_unlock(&sk->sk_receive_queue.lock);
2060 err = put_user(amount, (int __user *)arg);
2061 break;
2062 }
2063
2064 default:
2065 err = -ENOIOCTLCMD;
2066 break;
2067 }
2068 return err;
2069 }
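
/*
 * Illustrative userspace sketch (not part of this file; "fd" is
 * assumed to exist): querying the queue sizes computed above.
 *
 *	int inq, outq;
 *
 *	ioctl(fd, SIOCINQ, &inq);	// bytes waiting in the receive queue
 *	ioctl(fd, SIOCOUTQ, &outq);	// bytes still held in the send queue
 */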
2070
2071 static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2072 {
2073 struct sock *sk = sock->sk;
2074 unsigned int mask;
2075
2076 sock_poll_wait(file, sk_sleep(sk), wait);
2077 mask = 0;
2078
2079 /* exceptional events? */
2080 if (sk->sk_err)
2081 mask |= POLLERR;
2082 if (sk->sk_shutdown == SHUTDOWN_MASK)
2083 mask |= POLLHUP;
2084 if (sk->sk_shutdown & RCV_SHUTDOWN)
2085 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2086
2087 /* readable? */
2088 if (!skb_queue_empty(&sk->sk_receive_queue))
2089 mask |= POLLIN | POLLRDNORM;
2090
2091 /* Connection-based sockets need to check for termination and startup */
2092 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2093 sk->sk_state == TCP_CLOSE)
2094 mask |= POLLHUP;
2095
2096 /*
2097 * We also set writable when the other side has shut down the
2098 * connection. This prevents stuck sockets.
2099 */
2100 if (unix_writable(sk))
2101 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2102
2103 return mask;
2104 }
2105
2106 static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2107 poll_table *wait)
2108 {
2109 struct sock *sk = sock->sk, *other;
2110 unsigned int mask, writable;
2111
2112 sock_poll_wait(file, sk_sleep(sk), wait);
2113 mask = 0;
2114
2115 /* exceptional events? */
2116 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2117 mask |= POLLERR;
2118 if (sk->sk_shutdown & RCV_SHUTDOWN)
2119 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2120 if (sk->sk_shutdown == SHUTDOWN_MASK)
2121 mask |= POLLHUP;
2122
2123 /* readable? */
2124 if (!skb_queue_empty(&sk->sk_receive_queue))
2125 mask |= POLLIN | POLLRDNORM;
2126
2127 /* Connection-based sockets need to check for termination and startup */
2128 if (sk->sk_type == SOCK_SEQPACKET) {
2129 if (sk->sk_state == TCP_CLOSE)
2130 mask |= POLLHUP;
2131 /* connection hasn't started yet? */
2132 if (sk->sk_state == TCP_SYN_SENT)
2133 return mask;
2134 }
2135
2136 /* No write status requested, avoid expensive OUT tests. */
2137 if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
2138 return mask;
2139
2140 writable = unix_writable(sk);
2141 other = unix_peer_get(sk);
2142 if (other) {
2143 if (unix_peer(other) != sk) {
2144 sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2145 if (unix_recvq_full(other))
2146 writable = 0;
2147 }
2148 sock_put(other);
2149 }
2150
2151 if (writable)
2152 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2153 else
2154 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2155
2156 return mask;
2157 }
2158
2159 #ifdef CONFIG_PROC_FS
2160 static struct sock *first_unix_socket(int *i)
2161 {
2162 for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
2163 if (!hlist_empty(&unix_socket_table[*i]))
2164 return __sk_head(&unix_socket_table[*i]);
2165 }
2166 return NULL;
2167 }
2168
2169 static struct sock *next_unix_socket(int *i, struct sock *s)
2170 {
2171 struct sock *next = sk_next(s);
2172 /* More in this chain? */
2173 if (next)
2174 return next;
2175 /* Look for next non-empty chain. */
2176 for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
2177 if (!hlist_empty(&unix_socket_table[*i]))
2178 return __sk_head(&unix_socket_table[*i]);
2179 }
2180 return NULL;
2181 }
2182
2183 struct unix_iter_state {
2184 struct seq_net_private p;
2185 int i;
2186 };
2187
2188 static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
2189 {
2190 struct unix_iter_state *iter = seq->private;
2191 loff_t off = 0;
2192 struct sock *s;
2193
2194 for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
2195 if (sock_net(s) != seq_file_net(seq))
2196 continue;
2197 if (off == pos)
2198 return s;
2199 ++off;
2200 }
2201 return NULL;
2202 }
2203
2204 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2205 __acquires(unix_table_lock)
2206 {
2207 spin_lock(&unix_table_lock);
2208 return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2209 }
2210
2211 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2212 {
2213 struct unix_iter_state *iter = seq->private;
2214 struct sock *sk = v;
2215 ++*pos;
2216
2217 if (v == SEQ_START_TOKEN)
2218 sk = first_unix_socket(&iter->i);
2219 else
2220 sk = next_unix_socket(&iter->i, sk);
2221 while (sk && (sock_net(sk) != seq_file_net(seq)))
2222 sk = next_unix_socket(&iter->i, sk);
2223 return sk;
2224 }
2225
2226 static void unix_seq_stop(struct seq_file *seq, void *v)
2227 __releases(unix_table_lock)
2228 {
2229 spin_unlock(&unix_table_lock);
2230 }
2231
2232 static int unix_seq_show(struct seq_file *seq, void *v)
2233 {
2234
2235 if (v == SEQ_START_TOKEN)
2236 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2237 "Inode Path\n");
2238 else {
2239 struct sock *s = v;
2240 struct unix_sock *u = unix_sk(s);
2241 unix_state_lock(s);
2242
2243 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
2244 s,
2245 atomic_read(&s->sk_refcnt),
2246 0,
2247 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2248 s->sk_type,
2249 s->sk_socket ?
2250 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2251 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2252 sock_i_ino(s));
2253
2254 if (u->addr) {
2255 int i, len;
2256 seq_putc(seq, ' ');
2257
2258 i = 0;
2259 len = u->addr->len - sizeof(short);
2260 if (!UNIX_ABSTRACT(s))
2261 len--;
2262 else {
2263 seq_putc(seq, '@');
2264 i++;
2265 }
2266 for ( ; i < len; i++)
2267 seq_putc(seq, u->addr->name->sun_path[i]);
2268 }
2269 unix_state_unlock(s);
2270 seq_putc(seq, '\n');
2271 }
2272
2273 return 0;
2274 }
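
/*
 * Illustrative sketch of a line produced above (every value here is
 * made up): a listening stream socket bound to a filesystem path
 * might appear in /proc/net/unix as
 *
 *	ffff880012345678: 00000002 00000000 00010000 0001 01 12345 /run/demo.sock
 */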
2275
2276 static const struct seq_operations unix_seq_ops = {
2277 .start = unix_seq_start,
2278 .next = unix_seq_next,
2279 .stop = unix_seq_stop,
2280 .show = unix_seq_show,
2281 };
2282
2283 static int unix_seq_open(struct inode *inode, struct file *file)
2284 {
2285 return seq_open_net(inode, file, &unix_seq_ops,
2286 sizeof(struct unix_iter_state));
2287 }
2288
2289 static const struct file_operations unix_seq_fops = {
2290 .owner = THIS_MODULE,
2291 .open = unix_seq_open,
2292 .read = seq_read,
2293 .llseek = seq_lseek,
2294 .release = seq_release_net,
2295 };
2296
2297 #endif
2298
2299 static const struct net_proto_family unix_family_ops = {
2300 .family = PF_UNIX,
2301 .create = unix_create,
2302 .owner = THIS_MODULE,
2303 };
2304
2305
2306 static int __net_init unix_net_init(struct net *net)
2307 {
2308 int error = -ENOMEM;
2309
2310 net->unx.sysctl_max_dgram_qlen = 10;
2311 if (unix_sysctl_register(net))
2312 goto out;
2313
2314 #ifdef CONFIG_PROC_FS
2315 if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
2316 unix_sysctl_unregister(net);
2317 goto out;
2318 }
2319 #endif
2320 error = 0;
2321 out:
2322 return error;
2323 }
2324
2325 static void __net_exit unix_net_exit(struct net *net)
2326 {
2327 unix_sysctl_unregister(net);
2328 proc_net_remove(net, "unix");
2329 }
2330
2331 static struct pernet_operations unix_net_ops = {
2332 .init = unix_net_init,
2333 .exit = unix_net_exit,
2334 };
2335
2336 static int __init af_unix_init(void)
2337 {
2338 int rc = -1;
2339 struct sk_buff *dummy_skb;
2340
2341 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2342
2343 rc = proto_register(&unix_proto, 1);
2344 if (rc != 0) {
2345 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2346 __func__);
2347 goto out;
2348 }
2349
2350 sock_register(&unix_family_ops);
2351 register_pernet_subsys(&unix_net_ops);
2352 out:
2353 return rc;
2354 }
2355
2356 static void __exit af_unix_exit(void)
2357 {
2358 sock_unregister(PF_UNIX);
2359 proto_unregister(&unix_proto);
2360 unregister_pernet_subsys(&unix_net_ops);
2361 }
2362
2363 /* Earlier than device_initcall() so that other drivers invoking
2364 request_module() don't end up in a loop when modprobe tries
2365 to use a UNIX socket. But later than subsys_initcall() because
2366 we depend on stuff initialised there */
2367 fs_initcall(af_unix_init);
2368 module_exit(af_unix_exit);
2369
2370 MODULE_LICENSE("GPL");
2371 MODULE_ALIAS_NETPROTO(PF_UNIX);