1 /*
2 * linux/fs/pipe.c
3 *
4 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
5 */
6
7 #include <linux/mm.h>
8 #include <linux/file.h>
9 #include <linux/poll.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/fs.h>
14 #include <linux/log2.h>
15 #include <linux/mount.h>
16 #include <linux/magic.h>
17 #include <linux/pipe_fs_i.h>
18 #include <linux/uio.h>
19 #include <linux/highmem.h>
20 #include <linux/pagemap.h>
21 #include <linux/audit.h>
22 #include <linux/syscalls.h>
23 #include <linux/fcntl.h>
24 #include <linux/memcontrol.h>
25
26 #include <asm/uaccess.h>
27 #include <asm/ioctls.h>
28
29 #include "internal.h"
30
31 /*
32 * The max size that a non-root user is allowed to grow the pipe to. Can
33 * be set by root in /proc/sys/fs/pipe-max-size
34 */
35 unsigned int pipe_max_size = 1048576;
36
37 /*
38 * Minimum pipe size, as required by POSIX
39 */
40 unsigned int pipe_min_size = PAGE_SIZE;
41
42 /* Maximum allocatable pages per user. Hard limit is unset by default, soft
43 * matches default values.
44 */
45 unsigned long pipe_user_pages_hard;
46 unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
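/*
 * Rough arithmetic behind the soft default above, assuming the usual
 * values of PIPE_DEF_BUFFERS == 16 and INR_OPEN_CUR == 1024 (both can
 * differ by configuration):
 *
 *	pipe_user_pages_soft = 16 * 1024 = 16384 pages
 *	                     = 64 MiB with 4 KiB pages
 *
 * i.e. the soft limit corresponds to every allowed fd holding one
 * default-sized pipe; the hard limit stays 0 (unlimited) until set via
 * /proc/sys/fs/pipe-user-pages-hard.
 */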
47
48 /*
49 * We use a start+len construction, which provides full use of the
50 * allocated memory.
51 * -- Florian Coosmann (FGC)
52 *
53 * Reads with count = 0 should always return 0.
54 * -- Julian Bradfield 1999-06-07.
55 *
56 * FIFOs and Pipes now generate SIGIO for both readers and writers.
57 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
58 *
59 * pipe_read & write cleanup
60 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
61 */
62
63 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
64 {
65 if (pipe->files)
66 mutex_lock_nested(&pipe->mutex, subclass);
67 }
68
69 void pipe_lock(struct pipe_inode_info *pipe)
70 {
71 /*
72 * pipe_lock() nests non-pipe inode locks (for writing to a file)
73 */
74 pipe_lock_nested(pipe, I_MUTEX_PARENT);
75 }
76 EXPORT_SYMBOL(pipe_lock);
77
78 void pipe_unlock(struct pipe_inode_info *pipe)
79 {
80 if (pipe->files)
81 mutex_unlock(&pipe->mutex);
82 }
83 EXPORT_SYMBOL(pipe_unlock);
84
85 static inline void __pipe_lock(struct pipe_inode_info *pipe)
86 {
87 mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
88 }
89
90 static inline void __pipe_unlock(struct pipe_inode_info *pipe)
91 {
92 mutex_unlock(&pipe->mutex);
93 }
94
95 void pipe_double_lock(struct pipe_inode_info *pipe1,
96 struct pipe_inode_info *pipe2)
97 {
98 BUG_ON(pipe1 == pipe2);
99
100 if (pipe1 < pipe2) {
101 pipe_lock_nested(pipe1, I_MUTEX_PARENT);
102 pipe_lock_nested(pipe2, I_MUTEX_CHILD);
103 } else {
104 pipe_lock_nested(pipe2, I_MUTEX_PARENT);
105 pipe_lock_nested(pipe1, I_MUTEX_CHILD);
106 }
107 }
108
109 /* Drop the inode semaphore and wait for a pipe event, atomically */
110 void pipe_wait(struct pipe_inode_info *pipe)
111 {
112 DEFINE_WAIT(wait);
113
114 /*
115 * Pipes are system-local resources, so sleeping on them
116 * is considered a noninteractive wait:
117 */
118 prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
119 pipe_unlock(pipe);
120 schedule();
121 finish_wait(&pipe->wait, &wait);
122 pipe_lock(pipe);
123 }
124
125 static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
126 struct pipe_buffer *buf)
127 {
128 struct page *page = buf->page;
129
130 /*
131 * If nobody else uses this page, and we don't already have a
132 * temporary page, let's keep track of it as a one-deep
133 * allocation cache. (Otherwise just release our reference to it)
134 */
135 if (page_count(page) == 1 && !pipe->tmp_page)
136 pipe->tmp_page = page;
137 else
138 put_page(page);
139 }
140
141 static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
142 struct pipe_buffer *buf)
143 {
144 struct page *page = buf->page;
145
146 if (page_count(page) == 1) {
147 if (memcg_kmem_enabled())
148 memcg_kmem_uncharge(page, 0);
149 __SetPageLocked(page);
150 return 0;
151 }
152 return 1;
153 }
154
155 /**
156 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
157 * @pipe: the pipe that the buffer belongs to
158 * @buf: the buffer to attempt to steal
159 *
160 * Description:
161 * This function attempts to steal the &struct page attached to
162 * @buf. If successful, this function returns 0 and returns with
163 * the page locked. The caller may then reuse the page for whatever
164 * he wishes; the typical use is insertion into a different file's
165 * page cache.
166 */
167 int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
168 struct pipe_buffer *buf)
169 {
170 struct page *page = buf->page;
171
172 /*
173 * A reference of one is golden, that means that the owner of this
174 * page is the only one holding a reference to it. lock the page
175 * and return OK.
176 */
177 if (page_count(page) == 1) {
178 lock_page(page);
179 return 0;
180 }
181
182 return 1;
183 }
184 EXPORT_SYMBOL(generic_pipe_buf_steal);
185
186 /**
187 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
188 * @pipe: the pipe that the buffer belongs to
189 * @buf: the buffer to get a reference to
190 *
191 * Description:
192 * This function grabs an extra reference to @buf. It's used in
193 * the tee() system call, when we duplicate the buffers in one
194 * pipe into another.
195 */
196 void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
197 {
198 get_page(buf->page);
199 }
200 EXPORT_SYMBOL(generic_pipe_buf_get);
201
202 /**
203 * generic_pipe_buf_confirm - verify contents of the pipe buffer
204 * @info: the pipe that the buffer belongs to
205 * @buf: the buffer to confirm
206 *
207 * Description:
208 * This function does nothing, because the generic pipe code uses
209 * pages that are always good when inserted into the pipe.
210 */
211 int generic_pipe_buf_confirm(struct pipe_inode_info *info,
212 struct pipe_buffer *buf)
213 {
214 return 0;
215 }
216 EXPORT_SYMBOL(generic_pipe_buf_confirm);
217
218 /**
219 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
220 * @pipe: the pipe that the buffer belongs to
221 * @buf: the buffer to put a reference to
222 *
223 * Description:
224 * This function releases a reference to @buf.
225 */
226 void generic_pipe_buf_release(struct pipe_inode_info *pipe,
227 struct pipe_buffer *buf)
228 {
229 put_page(buf->page);
230 }
231 EXPORT_SYMBOL(generic_pipe_buf_release);
232
233 static const struct pipe_buf_operations anon_pipe_buf_ops = {
234 .can_merge = 1,
235 .confirm = generic_pipe_buf_confirm,
236 .release = anon_pipe_buf_release,
237 .steal = anon_pipe_buf_steal,
238 .get = generic_pipe_buf_get,
239 };
240
241 static const struct pipe_buf_operations packet_pipe_buf_ops = {
242 .can_merge = 0,
243 .confirm = generic_pipe_buf_confirm,
244 .release = anon_pipe_buf_release,
245 .steal = anon_pipe_buf_steal,
246 .get = generic_pipe_buf_get,
247 };
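/*
 * Userspace usage sketch (illustrative only; fds[] as returned by pipe2()):
 * packet mode is selected with O_DIRECT at pipe creation time, which makes
 * pipe_write() pick packet_pipe_buf_ops above (can_merge == 0).  Each
 * write() then produces one packet and a read() returns at most one packet.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int fds[2];
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "ab", 2);
 *	write(fds[1], "cd", 2);
 *	char buf[16];
 *	read(fds[0], buf, sizeof(buf));	// returns 2 ("ab"), not 4
 */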
248
249 static ssize_t
250 pipe_read(struct kiocb *iocb, struct iov_iter *to)
251 {
252 size_t total_len = iov_iter_count(to);
253 struct file *filp = iocb->ki_filp;
254 struct pipe_inode_info *pipe = filp->private_data;
255 int do_wakeup;
256 ssize_t ret;
257
258 /* Null read succeeds. */
259 if (unlikely(total_len == 0))
260 return 0;
261
262 do_wakeup = 0;
263 ret = 0;
264 __pipe_lock(pipe);
265 for (;;) {
266 int bufs = pipe->nrbufs;
267 if (bufs) {
268 int curbuf = pipe->curbuf;
269 struct pipe_buffer *buf = pipe->bufs + curbuf;
270 const struct pipe_buf_operations *ops = buf->ops;
271 size_t chars = buf->len;
272 size_t written;
273 int error;
274
275 if (chars > total_len)
276 chars = total_len;
277
278 error = ops->confirm(pipe, buf);
279 if (error) {
280 if (!ret)
281 ret = error;
282 break;
283 }
284
285 written = copy_page_to_iter(buf->page, buf->offset, chars, to);
286 if (unlikely(written < chars)) {
287 if (!ret)
288 ret = -EFAULT;
289 break;
290 }
291 ret += chars;
292 buf->offset += chars;
293 buf->len -= chars;
294
295 /* Was it a packet buffer? Clean up and exit */
296 if (buf->flags & PIPE_BUF_FLAG_PACKET) {
297 total_len = chars;
298 buf->len = 0;
299 }
300
301 if (!buf->len) {
302 buf->ops = NULL;
303 ops->release(pipe, buf);
304 curbuf = (curbuf + 1) & (pipe->buffers - 1);
305 pipe->curbuf = curbuf;
306 pipe->nrbufs = --bufs;
307 do_wakeup = 1;
308 }
309 total_len -= chars;
310 if (!total_len)
311 break; /* common path: read succeeded */
312 }
313 if (bufs) /* More to do? */
314 continue;
315 if (!pipe->writers)
316 break;
317 if (!pipe->waiting_writers) {
318 /* syscall merging: Usually we must not sleep
319 * if O_NONBLOCK is set, or if we got some data.
320 * But if a writer sleeps in kernel space, then
321 * we can wait for that data without violating POSIX.
322 */
323 if (ret)
324 break;
325 if (filp->f_flags & O_NONBLOCK) {
326 ret = -EAGAIN;
327 break;
328 }
329 }
330 if (signal_pending(current)) {
331 if (!ret)
332 ret = -ERESTARTSYS;
333 break;
334 }
335 if (do_wakeup) {
336 wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
337 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
338 }
339 pipe_wait(pipe);
340 }
341 __pipe_unlock(pipe);
342
343 /* Signal writers asynchronously that there is more room. */
344 if (do_wakeup) {
345 wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
346 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
347 }
348 if (ret > 0)
349 file_accessed(filp);
350 return ret;
351 }
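/*
 * Userspace usage sketch (illustrative only; fds[] from pipe2(), needs
 * <unistd.h> and <errno.h>) of the return values produced above: once
 * every writer has closed, read() returns 0 (EOF); with writers present
 * but no data, an O_NONBLOCK reader gets -1/EAGAIN instead of sleeping
 * in pipe_wait().
 *
 *	char c;
 *	ssize_t n = read(fds[0], &c, 1);
 *	if (n == 0)
 *		;	// all write ends closed
 *	else if (n < 0 && errno == EAGAIN)
 *		;	// pipe opened O_NONBLOCK and currently empty
 */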
352
353 static inline int is_packetized(struct file *file)
354 {
355 return (file->f_flags & O_DIRECT) != 0;
356 }
357
358 static ssize_t
359 pipe_write(struct kiocb *iocb, struct iov_iter *from)
360 {
361 struct file *filp = iocb->ki_filp;
362 struct pipe_inode_info *pipe = filp->private_data;
363 ssize_t ret = 0;
364 int do_wakeup = 0;
365 size_t total_len = iov_iter_count(from);
366 ssize_t chars;
367
368 /* Null write succeeds. */
369 if (unlikely(total_len == 0))
370 return 0;
371
372 __pipe_lock(pipe);
373
374 if (!pipe->readers) {
375 send_sig(SIGPIPE, current, 0);
376 ret = -EPIPE;
377 goto out;
378 }
379
380 /* We try to merge small writes */
381 chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
382 if (pipe->nrbufs && chars != 0) {
383 int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
384 (pipe->buffers - 1);
385 struct pipe_buffer *buf = pipe->bufs + lastbuf;
386 const struct pipe_buf_operations *ops = buf->ops;
387 int offset = buf->offset + buf->len;
388
389 if (ops->can_merge && offset + chars <= PAGE_SIZE) {
390 ret = ops->confirm(pipe, buf);
391 if (ret)
392 goto out;
393
394 ret = copy_page_from_iter(buf->page, offset, chars, from);
395 if (unlikely(ret < chars)) {
396 ret = -EFAULT;
397 goto out;
398 }
399 do_wakeup = 1;
400 buf->len += ret;
401 if (!iov_iter_count(from))
402 goto out;
403 }
404 }
405
406 for (;;) {
407 int bufs;
408
409 if (!pipe->readers) {
410 send_sig(SIGPIPE, current, 0);
411 if (!ret)
412 ret = -EPIPE;
413 break;
414 }
415 bufs = pipe->nrbufs;
416 if (bufs < pipe->buffers) {
417 int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
418 struct pipe_buffer *buf = pipe->bufs + newbuf;
419 struct page *page = pipe->tmp_page;
420 int copied;
421
422 if (!page) {
423 page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
424 if (unlikely(!page)) {
425 ret = ret ? : -ENOMEM;
426 break;
427 }
428 pipe->tmp_page = page;
429 }
430 /* Always wake up, even if the copy fails. Otherwise
431 * we lock up (O_NONBLOCK-)readers that sleep due to
432 * syscall merging.
433 * FIXME! Is this really true?
434 */
435 do_wakeup = 1;
436 copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
437 if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
438 if (!ret)
439 ret = -EFAULT;
440 break;
441 }
442 ret += copied;
443
444 /* Insert it into the buffer array */
445 buf->page = page;
446 buf->ops = &anon_pipe_buf_ops;
447 buf->offset = 0;
448 buf->len = copied;
449 buf->flags = 0;
450 if (is_packetized(filp)) {
451 buf->ops = &packet_pipe_buf_ops;
452 buf->flags = PIPE_BUF_FLAG_PACKET;
453 }
454 pipe->nrbufs = ++bufs;
455 pipe->tmp_page = NULL;
456
457 if (!iov_iter_count(from))
458 break;
459 }
460 if (bufs < pipe->buffers)
461 continue;
462 if (filp->f_flags & O_NONBLOCK) {
463 if (!ret)
464 ret = -EAGAIN;
465 break;
466 }
467 if (signal_pending(current)) {
468 if (!ret)
469 ret = -ERESTARTSYS;
470 break;
471 }
472 if (do_wakeup) {
473 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
474 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
475 do_wakeup = 0;
476 }
477 pipe->waiting_writers++;
478 pipe_wait(pipe);
479 pipe->waiting_writers--;
480 }
481 out:
482 __pipe_unlock(pipe);
483 if (do_wakeup) {
484 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
485 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
486 }
487 if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
488 int err = file_update_time(filp);
489 if (err)
490 ret = err;
491 sb_end_write(file_inode(filp)->i_sb);
492 }
493 return ret;
494 }
495
496 static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
497 {
498 struct pipe_inode_info *pipe = filp->private_data;
499 int count, buf, nrbufs;
500
501 switch (cmd) {
502 case FIONREAD:
503 __pipe_lock(pipe);
504 count = 0;
505 buf = pipe->curbuf;
506 nrbufs = pipe->nrbufs;
507 while (--nrbufs >= 0) {
508 count += pipe->bufs[buf].len;
509 buf = (buf+1) & (pipe->buffers - 1);
510 }
511 __pipe_unlock(pipe);
512
513 return put_user(count, (int __user *)arg);
514 default:
515 return -ENOIOCTLCMD;
516 }
517 }
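/*
 * Userspace usage sketch (illustrative only; fds[] from pipe()): FIONREAD,
 * handled above, reports how many bytes are currently buffered in the pipe.
 *
 *	#include <sys/ioctl.h>
 *
 *	int avail;
 *	if (ioctl(fds[0], FIONREAD, &avail) == 0)
 *		;	// 'avail' bytes can be read without blocking
 */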
518
519 /* No kernel lock held - fine */
520 static unsigned int
521 pipe_poll(struct file *filp, poll_table *wait)
522 {
523 unsigned int mask;
524 struct pipe_inode_info *pipe = filp->private_data;
525 int nrbufs;
526
527 poll_wait(filp, &pipe->wait, wait);
528
529 /* Reading only -- no need for acquiring the semaphore. */
530 nrbufs = pipe->nrbufs;
531 mask = 0;
532 if (filp->f_mode & FMODE_READ) {
533 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
534 if (!pipe->writers && filp->f_version != pipe->w_counter)
535 mask |= POLLHUP;
536 }
537
538 if (filp->f_mode & FMODE_WRITE) {
539 mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
540 /*
541 * Most Unices do not set POLLERR for FIFOs but on Linux they
542 * behave exactly like pipes for poll().
543 */
544 if (!pipe->readers)
545 mask |= POLLERR;
546 }
547
548 return mask;
549 }
550
551 static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
552 {
553 int kill = 0;
554
555 spin_lock(&inode->i_lock);
556 if (!--pipe->files) {
557 inode->i_pipe = NULL;
558 kill = 1;
559 }
560 spin_unlock(&inode->i_lock);
561
562 if (kill)
563 free_pipe_info(pipe);
564 }
565
566 static int
567 pipe_release(struct inode *inode, struct file *file)
568 {
569 struct pipe_inode_info *pipe = file->private_data;
570
571 __pipe_lock(pipe);
572 if (file->f_mode & FMODE_READ)
573 pipe->readers--;
574 if (file->f_mode & FMODE_WRITE)
575 pipe->writers--;
576
577 if (pipe->readers || pipe->writers) {
578 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
579 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
580 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
581 }
582 __pipe_unlock(pipe);
583
584 put_pipe_info(inode, pipe);
585 return 0;
586 }
587
588 static int
589 pipe_fasync(int fd, struct file *filp, int on)
590 {
591 struct pipe_inode_info *pipe = filp->private_data;
592 int retval = 0;
593
594 __pipe_lock(pipe);
595 if (filp->f_mode & FMODE_READ)
596 retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
597 if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
598 retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
599 if (retval < 0 && (filp->f_mode & FMODE_READ))
600 /* this can happen only if on is true */
601 fasync_helper(-1, filp, 0, &pipe->fasync_readers);
602 }
603 __pipe_unlock(pipe);
604 return retval;
605 }
606
607 static unsigned long account_pipe_buffers(struct user_struct *user,
608 unsigned long old, unsigned long new)
609 {
610 return atomic_long_add_return(new - old, &user->pipe_bufs);
611 }
612
613 static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
614 {
615 return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft;
616 }
617
618 static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
619 {
620 return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard;
621 }
622
623 struct pipe_inode_info *alloc_pipe_info(void)
624 {
625 struct pipe_inode_info *pipe;
626 unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
627 struct user_struct *user = get_current_user();
628 unsigned long user_bufs;
629
630 pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
631 if (pipe == NULL)
632 goto out_free_uid;
633
634 if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
635 pipe_bufs = pipe_max_size >> PAGE_SHIFT;
636
637 user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
638
639 if (too_many_pipe_buffers_soft(user_bufs)) {
640 user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
641 pipe_bufs = 1;
642 }
643
644 if (too_many_pipe_buffers_hard(user_bufs))
645 goto out_revert_acct;
646
647 pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
648 GFP_KERNEL_ACCOUNT);
649
650 if (pipe->bufs) {
651 init_waitqueue_head(&pipe->wait);
652 pipe->r_counter = pipe->w_counter = 1;
653 pipe->buffers = pipe_bufs;
654 pipe->user = user;
655 mutex_init(&pipe->mutex);
656 return pipe;
657 }
658
659 out_revert_acct:
660 (void) account_pipe_buffers(user, pipe_bufs, 0);
661 kfree(pipe);
662 out_free_uid:
663 free_uid(user);
664 return NULL;
665 }
666
667 void free_pipe_info(struct pipe_inode_info *pipe)
668 {
669 int i;
670
671 (void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
672 free_uid(pipe->user);
673 for (i = 0; i < pipe->buffers; i++) {
674 struct pipe_buffer *buf = pipe->bufs + i;
675 if (buf->ops)
676 buf->ops->release(pipe, buf);
677 }
678 if (pipe->tmp_page)
679 __free_page(pipe->tmp_page);
680 kfree(pipe->bufs);
681 kfree(pipe);
682 }
683
684 static struct vfsmount *pipe_mnt __read_mostly;
685
686 /*
687 * pipefs_dname() is called from d_path().
688 */
689 static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
690 {
691 return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
692 d_inode(dentry)->i_ino);
693 }
694
695 static const struct dentry_operations pipefs_dentry_operations = {
696 .d_dname = pipefs_dname,
697 };
698
699 static struct inode * get_pipe_inode(void)
700 {
701 struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
702 struct pipe_inode_info *pipe;
703
704 if (!inode)
705 goto fail_inode;
706
707 inode->i_ino = get_next_ino();
708
709 pipe = alloc_pipe_info();
710 if (!pipe)
711 goto fail_iput;
712
713 inode->i_pipe = pipe;
714 pipe->files = 2;
715 pipe->readers = pipe->writers = 1;
716 inode->i_fop = &pipefifo_fops;
717
718 /*
719 * Mark the inode dirty from the very beginning,
720 * that way it will never be moved to the dirty
721 * list because "mark_inode_dirty()" will think
722 * that it already _is_ on the dirty list.
723 */
724 inode->i_state = I_DIRTY;
725 inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
726 inode->i_uid = current_fsuid();
727 inode->i_gid = current_fsgid();
728 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
729
730 return inode;
731
732 fail_iput:
733 iput(inode);
734
735 fail_inode:
736 return NULL;
737 }
738
739 int create_pipe_files(struct file **res, int flags)
740 {
741 int err;
742 struct inode *inode = get_pipe_inode();
743 struct file *f;
744 struct path path;
745 static struct qstr name = { .name = "" };
746
747 if (!inode)
748 return -ENFILE;
749
750 err = -ENOMEM;
751 path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
752 if (!path.dentry)
753 goto err_inode;
754 path.mnt = mntget(pipe_mnt);
755
756 d_instantiate(path.dentry, inode);
757
758 f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
759 if (IS_ERR(f)) {
760 err = PTR_ERR(f);
761 goto err_dentry;
762 }
763
764 f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
765 f->private_data = inode->i_pipe;
766
767 res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
768 if (IS_ERR(res[0])) {
769 err = PTR_ERR(res[0]);
770 goto err_file;
771 }
772
773 path_get(&path);
774 res[0]->private_data = inode->i_pipe;
775 res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
776 res[1] = f;
777 return 0;
778
779 err_file:
780 put_filp(f);
781 err_dentry:
782 free_pipe_info(inode->i_pipe);
783 path_put(&path);
784 return err;
785
786 err_inode:
787 free_pipe_info(inode->i_pipe);
788 iput(inode);
789 return err;
790 }
791
792 static int __do_pipe_flags(int *fd, struct file **files, int flags)
793 {
794 int error;
795 int fdw, fdr;
796
797 if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
798 return -EINVAL;
799
800 error = create_pipe_files(files, flags);
801 if (error)
802 return error;
803
804 error = get_unused_fd_flags(flags);
805 if (error < 0)
806 goto err_read_pipe;
807 fdr = error;
808
809 error = get_unused_fd_flags(flags);
810 if (error < 0)
811 goto err_fdr;
812 fdw = error;
813
814 audit_fd_pair(fdr, fdw);
815 fd[0] = fdr;
816 fd[1] = fdw;
817 return 0;
818
819 err_fdr:
820 put_unused_fd(fdr);
821 err_read_pipe:
822 fput(files[0]);
823 fput(files[1]);
824 return error;
825 }
826
827 int do_pipe_flags(int *fd, int flags)
828 {
829 struct file *files[2];
830 int error = __do_pipe_flags(fd, files, flags);
831 if (!error) {
832 fd_install(fd[0], files[0]);
833 fd_install(fd[1], files[1]);
834 }
835 return error;
836 }
837
838 /*
839 * sys_pipe() is the normal C calling standard for creating
840 * a pipe. It's not the way Unix traditionally does this, though.
841 */
842 SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
843 {
844 struct file *files[2];
845 int fd[2];
846 int error;
847
848 error = __do_pipe_flags(fd, files, flags);
849 if (!error) {
850 if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
851 fput(files[0]);
852 fput(files[1]);
853 put_unused_fd(fd[0]);
854 put_unused_fd(fd[1]);
855 error = -EFAULT;
856 } else {
857 fd_install(fd[0], files[0]);
858 fd_install(fd[1], files[1]);
859 }
860 }
861 return error;
862 }
863
864 SYSCALL_DEFINE1(pipe, int __user *, fildes)
865 {
866 return sys_pipe2(fildes, 0);
867 }
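/*
 * Userspace usage sketch (illustrative only): pipe2() is the flags-taking
 * variant wired up above; plain pipe() is simply pipe2(fildes, 0).
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		perror("pipe2");
 *	// fds[0] is the read end, fds[1] the write end
 */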
868
869 static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
870 {
871 int cur = *cnt;
872
873 while (cur == *cnt) {
874 pipe_wait(pipe);
875 if (signal_pending(current))
876 break;
877 }
878 return cur == *cnt ? -ERESTARTSYS : 0;
879 }
880
881 static void wake_up_partner(struct pipe_inode_info *pipe)
882 {
883 wake_up_interruptible(&pipe->wait);
884 }
885
886 static int fifo_open(struct inode *inode, struct file *filp)
887 {
888 struct pipe_inode_info *pipe;
889 bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
890 int ret;
891
892 filp->f_version = 0;
893
894 spin_lock(&inode->i_lock);
895 if (inode->i_pipe) {
896 pipe = inode->i_pipe;
897 pipe->files++;
898 spin_unlock(&inode->i_lock);
899 } else {
900 spin_unlock(&inode->i_lock);
901 pipe = alloc_pipe_info();
902 if (!pipe)
903 return -ENOMEM;
904 pipe->files = 1;
905 spin_lock(&inode->i_lock);
906 if (unlikely(inode->i_pipe)) {
907 inode->i_pipe->files++;
908 spin_unlock(&inode->i_lock);
909 free_pipe_info(pipe);
910 pipe = inode->i_pipe;
911 } else {
912 inode->i_pipe = pipe;
913 spin_unlock(&inode->i_lock);
914 }
915 }
916 filp->private_data = pipe;
917 /* OK, we have a pipe and it's pinned down */
918
919 __pipe_lock(pipe);
920
921 /* We can only do regular read/write on fifos */
922 filp->f_mode &= (FMODE_READ | FMODE_WRITE);
923
924 switch (filp->f_mode) {
925 case FMODE_READ:
926 /*
927 * O_RDONLY
928 * POSIX.1 says that O_NONBLOCK means return with the FIFO
929 * opened, even when there is no process writing the FIFO.
930 */
931 pipe->r_counter++;
932 if (pipe->readers++ == 0)
933 wake_up_partner(pipe);
934
935 if (!is_pipe && !pipe->writers) {
936 if ((filp->f_flags & O_NONBLOCK)) {
937 /* suppress POLLHUP until we have
938 * seen a writer */
939 filp->f_version = pipe->w_counter;
940 } else {
941 if (wait_for_partner(pipe, &pipe->w_counter))
942 goto err_rd;
943 }
944 }
945 break;
946
947 case FMODE_WRITE:
948 /*
949 * O_WRONLY
950 * POSIX.1 says that O_NONBLOCK means return -1 with
951 * errno=ENXIO when there is no process reading the FIFO.
952 */
953 ret = -ENXIO;
954 if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
955 goto err;
956
957 pipe->w_counter++;
958 if (!pipe->writers++)
959 wake_up_partner(pipe);
960
961 if (!is_pipe && !pipe->readers) {
962 if (wait_for_partner(pipe, &pipe->r_counter))
963 goto err_wr;
964 }
965 break;
966
967 case FMODE_READ | FMODE_WRITE:
968 /*
969 * O_RDWR
970 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
971 * This implementation will NEVER block on an O_RDWR open, since
972 * the process can at least talk to itself.
973 */
974
975 pipe->readers++;
976 pipe->writers++;
977 pipe->r_counter++;
978 pipe->w_counter++;
979 if (pipe->readers == 1 || pipe->writers == 1)
980 wake_up_partner(pipe);
981 break;
982
983 default:
984 ret = -EINVAL;
985 goto err;
986 }
987
988 /* Ok! */
989 __pipe_unlock(pipe);
990 return 0;
991
992 err_rd:
993 if (!--pipe->readers)
994 wake_up_interruptible(&pipe->wait);
995 ret = -ERESTARTSYS;
996 goto err;
997
998 err_wr:
999 if (!--pipe->writers)
1000 wake_up_interruptible(&pipe->wait);
1001 ret = -ERESTARTSYS;
1002 goto err;
1003
1004 err:
1005 __pipe_unlock(pipe);
1006
1007 put_pipe_info(inode, pipe);
1008 return ret;
1009 }
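/*
 * Userspace usage sketch (illustrative only; the path is hypothetical,
 * needs <fcntl.h> and <errno.h>) of the FIFO semantics implemented above:
 * an O_NONBLOCK open for writing fails with ENXIO while no reader exists,
 * whereas a blocking open sleeps in wait_for_partner().
 *
 *	int fd = open("/tmp/myfifo", O_WRONLY | O_NONBLOCK);
 *	if (fd < 0 && errno == ENXIO)
 *		;	// no process currently has the FIFO open for reading
 */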
1010
1011 const struct file_operations pipefifo_fops = {
1012 .open = fifo_open,
1013 .llseek = no_llseek,
1014 .read_iter = pipe_read,
1015 .write_iter = pipe_write,
1016 .poll = pipe_poll,
1017 .unlocked_ioctl = pipe_ioctl,
1018 .release = pipe_release,
1019 .fasync = pipe_fasync,
1020 };
1021
1022 /*
1023 * Currently we rely on the pipe array holding a power-of-2 number
1024 * of pages.
1025 */
1026 static inline unsigned int round_pipe_size(unsigned int size)
1027 {
1028 unsigned long nr_pages;
1029
1030 nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1031 return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
1032 }
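/*
 * Worked example, assuming 4 KiB pages: a request of 12000 bytes is first
 * rounded up to 3 pages, then to the next power of two, 4 pages, so
 * round_pipe_size(12000) == 16384.  A request of exactly one page is
 * returned unchanged.
 */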
1033
1034 /*
1035 * Allocate a new array of pipe buffers and copy the info over. Returns the
1036 * pipe size if successful, or a negative error code on failure.
1037 */
1038 static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
1039 {
1040 struct pipe_buffer *bufs;
1041 unsigned int size, nr_pages;
1042 unsigned long user_bufs;
1043 long ret = 0;
1044
1045 size = round_pipe_size(arg);
1046 nr_pages = size >> PAGE_SHIFT;
1047
1048 if (!nr_pages)
1049 return -EINVAL;
1050
1051 /*
1052 * If trying to increase the pipe capacity, check that an
1053 * unprivileged user is not trying to exceed various limits
1054 * (soft limit check here, hard limit check just below).
1055 * Decreasing the pipe capacity is always permitted, even
1056 * if the user is currently over a limit.
1057 */
1058 if (nr_pages > pipe->buffers &&
1059 size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
1060 return -EPERM;
1061
1062 user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);
1063
1064 if (nr_pages > pipe->buffers &&
1065 (too_many_pipe_buffers_hard(user_bufs) ||
1066 too_many_pipe_buffers_soft(user_bufs)) &&
1067 !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
1068 ret = -EPERM;
1069 goto out_revert_acct;
1070 }
1071
1072 /*
1073 * We can shrink the pipe if arg >= pipe->nrbufs. Since we don't
1074 * expect a lot of shrink+grow operations, just free and allocate
1075 * again like we would do for growing. If the pipe currently
1076 * contains more buffers than arg, then return busy.
1077 */
1078 if (nr_pages < pipe->nrbufs) {
1079 ret = -EBUSY;
1080 goto out_revert_acct;
1081 }
1082
1083 bufs = kcalloc(nr_pages, sizeof(*bufs),
1084 GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
1085 if (unlikely(!bufs)) {
1086 ret = -ENOMEM;
1087 goto out_revert_acct;
1088 }
1089
1090 /*
1091 * The pipe array wraps around, so just start the new one at zero
1092 * and adjust the indexes.
1093 */
1094 if (pipe->nrbufs) {
1095 unsigned int tail;
1096 unsigned int head;
1097
1098 tail = pipe->curbuf + pipe->nrbufs;
1099 if (tail < pipe->buffers)
1100 tail = 0;
1101 else
1102 tail &= (pipe->buffers - 1);
1103
1104 head = pipe->nrbufs - tail;
1105 if (head)
1106 memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
1107 if (tail)
1108 memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
1109 }
1110
1111 pipe->curbuf = 0;
1112 kfree(pipe->bufs);
1113 pipe->bufs = bufs;
1114 pipe->buffers = nr_pages;
1115 return nr_pages * PAGE_SIZE;
1116
1117 out_revert_acct:
1118 (void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
1119 return ret;
1120 }
1121
1122 /*
1123 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
1124 * will return an error.
1125 */
1126 int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
1127 size_t *lenp, loff_t *ppos)
1128 {
1129 int ret;
1130
1131 ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
1132 if (ret < 0 || !write)
1133 return ret;
1134
1135 pipe_max_size = round_pipe_size(pipe_max_size);
1136 return ret;
1137 }
1138
1139 /*
1140 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1141 * location, so checking ->i_pipe is not enough to verify that this is a
1142 * pipe.
1143 */
1144 struct pipe_inode_info *get_pipe_info(struct file *file)
1145 {
1146 return file->f_op == &pipefifo_fops ? file->private_data : NULL;
1147 }
1148
1149 long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1150 {
1151 struct pipe_inode_info *pipe;
1152 long ret;
1153
1154 pipe = get_pipe_info(file);
1155 if (!pipe)
1156 return -EBADF;
1157
1158 __pipe_lock(pipe);
1159
1160 switch (cmd) {
1161 case F_SETPIPE_SZ:
1162 ret = pipe_set_size(pipe, arg);
1163 break;
1164 case F_GETPIPE_SZ:
1165 ret = pipe->buffers * PAGE_SIZE;
1166 break;
1167 default:
1168 ret = -EINVAL;
1169 break;
1170 }
1171
1172 __pipe_unlock(pipe);
1173 return ret;
1174 }
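/*
 * Userspace usage sketch (illustrative only; fds[] from pipe()): resizing
 * a pipe through the fcntl commands handled above.  The kernel may round
 * the request up (see round_pipe_size()), so F_GETPIPE_SZ reports the
 * capacity actually granted.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	if (fcntl(fds[1], F_SETPIPE_SZ, 1 << 20) < 0)
 *		perror("F_SETPIPE_SZ");	// e.g. EPERM when over pipe-max-size
 *	long cap = fcntl(fds[1], F_GETPIPE_SZ, 0);	// capacity in bytes
 */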
1175
1176 static const struct super_operations pipefs_ops = {
1177 .destroy_inode = free_inode_nonrcu,
1178 .statfs = simple_statfs,
1179 };
1180
1181 /*
1182 * pipefs should _never_ be mounted by userland - too much of a security hassle,
1183 * no real gain from having the whole whorehouse mounted. So we don't need
1184 * any operations on the root directory. However, we need a non-trivial
1185 * d_name - pipe: will go nicely and kill the special-casing in procfs.
1186 */
1187 static struct dentry *pipefs_mount(struct file_system_type *fs_type,
1188 int flags, const char *dev_name, void *data)
1189 {
1190 return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
1191 &pipefs_dentry_operations, PIPEFS_MAGIC);
1192 }
1193
1194 static struct file_system_type pipe_fs_type = {
1195 .name = "pipefs",
1196 .mount = pipefs_mount,
1197 .kill_sb = kill_anon_super,
1198 };
1199
1200 static int __init init_pipe_fs(void)
1201 {
1202 int err = register_filesystem(&pipe_fs_type);
1203
1204 if (!err) {
1205 pipe_mnt = kern_mount(&pipe_fs_type);
1206 if (IS_ERR(pipe_mnt)) {
1207 err = PTR_ERR(pipe_mnt);
1208 unregister_filesystem(&pipe_fs_type);
1209 }
1210 }
1211 return err;
1212 }
1213
1214 fs_initcall(init_pipe_fs);