4 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
8 #include <linux/file.h>
9 #include <linux/poll.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/init.h>
14 #include <linux/log2.h>
15 #include <linux/mount.h>
16 #include <linux/pipe_fs_i.h>
17 #include <linux/uio.h>
18 #include <linux/highmem.h>
19 #include <linux/pagemap.h>
20 #include <linux/audit.h>
21 #include <linux/syscalls.h>
22 #include <linux/fcntl.h>
24 #include <asm/uaccess.h>
25 #include <asm/ioctls.h>
28 * The maximum size to which a non-root user is allowed to grow a pipe. Can
29 * be set by root in /proc/sys/fs/pipe-max-size
31 unsigned int pipe_max_size = 1048576;
34 * Minimum pipe size, as required by POSIX
36 unsigned int pipe_min_size = PAGE_SIZE;
39 * We use a start+len construction, which provides full use of the allocated memory.
41 * -- Florian Coosmann (FGC)
43 * Reads with count = 0 should always return 0.
44 * -- Julian Bradfield 1999-06-07.
46 * FIFOs and Pipes now generate SIGIO for both readers and writers.
47 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
49 * pipe_read & write cleanup
50 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
53 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
56 mutex_lock_nested(&pipe->inode->i_mutex, subclass);
59 void pipe_lock(struct pipe_inode_info *pipe)
62 * pipe_lock() nests non-pipe inode locks (for writing to a file)
64 pipe_lock_nested(pipe, I_MUTEX_PARENT);
66 EXPORT_SYMBOL(pipe_lock);
68 void pipe_unlock(struct pipe_inode_info *pipe)
71 mutex_unlock(&pipe->inode->i_mutex);
73 EXPORT_SYMBOL(pipe_unlock);
75 void pipe_double_lock(struct pipe_inode_info *pipe1,
76 struct pipe_inode_info *pipe2)
78 BUG_ON(pipe1 == pipe2);
81 pipe_lock_nested(pipe1, I_MUTEX_PARENT);
82 pipe_lock_nested(pipe2, I_MUTEX_CHILD);
84 pipe_lock_nested(pipe2, I_MUTEX_PARENT);
85 pipe_lock_nested(pipe1, I_MUTEX_CHILD);
89 /* Drop the inode semaphore and wait for a pipe event, atomically */
90 void pipe_wait(struct pipe_inode_info *pipe)
95 * Pipes are system-local resources, so sleeping on them
96 * is considered a noninteractive wait:
98 prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
101 finish_wait(&pipe->wait, &wait);
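/*
 * Editor's sketch (not part of the original source): pipe_wait() is
 * called with the pipe lock held; it drops the lock while sleeping and
 * re-takes it before returning.  Callers such as pipe_read() and
 * pipe_write() below use it roughly like this:
 *
 *	pipe_lock(pipe);
 *	while (pipe->nrbufs == 0) {            -- "pipe full" for writers
 *		if (signal_pending(current))
 *			break;                 -- caller returns -ERESTARTSYS
 *		pipe_wait(pipe);               -- sleeps; lock dropped and retaken
 *	}
 *	... consume or produce data ...
 *	pipe_unlock(pipe);
 */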
106 pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
112 while (!iov->iov_len)
114 copy = min_t(unsigned long, len, iov->iov_len);
117 if (__copy_from_user_inatomic(to, iov->iov_base, copy))
120 if (copy_from_user(to, iov->iov_base, copy))
125 iov->iov_base += copy;
126 iov->iov_len -= copy;
132 pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
138 while (!iov->iov_len)
140 copy = min_t(unsigned long, len, iov->iov_len);
143 if (__copy_to_user_inatomic(iov->iov_base, from, copy))
146 if (copy_to_user(iov->iov_base, from, copy))
151 iov->iov_base += copy;
152 iov->iov_len -= copy;
158 * Attempt to pre-fault in the user memory, so we can use atomic copies.
159 * Returns the number of bytes not faulted in.
161 static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
163 while (!iov->iov_len)
167 unsigned long this_len;
169 this_len = min_t(unsigned long, len, iov->iov_len);
170 if (fault_in_pages_writeable(iov->iov_base, this_len))
181 * Pre-fault in the user memory, so we can use atomic copies.
183 static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
185 while (!iov->iov_len)
189 unsigned long this_len;
191 this_len = min_t(unsigned long, len, iov->iov_len);
192 fault_in_pages_readable(iov->iov_base, this_len);
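/*
 * Editor's note (illustrative, not from the original source): the two
 * pre-fault helpers above support the fast path in pipe_read() and
 * pipe_write() below -- fault the user pages in first, take an atomic
 * kmap, and attempt the *_inatomic copy; if that copy still faults,
 * retry once with a sleeping kmap and the regular copy_to/from_user():
 *
 *	atomic = !iov_fault_in_pages_write(iov, chars);
 * redo:
 *	addr = ops->map(pipe, buf, atomic);
 *	error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
 *	ops->unmap(pipe, buf, addr);
 *	if (unlikely(error) && atomic) {
 *		atomic = 0;
 *		goto redo;
 *	}
 */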
198 static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
199 struct pipe_buffer *buf)
201 struct page *page = buf->page;
204 * If nobody else uses this page, and we don't already have a
205 * temporary page, let's keep track of it as a one-deep
206 * allocation cache. (Otherwise just release our reference to it)
208 if (page_count(page) == 1 && !pipe->tmp_page)
209 pipe->tmp_page = page;
211 page_cache_release(page);
215 * generic_pipe_buf_map - virtually map a pipe buffer
216 * @pipe: the pipe that the buffer belongs to
217 * @buf: the buffer that should be mapped
218 * @atomic: whether to use an atomic map
221 * This function returns a kernel virtual address mapping for the
222 * pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
223 * and the caller has to be careful not to fault before calling
224 * the unmap function.
226 * Note that this function occupies KM_USER0 if @atomic != 0.
228 void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
229 struct pipe_buffer *buf, int atomic)
232 buf->flags |= PIPE_BUF_FLAG_ATOMIC;
233 return kmap_atomic(buf->page, KM_USER0);
236 return kmap(buf->page);
238 EXPORT_SYMBOL(generic_pipe_buf_map);
241 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
242 * @pipe: the pipe that the buffer belongs to
243 * @buf: the buffer that should be unmapped
244 * @map_data: the data that the mapping function returned
247 * This function undoes the mapping that ->map() provided.
249 void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
250 struct pipe_buffer *buf, void *map_data)
252 if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
253 buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
254 kunmap_atomic(map_data, KM_USER0);
258 EXPORT_SYMBOL(generic_pipe_buf_unmap);
261 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
262 * @pipe: the pipe that the buffer belongs to
263 * @buf: the buffer to attempt to steal
266 * This function attempts to steal the &struct page attached to
267 * @buf. If successful, this function returns 0, with the page locked.
268 * The caller may then reuse the page for whatever it wishes; the
269 * typical use is insertion into a different file's page cache.
272 int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
273 struct pipe_buffer *buf)
275 struct page *page = buf->page;
278 * A reference count of one is golden: it means the owner of this
279 * page is the only one holding a reference to it. Lock the page
282 if (page_count(page) == 1) {
289 EXPORT_SYMBOL(generic_pipe_buf_steal);
292 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
293 * @pipe: the pipe that the buffer belongs to
294 * @buf: the buffer to get a reference to
297 * This function grabs an extra reference to @buf. It's used
298 * in the tee() system call, when we duplicate the buffers in one
301 void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
303 page_cache_get(buf->page);
305 EXPORT_SYMBOL(generic_pipe_buf_get);
308 * generic_pipe_buf_confirm - verify contents of the pipe buffer
309 * @info: the pipe that the buffer belongs to
310 * @buf: the buffer to confirm
313 * This function does nothing, because the generic pipe code uses
314 * pages that are always good when inserted into the pipe.
316 int generic_pipe_buf_confirm(struct pipe_inode_info *info,
317 struct pipe_buffer *buf)
321 EXPORT_SYMBOL(generic_pipe_buf_confirm);
324 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
325 * @pipe: the pipe that the buffer belongs to
326 * @buf: the buffer to put a reference to
329 * This function releases a reference to @buf.
331 void generic_pipe_buf_release(struct pipe_inode_info *pipe,
332 struct pipe_buffer *buf)
334 page_cache_release(buf->page);
336 EXPORT_SYMBOL(generic_pipe_buf_release);
338 static const struct pipe_buf_operations anon_pipe_buf_ops = {
340 .map = generic_pipe_buf_map,
341 .unmap = generic_pipe_buf_unmap,
342 .confirm = generic_pipe_buf_confirm,
343 .release = anon_pipe_buf_release,
344 .steal = generic_pipe_buf_steal,
345 .get = generic_pipe_buf_get,
348 static const struct pipe_buf_operations packet_pipe_buf_ops = {
350 .map = generic_pipe_buf_map,
351 .unmap = generic_pipe_buf_unmap,
352 .confirm = generic_pipe_buf_confirm,
353 .release = anon_pipe_buf_release,
354 .steal = generic_pipe_buf_steal,
355 .get = generic_pipe_buf_get,
359 pipe_read(struct kiocb *iocb, const struct iovec *_iov,
360 unsigned long nr_segs, loff_t pos)
362 struct file *filp = iocb->ki_filp;
363 struct inode *inode = filp->f_path.dentry->d_inode;
364 struct pipe_inode_info *pipe;
367 struct iovec *iov = (struct iovec *)_iov;
370 total_len = iov_length(iov, nr_segs);
371 /* Null read succeeds. */
372 if (unlikely(total_len == 0))
377 mutex_lock(&inode->i_mutex);
378 pipe = inode->i_pipe;
380 int bufs = pipe->nrbufs;
382 int curbuf = pipe->curbuf;
383 struct pipe_buffer *buf = pipe->bufs + curbuf;
384 const struct pipe_buf_operations *ops = buf->ops;
386 size_t chars = buf->len;
389 if (chars > total_len)
392 error = ops->confirm(pipe, buf);
399 atomic = !iov_fault_in_pages_write(iov, chars);
401 addr = ops->map(pipe, buf, atomic);
402 error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
403 ops->unmap(pipe, buf, addr);
404 if (unlikely(error)) {
406 * Just retry with the slow path if we failed.
417 buf->offset += chars;
420 /* Was it a packet buffer? Clean up and exit */
421 if (buf->flags & PIPE_BUF_FLAG_PACKET) {
428 ops->release(pipe, buf);
429 curbuf = (curbuf + 1) & (pipe->buffers - 1);
430 pipe->curbuf = curbuf;
431 pipe->nrbufs = --bufs;
436 break; /* common path: read succeeded */
438 if (bufs) /* More to do? */
442 if (!pipe->waiting_writers) {
443 /* syscall merging: Usually we must not sleep
444 * if O_NONBLOCK is set, or if we got some data.
445 * But if a writer sleeps in kernel space, then
446 * we can wait for that data without violating POSIX.
450 if (filp->f_flags & O_NONBLOCK) {
455 if (signal_pending(current)) {
461 wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
462 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
466 mutex_unlock(&inode->i_mutex);
468 /* Signal writers asynchronously that there is more room. */
470 wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
471 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
478 static inline int is_packetized(struct file *file)
480 return (file->f_flags & O_DIRECT) != 0;
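/*
 * Illustrative userspace view (editor's addition, not from the original
 * source): passing O_DIRECT to pipe2() creates a "packet mode" pipe in
 * which each write() becomes one packet and a read() consumes at most
 * one packet, discarding any unread remainder (see the
 * PIPE_BUF_FLAG_PACKET handling in pipe_read() above):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	char buf[8];
 *
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "hello world", 11);   -- one 11-byte packet
 *	read(fds[0], buf, 5);               -- returns 5, rest of packet dropped
 */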
484 pipe_write(struct kiocb *iocb, const struct iovec *_iov,
485 unsigned long nr_segs, loff_t ppos)
487 struct file *filp = iocb->ki_filp;
488 struct inode *inode = filp->f_path.dentry->d_inode;
489 struct pipe_inode_info *pipe;
492 struct iovec *iov = (struct iovec *)_iov;
496 total_len = iov_length(iov, nr_segs);
497 /* Null write succeeds. */
498 if (unlikely(total_len == 0))
503 mutex_lock(&inode->i_mutex);
504 pipe = inode->i_pipe;
506 if (!pipe->readers) {
507 send_sig(SIGPIPE, current, 0);
512 /* We try to merge small writes */
513 chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
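/*
 * Worked example (editor's addition): chars is the write size modulo
 * PAGE_SIZE, i.e. the part that would only partially fill a page.  With
 * PAGE_SIZE == 4096, a 5000-byte write gives chars = 5000 & 4095 = 904,
 * while a 4096-byte write gives chars = 0 and no merge is attempted.
 */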
514 if (pipe->nrbufs && chars != 0) {
515 int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
517 struct pipe_buffer *buf = pipe->bufs + lastbuf;
518 const struct pipe_buf_operations *ops = buf->ops;
519 int offset = buf->offset + buf->len;
521 if (ops->can_merge && offset + chars <= PAGE_SIZE) {
522 int error, atomic = 1;
525 error = ops->confirm(pipe, buf);
529 iov_fault_in_pages_read(iov, chars);
531 addr = ops->map(pipe, buf, atomic);
532 error = pipe_iov_copy_from_user(offset + addr, iov,
534 ops->unmap(pipe, buf, addr);
555 if (!pipe->readers) {
556 send_sig(SIGPIPE, current, 0);
562 if (bufs < pipe->buffers) {
563 int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
564 struct pipe_buffer *buf = pipe->bufs + newbuf;
565 struct page *page = pipe->tmp_page;
567 int error, atomic = 1;
570 page = alloc_page(GFP_HIGHUSER);
571 if (unlikely(!page)) {
572 ret = ret ? : -ENOMEM;
575 pipe->tmp_page = page;
577 /* Always wake up, even if the copy fails. Otherwise
578 * we lock up (O_NONBLOCK-)readers that sleep due to syscall merging.
580 * FIXME! Is this really true?
584 if (chars > total_len)
587 iov_fault_in_pages_read(iov, chars);
590 src = kmap_atomic(page, KM_USER0);
594 error = pipe_iov_copy_from_user(src, iov, chars,
597 kunmap_atomic(src, KM_USER0);
601 if (unlikely(error)) {
612 /* Insert it into the buffer array */
614 buf->ops = &anon_pipe_buf_ops;
618 if (is_packetized(filp)) {
619 buf->ops = &packet_pipe_buf_ops;
620 buf->flags = PIPE_BUF_FLAG_PACKET;
622 pipe->nrbufs = ++bufs;
623 pipe->tmp_page = NULL;
629 if (bufs < pipe->buffers)
631 if (filp->f_flags & O_NONBLOCK) {
636 if (signal_pending(current)) {
642 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
643 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
646 pipe->waiting_writers++;
648 pipe->waiting_writers--;
651 mutex_unlock(&inode->i_mutex);
653 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
654 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
657 file_update_time(filp);
662 bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
668 bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
674 static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
676 struct inode *inode = filp->f_path.dentry->d_inode;
677 struct pipe_inode_info *pipe;
678 int count, buf, nrbufs;
682 mutex_lock(&inode->i_mutex);
683 pipe = inode->i_pipe;
686 nrbufs = pipe->nrbufs;
687 while (--nrbufs >= 0) {
688 count += pipe->bufs[buf].len;
689 buf = (buf+1) & (pipe->buffers - 1);
691 mutex_unlock(&inode->i_mutex);
693 return put_user(count, (int __user *)arg);
699 /* No kernel lock held - fine */
701 pipe_poll(struct file *filp, poll_table *wait)
704 struct inode *inode = filp->f_path.dentry->d_inode;
705 struct pipe_inode_info *pipe = inode->i_pipe;
708 poll_wait(filp, &pipe->wait, wait);
710 /* Reading only -- no need for acquiring the semaphore. */
711 nrbufs = pipe->nrbufs;
713 if (filp->f_mode & FMODE_READ) {
714 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
715 if (!pipe->writers && filp->f_version != pipe->w_counter)
719 if (filp->f_mode & FMODE_WRITE) {
720 mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
722 * Most Unices do not set POLLERR for FIFOs but on Linux they
723 * behave exactly like pipes for poll().
733 pipe_release(struct inode *inode, int decr, int decw)
735 struct pipe_inode_info *pipe;
737 mutex_lock(&inode->i_mutex);
738 pipe = inode->i_pipe;
739 pipe->readers -= decr;
740 pipe->writers -= decw;
742 if (!pipe->readers && !pipe->writers) {
743 free_pipe_info(inode);
745 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
746 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
747 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
749 mutex_unlock(&inode->i_mutex);
755 pipe_read_fasync(int fd, struct file *filp, int on)
757 struct inode *inode = filp->f_path.dentry->d_inode;
760 mutex_lock(&inode->i_mutex);
761 retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
762 mutex_unlock(&inode->i_mutex);
769 pipe_write_fasync(int fd, struct file *filp, int on)
771 struct inode *inode = filp->f_path.dentry->d_inode;
774 mutex_lock(&inode->i_mutex);
775 retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
776 mutex_unlock(&inode->i_mutex);
783 pipe_rdwr_fasync(int fd, struct file *filp, int on)
785 struct inode *inode = filp->f_path.dentry->d_inode;
786 struct pipe_inode_info *pipe = inode->i_pipe;
789 mutex_lock(&inode->i_mutex);
790 retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
792 retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
793 if (retval < 0) /* this can happen only if on == T */
794 fasync_helper(-1, filp, 0, &pipe->fasync_readers);
796 mutex_unlock(&inode->i_mutex);
802 pipe_read_release(struct inode *inode, struct file *filp)
804 return pipe_release(inode, 1, 0);
808 pipe_write_release(struct inode *inode, struct file *filp)
810 return pipe_release(inode, 0, 1);
814 pipe_rdwr_release(struct inode *inode, struct file *filp)
818 decr = (filp->f_mode & FMODE_READ) != 0;
819 decw = (filp->f_mode & FMODE_WRITE) != 0;
820 return pipe_release(inode, decr, decw);
824 pipe_read_open(struct inode *inode, struct file *filp)
828 mutex_lock(&inode->i_mutex);
832 inode->i_pipe->readers++;
835 mutex_unlock(&inode->i_mutex);
841 pipe_write_open(struct inode *inode, struct file *filp)
845 mutex_lock(&inode->i_mutex);
849 inode->i_pipe->writers++;
852 mutex_unlock(&inode->i_mutex);
858 pipe_rdwr_open(struct inode *inode, struct file *filp)
862 mutex_lock(&inode->i_mutex);
866 if (filp->f_mode & FMODE_READ)
867 inode->i_pipe->readers++;
868 if (filp->f_mode & FMODE_WRITE)
869 inode->i_pipe->writers++;
872 mutex_unlock(&inode->i_mutex);
878 * The file_operations structs are not static because they
879 * are also used in linux/fs/fifo.c to do operations on FIFOs.
881 * Pipes reuse fifos' file_operations structs.
883 const struct file_operations read_pipefifo_fops = {
885 .read = do_sync_read,
886 .aio_read = pipe_read,
889 .unlocked_ioctl = pipe_ioctl,
890 .open = pipe_read_open,
891 .release = pipe_read_release,
892 .fasync = pipe_read_fasync,
895 const struct file_operations write_pipefifo_fops = {
898 .write = do_sync_write,
899 .aio_write = pipe_write,
901 .unlocked_ioctl = pipe_ioctl,
902 .open = pipe_write_open,
903 .release = pipe_write_release,
904 .fasync = pipe_write_fasync,
907 const struct file_operations rdwr_pipefifo_fops = {
909 .read = do_sync_read,
910 .aio_read = pipe_read,
911 .write = do_sync_write,
912 .aio_write = pipe_write,
914 .unlocked_ioctl = pipe_ioctl,
915 .open = pipe_rdwr_open,
916 .release = pipe_rdwr_release,
917 .fasync = pipe_rdwr_fasync,
920 struct pipe_inode_info * alloc_pipe_info(struct inode *inode)
922 struct pipe_inode_info *pipe;
924 pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
926 pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
928 init_waitqueue_head(&pipe->wait);
929 pipe->r_counter = pipe->w_counter = 1;
931 pipe->buffers = PIPE_DEF_BUFFERS;
940 void __free_pipe_info(struct pipe_inode_info *pipe)
944 for (i = 0; i < pipe->buffers; i++) {
945 struct pipe_buffer *buf = pipe->bufs + i;
947 buf->ops->release(pipe, buf);
950 __free_page(pipe->tmp_page);
955 void free_pipe_info(struct inode *inode)
957 __free_pipe_info(inode->i_pipe);
958 inode->i_pipe = NULL;
961 static struct vfsmount *pipe_mnt __read_mostly;
964 * pipefs_dname() is called from d_path().
966 static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
968 return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
969 dentry->d_inode->i_ino);
972 static const struct dentry_operations pipefs_dentry_operations = {
973 .d_dname = pipefs_dname,
976 static struct inode * get_pipe_inode(void)
978 struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
979 struct pipe_inode_info *pipe;
984 inode->i_ino = get_next_ino();
986 pipe = alloc_pipe_info(inode);
989 inode->i_pipe = pipe;
991 pipe->readers = pipe->writers = 1;
992 inode->i_fop = &rdwr_pipefifo_fops;
995 * Mark the inode dirty from the very beginning,
996 * that way it will never be moved to the dirty
997 * list because "mark_inode_dirty()" will think
998 * that it already _is_ on the dirty list.
1000 inode->i_state = I_DIRTY;
1001 inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
1002 inode->i_uid = current_fsuid();
1003 inode->i_gid = current_fsgid();
1004 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1015 struct file *create_write_pipe(int flags)
1018 struct inode *inode;
1021 struct qstr name = { .name = "" };
1024 inode = get_pipe_inode();
1029 path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
1032 path.mnt = mntget(pipe_mnt);
1034 d_instantiate(path.dentry, inode);
1037 f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops);
1040 f->f_mapping = inode->i_mapping;
1042 f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
1048 free_pipe_info(inode);
1050 return ERR_PTR(err);
1053 free_pipe_info(inode);
1056 return ERR_PTR(err);
1059 void free_write_pipe(struct file *f)
1061 free_pipe_info(f->f_dentry->d_inode);
1062 path_put(&f->f_path);
1066 struct file *create_read_pipe(struct file *wrf, int flags)
1068 /* Grab pipe from the writer */
1069 struct file *f = alloc_file(&wrf->f_path, FMODE_READ,
1070 &read_pipefifo_fops);
1072 return ERR_PTR(-ENFILE);
1074 path_get(&wrf->f_path);
1075 f->f_flags = O_RDONLY | (flags & O_NONBLOCK);
1080 int do_pipe_flags(int *fd, int flags)
1082 struct file *fw, *fr;
1086 if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
1089 fw = create_write_pipe(flags);
1092 fr = create_read_pipe(fw, flags);
1093 error = PTR_ERR(fr);
1095 goto err_write_pipe;
1097 error = get_unused_fd_flags(flags);
1102 error = get_unused_fd_flags(flags);
1107 audit_fd_pair(fdr, fdw);
1108 fd_install(fdr, fr);
1109 fd_install(fdw, fw);
1118 path_put(&fr->f_path);
1121 free_write_pipe(fw);
1126 * sys_pipe() is the normal C calling standard for creating
1127 * a pipe. It's not the way Unix traditionally does this, though.
1129 SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
1134 error = do_pipe_flags(fd, flags);
1136 if (copy_to_user(fildes, fd, sizeof(fd))) {
1145 SYSCALL_DEFINE1(pipe, int __user *, fildes)
1147 return sys_pipe2(fildes, 0);
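/*
 * Illustrative userspace usage (editor's addition, not from the original
 * source): pipe2() accepts the same flags that do_pipe_flags() checks
 * above (O_CLOEXEC, O_NONBLOCK, O_DIRECT):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		return -1;
 *	-- fds[0] is the read end, fds[1] the write end; O_CLOEXEC marks
 *	-- both close-on-exec, O_NONBLOCK makes read()/write() fail with
 *	-- EAGAIN instead of blocking.
 */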
1151 * Allocate a new array of pipe buffers and copy the info over. Returns the
1152 * pipe size if successful, or -ERROR on failure.
1154 static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
1156 struct pipe_buffer *bufs;
1159 * We can shrink the pipe if nr_pages >= pipe->nrbufs. Since we don't
1160 * expect a lot of shrink+grow operations, just free and allocate
1161 * again like we would do for growing. If the pipe currently
1162 * contains more buffers than nr_pages, then return busy.
1164 if (nr_pages < pipe->nrbufs)
1167 bufs = kcalloc(nr_pages, sizeof(struct pipe_buffer), GFP_KERNEL);
1168 if (unlikely(!bufs))
1172 * The pipe array wraps around, so just start the new one at zero
1173 * and adjust the indexes.
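/*
 * Worked example (editor's addition): with pipe->buffers == 16,
 * pipe->curbuf == 14 and pipe->nrbufs == 4, the live buffers occupy
 * slots 14, 15, 0 and 1.  tail = 14 + 4 = 18 is then masked down to 2,
 * head = 4 - 2 = 2, and the two memcpy() calls below move slots 14..15
 * followed by slots 0..1 to the start of the new array.
 */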
1179 tail = pipe->curbuf + pipe->nrbufs;
1180 if (tail < pipe->buffers)
1183 tail &= (pipe->buffers - 1);
1185 head = pipe->nrbufs - tail;
1187 memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
1189 memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
1195 pipe->buffers = nr_pages;
1196 return nr_pages * PAGE_SIZE;
1200 * Currently we rely on the pipe array holding a power-of-2 number of pages.
1203 static inline unsigned int round_pipe_size(unsigned int size)
1205 unsigned long nr_pages;
1207 nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1208 return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
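/*
 * Illustrative values (editor's addition): with PAGE_SIZE == 4096,
 * round_pipe_size(1) == 4096, round_pipe_size(4097) == 8192 and
 * round_pipe_size(65536) == 65536.  The result is always a power-of-two
 * number of pages, which keeps the "& (pipe->buffers - 1)" index
 * masking used throughout this file valid.
 */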
1212 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
1213 * will return an error.
1215 int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
1216 size_t *lenp, loff_t *ppos)
1220 ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
1221 if (ret < 0 || !write)
1224 pipe_max_size = round_pipe_size(pipe_max_size);
1229 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1230 * location, so checking ->i_pipe is not enough to verify that this is a pipe.
1233 struct pipe_inode_info *get_pipe_info(struct file *file)
1235 struct inode *i = file->f_path.dentry->d_inode;
1237 return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
1240 long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1242 struct pipe_inode_info *pipe;
1245 pipe = get_pipe_info(file);
1249 mutex_lock(&pipe->inode->i_mutex);
1252 case F_SETPIPE_SZ: {
1253 unsigned int size, nr_pages;
1255 size = round_pipe_size(arg);
1256 nr_pages = size >> PAGE_SHIFT;
1262 if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
1266 ret = pipe_set_size(pipe, nr_pages);
1270 ret = pipe->buffers * PAGE_SIZE;
1278 mutex_unlock(&pipe->inode->i_mutex);
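/*
 * Illustrative userspace usage (editor's addition, not from the original
 * source): resizing a pipe with fcntl(), given an open pipe descriptor
 * fd.  Requests are rounded up by round_pipe_size(); growing past
 * pipe_max_size needs CAP_SYS_RESOURCE, and shrinking below the number
 * of buffer pages currently holding data fails with EBUSY:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	long newsz = fcntl(fd, F_SETPIPE_SZ, 1024 * 1024);
 *	long cursz = fcntl(fd, F_GETPIPE_SZ);
 *	-- F_SETPIPE_SZ returns the size actually set (after rounding),
 *	-- F_GETPIPE_SZ returns pipe->buffers * PAGE_SIZE.
 */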
1282 static const struct super_operations pipefs_ops = {
1283 .destroy_inode = free_inode_nonrcu,
1284 .statfs = simple_statfs,
1288 * pipefs should _never_ be mounted by userland - too much of a security hassle,
1289 * no real gain from having the whole whorehouse mounted. So we don't need
1290 * any operations on the root directory. However, we need a non-trivial
1291 * d_name - pipe: will go nicely and kill the special-casing in procfs.
1293 static struct dentry *pipefs_mount(struct file_system_type *fs_type,
1294 int flags, const char *dev_name, void *data)
1296 return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
1297 &pipefs_dentry_operations, PIPEFS_MAGIC);
1300 static struct file_system_type pipe_fs_type = {
1302 .mount = pipefs_mount,
1303 .kill_sb = kill_anon_super,
1306 static int __init init_pipe_fs(void)
1308 int err = register_filesystem(&pipe_fs_type);
1311 pipe_mnt = kern_mount(&pipe_fs_type);
1312 if (IS_ERR(pipe_mnt)) {
1313 err = PTR_ERR(pipe_mnt);
1314 unregister_filesystem(&pipe_fs_type);
1320 static void __exit exit_pipe_fs(void)
1322 kern_unmount(pipe_mnt);
1323 unregister_filesystem(&pipe_fs_type);
1326 fs_initcall(init_pipe_fs);
1327 module_exit(exit_pipe_fs);