/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->inode)
		mutex_lock_nested(&pipe->inode->i_mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);
}
EXPORT_SYMBOL(pipe_unlock);

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

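/*
 * Illustrative sketch (not part of this file): a caller that moves data
 * between two pipes, such as the tee()/splice() code, takes both locks
 * through pipe_double_lock() so that the address comparison above yields
 * one global lock order and two tasks crossing the same pipe pair cannot
 * deadlock. Assumed helper name, for illustration only:
 *
 *	static void link_pipes(struct pipe_inode_info *in,
 *			       struct pipe_inode_info *out)
 *	{
 *		pipe_double_lock(in, out);	// ordered by address
 *		// ... move or duplicate buffers between in and out ...
 *		pipe_unlock(in);
 *		pipe_unlock(out);
 *	}
 */
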
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

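/*
 * Illustrative sketch (assumption, mirroring how pipe_read()/pipe_write()
 * below use it): callers hold the pipe lock, re-check their condition in
 * a loop, and call pipe_wait() to sleep until the other side wakes them:
 *
 *	while (pipe->nrbufs == 0) {		// nothing to read yet
 *		if (signal_pending(current))
 *			break;
 *		pipe_wait(pipe);		// drops and re-takes the lock
 *	}
 */
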
static int
pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
			size_t *remaining, int atomic)
{
	unsigned long copy;

	while (*remaining > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, *remaining, iov->iov_len);

		if (atomic) {
			if (__copy_from_user_inatomic(addr + *offset,
						      iov->iov_base, copy))
				return -EFAULT;
		} else {
			if (copy_from_user(addr + *offset,
					   iov->iov_base, copy))
				return -EFAULT;
		}

		*offset += copy;
		*remaining -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

static int
pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
		      size_t *remaining, int atomic)
{
	unsigned long copy;

	while (*remaining > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, *remaining, iov->iov_len);

		if (atomic) {
			if (__copy_to_user_inatomic(iov->iov_base,
						    addr + *offset, copy))
				return -EFAULT;
		} else {
			if (copy_to_user(iov->iov_base,
					 addr + *offset, copy))
				return -EFAULT;
		}

		*offset += copy;
		*remaining -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

/*
 * Attempt to pre-fault in the user memory, so we can use atomic copies.
 * Returns the number of bytes not faulted in.
 */
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		if (fault_in_pages_writeable(iov->iov_base, this_len))
			break;

		len -= this_len;
		iov++;
	}

	return len;
}

/*
 * Pre-fault in the user memory, so we can use atomic copies.
 */
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		fault_in_pages_readable(iov->iov_base, this_len);
		len -= this_len;
		iov++;
	}
}

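/*
 * Illustrative sketch (assumption, matching how the read/write paths
 * below use these helpers): pre-fault the user pages, attempt the cheap
 * atomic kmap+copy first, and retry with the sleeping copy on failure:
 *
 *	atomic = !iov_fault_in_pages_write(iov, chars);
 *	addr = ops->map(pipe, buf, atomic);
 *	error = pipe_iov_copy_to_user(iov, addr, &offset, &remaining, atomic);
 *	ops->unmap(pipe, buf, addr);
 *	if (error && atomic) {
 *		atomic = 0;	// fall back to the faulting slow path
 *		// ... redo the map/copy/unmap sequence ...
 *	}
 */
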
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		page_cache_release(page);
}

/**
 * generic_pipe_buf_map - virtually map a pipe buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer that should be mapped
 * @atomic:	whether to use an atomic map
 *
 * Description:
 *	This function returns a kernel virtual address mapping for the
 *	pipe_buffer passed in @buf. If @atomic is set, an atomic map is
 *	provided and the caller has to be careful not to fault before calling
 *	the unmap function.
 *
 *	Note that this function occupies KM_USER0 if @atomic != 0.
 */
void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf, int atomic)
{
	if (atomic) {
		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
		return kmap_atomic(buf->page, KM_USER0);
	}

	return kmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_map);

/**
 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer that should be unmapped
 * @map_data:	the data that the mapping function returned
 *
 * Description:
 *	This function undoes the mapping that ->map() provided.
 */
void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, void *map_data)
{
	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
		kunmap_atomic(map_data, KM_USER0);
	} else
		kunmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_unmap);

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

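/*
 * Note (illustrative): the only field that differs between the two ops
 * tables is can_merge, which pipe_write() below consults before trying
 * to append a small write to the last partially-filled page:
 *
 *	if (ops->can_merge && offset + chars <= PAGE_SIZE) {
 *		// append to the current page instead of a new buffer
 *	}
 *
 * Packet pipes (O_DIRECT) never merge, so each write() keeps its own
 * buffer and packet boundaries survive.
 */
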
static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
	  unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			void *addr;
			size_t chars = buf->len, remaining;
			int error, atomic;
			int offset;

			if (chars > total_len)
				chars = total_len;

			error = ops->confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			atomic = !iov_fault_in_pages_write(iov, chars);
			remaining = chars;
			offset = buf->offset;
redo:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_to_user(iov, addr, &offset,
						      &remaining, atomic);
			ops->unmap(pipe, buf, addr);
			if (unlikely(error)) {
				/*
				 * Just retry with the slow path if we failed.
				 */
				if (atomic) {
					atomic = 0;
					goto redo;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	mutex_unlock(&inode->i_mutex);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

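/*
 * Illustrative userspace sketch (not kernel code): O_DIRECT puts a pipe
 * into packet mode, where each write() becomes one packet and a read()
 * returns at most one packet:
 *
 *	int fds[2];
 *	if (pipe2(fds, O_DIRECT) == 0) {
 *		char buf[16];
 *		write(fds[1], "ab", 2);
 *		write(fds[1], "cd", 2);
 *		ssize_t n = read(fds[0], buf, sizeof(buf));
 *		// n == 2 and buf holds "ab"; "cd" waits for the next read
 *	}
 */
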
static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t ppos)
{
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	ssize_t ret;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;
	ssize_t chars;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			int error, atomic = 1;
			void *addr;
			size_t remaining = chars;

			error = ops->confirm(pipe, buf);
			if (error)
				goto out;

			iov_fault_in_pages_read(iov, chars);
redo1:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_from_user(addr, &offset, iov,
							&remaining, atomic);
			ops->unmap(pipe, buf, addr);
			ret = error;
			do_wakeup = 1;
			if (error) {
				if (atomic) {
					atomic = 0;
					goto redo1;
				}
				goto out;
			}
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			char *src;
			int error, atomic = 1;
			int offset = 0;
			size_t remaining;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			iov_fault_in_pages_read(iov, chars);
			remaining = chars;
redo2:
			if (atomic)
				src = kmap_atomic(page, KM_USER0);
			else
				src = kmap(page);

			error = pipe_iov_copy_from_user(src, &offset, iov,
							&remaining, atomic);
			if (atomic)
				kunmap_atomic(src, KM_USER0);
			else
				kunmap(page);

			if (unlikely(error)) {
				if (atomic) {
					atomic = 0;
					goto redo2;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0)
		file_update_time(filp);
	return ret;
}

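/*
 * Worked example (illustrative) for the merge path above: a 100-byte
 * write has chars = 100 & (PAGE_SIZE-1) = 100, so if the last buffer's
 * page still has 100 bytes free before the page boundary and its ops
 * allow merging, the data is appended in place and no new page is
 * allocated. This is what keeps many small writes from each burning a
 * full page of pipe capacity.
 */
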
static ssize_t
bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	return -EBADF;
}

static ssize_t
bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
	   loff_t *ppos)
{
	return -EBADF;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			mutex_lock(&inode->i_mutex);
			pipe = inode->i_pipe;
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (pipe->buffers - 1);
			}
			mutex_unlock(&inode->i_mutex);

			return put_user(count, (int __user *)arg);
		default:
			return -EINVAL;
	}
}

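/*
 * Illustrative userspace sketch (not kernel code): FIONREAD reports how
 * many bytes are currently queued across all of the pipe's buffers:
 *
 *	int queued;
 *	if (ioctl(pipefd[0], FIONREAD, &queued) == 0)
 *		printf("%d bytes buffered\n", queued);
 */
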
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}

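/*
 * Illustrative userspace sketch (not kernel code): the mask computed
 * above is exactly what poll(2) reports for a pipe descriptor:
 *
 *	struct pollfd pfd = { .fd = pipefd[0], .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN)
 *			;	// data is ready to read
 *		if (pfd.revents & POLLHUP)
 *			;	// the write side has gone away
 *	}
 */
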
static int
pipe_release(struct inode *inode, int decr, int decw)
{
	struct pipe_inode_info *pipe;

	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	pipe->readers -= decr;
	pipe->writers -= decw;

	if (!pipe->readers && !pipe->writers) {
		free_pipe_info(inode);
	} else {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	mutex_unlock(&inode->i_mutex);

	return 0;
}

static int
pipe_read_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
	mutex_unlock(&inode->i_mutex);

	return retval;
}

static int
pipe_write_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
	mutex_unlock(&inode->i_mutex);

	return retval;
}

static int
pipe_rdwr_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if (retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0) /* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}

static int
pipe_read_release(struct inode *inode, struct file *filp)
{
	return pipe_release(inode, 1, 0);
}

static int
pipe_write_release(struct inode *inode, struct file *filp)
{
	return pipe_release(inode, 0, 1);
}

static int
pipe_rdwr_release(struct inode *inode, struct file *filp)
{
	int decr, decw;

	decr = (filp->f_mode & FMODE_READ) != 0;
	decw = (filp->f_mode & FMODE_WRITE) != 0;
	return pipe_release(inode, decr, decw);
}

static int
pipe_read_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		inode->i_pipe->readers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}

static int
pipe_write_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		inode->i_pipe->writers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}

static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	if (!(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		return -EINVAL;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		if (filp->f_mode & FMODE_READ)
			inode->i_pipe->readers++;
		if (filp->f_mode & FMODE_WRITE)
			inode->i_pipe->writers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * The file_operations structs are not static because they
 * are also used in linux/fs/fifo.c to do operations on FIFOs.
 * (Pipes reuse fifos' file_operations structs.)
 */
const struct file_operations read_pipefifo_fops = {
	.llseek = no_llseek,
	.read = do_sync_read,
	.aio_read = pipe_read,
	.write = bad_pipe_w,
	.poll = pipe_poll,
	.unlocked_ioctl = pipe_ioctl,
	.open = pipe_read_open,
	.release = pipe_read_release,
	.fasync = pipe_read_fasync,
};

const struct file_operations write_pipefifo_fops = {
	.llseek = no_llseek,
	.read = bad_pipe_r,
	.write = do_sync_write,
	.aio_write = pipe_write,
	.poll = pipe_poll,
	.unlocked_ioctl = pipe_ioctl,
	.open = pipe_write_open,
	.release = pipe_write_release,
	.fasync = pipe_write_fasync,
};

const struct file_operations rdwr_pipefifo_fops = {
	.llseek = no_llseek,
	.read = do_sync_read,
	.aio_read = pipe_read,
	.write = do_sync_write,
	.aio_write = pipe_write,
	.poll = pipe_poll,
	.unlocked_ioctl = pipe_ioctl,
	.open = pipe_rdwr_open,
	.release = pipe_rdwr_release,
	.fasync = pipe_rdwr_fasync,
};

static void account_pipe_buffers(struct pipe_inode_info *pipe,
				 unsigned long old, unsigned long new)
{
	atomic_long_add(new - old, &pipe->user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(struct user_struct *user)
{
	return pipe_user_pages_soft &&
	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
}

static bool too_many_pipe_buffers_hard(struct user_struct *user)
{
	return pipe_user_pages_hard &&
	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
}

struct pipe_inode_info * alloc_pipe_info(struct inode *inode)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (pipe) {
		unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
		struct user_struct *user = get_current_user();

		if (!too_many_pipe_buffers_hard(user)) {
			if (too_many_pipe_buffers_soft(user))
				pipe_bufs = 1;
			pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
		}

		if (pipe->bufs) {
			init_waitqueue_head(&pipe->wait);
			pipe->r_counter = pipe->w_counter = 1;
			pipe->inode = inode;
			pipe->buffers = pipe_bufs;
			pipe->user = user;
			account_pipe_buffers(pipe, 0, pipe_bufs);
			return pipe;
		}

		free_uid(user);
		kfree(pipe);
	}

	return NULL;
}

void __free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	account_pipe_buffers(pipe, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

void free_pipe_info(struct inode *inode)
{
	__free_pipe_info(inode->i_pipe);
	inode->i_pipe = NULL;
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname = pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info(inode);
	if (!pipe)
		goto fail_iput;
	inode->i_pipe = pipe;

	pipe->readers = pipe->writers = 1;
	inode->i_fop = &rdwr_pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

struct file *create_write_pipe(int flags)
{
	int err;
	struct inode *inode;
	struct file *f;
	struct path path;
	struct qstr name = { .name = "" };

	err = -ENFILE;
	inode = get_pipe_inode();
	if (!inode)
		goto err;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	err = -ENFILE;
	f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops);
	if (!f)
		goto err_dentry;
	f->f_mapping = inode->i_mapping;

	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
	f->f_version = 0;

	return f;

 err_dentry:
	free_pipe_info(inode);
	path_put(&path);
	return ERR_PTR(err);

 err_inode:
	free_pipe_info(inode);
	iput(inode);
 err:
	return ERR_PTR(err);
}

void free_write_pipe(struct file *f)
{
	free_pipe_info(f->f_dentry->d_inode);
	path_put(&f->f_path);
	put_filp(f);
}

struct file *create_read_pipe(struct file *wrf, int flags)
{
	/* Grab pipe from the writer */
	struct file *f = alloc_file(&wrf->f_path, FMODE_READ,
				    &read_pipefifo_fops);
	if (!f)
		return ERR_PTR(-ENFILE);

	path_get(&wrf->f_path);
	f->f_flags = O_RDONLY | (flags & O_NONBLOCK);

	return f;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *fw, *fr;
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	fw = create_write_pipe(flags);
	if (IS_ERR(fw))
		return PTR_ERR(fw);
	fr = create_read_pipe(fw, flags);
	error = PTR_ERR(fr);
	if (IS_ERR(fr))
		goto err_write_pipe;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd_install(fdr, fr);
	fd_install(fdw, fw);
	fd[0] = fdr;
	fd[1] = fdw;

	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	path_put(&fr->f_path);
	put_filp(fr);
 err_write_pipe:
	free_write_pipe(fw);
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, flags);
	if (!error) {
		if (copy_to_user(fildes, fd, sizeof(fd))) {
			sys_close(fd[0]);
			sys_close(fd[1]);
			error = -EFAULT;
		}
	}
	return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}

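/*
 * Illustrative userspace sketch (not kernel code): these two entry
 * points back the pipe(2) and pipe2(2) system calls:
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		perror("pipe2");
 *	// fds[0] is the read end, fds[1] is the write end
 */
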
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
	struct pipe_buffer *bufs;

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs)
		return -EBUSY;

	bufs = kcalloc(nr_pages, sizeof(struct pipe_buffer), GFP_KERNEL);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	account_pipe_buffers(pipe, pipe->buffers, nr_pages);
	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;
}

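/*
 * Worked example (illustrative): with pipe->buffers == 8, curbuf == 6
 * and nrbufs == 4, the live buffers sit in slots 6, 7, 0, 1. Then
 * tail == (6 + 4) & 7 == 2 and head == 4 - 2 == 2, so slots 6..7 are
 * copied to bufs[0..1], slots 0..1 to bufs[2..3], and the ring restarts
 * at curbuf == 0 in the new, larger array.
 */
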
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages.
 */
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}

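/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): a request
 * of 5000 bytes needs ceil(5000/4096) == 2 pages, already a power of
 * two, so the rounded size is 2 << PAGE_SHIFT == 8192. A request of
 * 20000 bytes needs 5 pages, rounded up to 8, i.e. 32768 bytes.
 */
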
/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

	pipe_max_size = round_pipe_size(pipe_max_size);

	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	struct inode *i = file->f_path.dentry->d_inode;

	return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	mutex_lock(&pipe->inode->i_mutex);

	switch (cmd) {
	case F_SETPIPE_SZ: {
		unsigned int size, nr_pages;

		size = round_pipe_size(arg);
		nr_pages = size >> PAGE_SHIFT;

		ret = -EINVAL;
		if (!nr_pages)
			goto out;

		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
			ret = -EPERM;
			goto out;
		} else if ((too_many_pipe_buffers_hard(pipe->user) ||
			    too_many_pipe_buffers_soft(pipe->user)) &&
			   !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		ret = pipe_set_size(pipe, nr_pages);
		break;
		}
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}

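/*
 * Illustrative userspace sketch (not kernel code): applications resize
 * and query pipe capacity through fcntl(2):
 *
 *	long sz = fcntl(fds[0], F_GETPIPE_SZ);	// capacity in bytes
 *	if (fcntl(fds[0], F_SETPIPE_SZ, 1 << 20) < 0)
 *		perror("F_SETPIPE_SZ");	// EPERM above pipe-max-size
 */
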
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name = "pipefs",
	.mount = pipefs_mount,
	.kill_sb = kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

static void __exit exit_pipe_fs(void)
{
	kern_unmount(pipe_mnt);
	unregister_filesystem(&pipe_fs_type);
}

fs_initcall(init_pipe_fs);
module_exit(exit_pipe_fs);