[PATCH] splice: cleanup the SPLICE_F_NONBLOCK handling
[pandora-kernel.git] / fs / splice.c
1 /*
2  * "splice": joining two ropes together by interweaving their strands.
3  *
4  * This is the "extended pipe" functionality, where a pipe is used as
5  * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6  * buffer that you can use to transfer data from one end to the other.
7  *
8  * The traditional unix read/write is extended with a "splice()" operation
9  * that transfers data buffers to or from a pipe buffer.
10  *
11  * Named by Larry McVoy, original implementation from Linus, extended by
12  * Jens to support splicing to files, network, direct splicing, etc and
13  * fixing lots of bugs.
14  *
15  * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
16  * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17  * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
18  *
19  */
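/*
 * Illustrative sketch (editor's note, not part of this file): from user
 * space the above boils down to shuttling data between two ordinary file
 * descriptors through a pipe, which is also what do_splice_direct() below
 * does in-kernel with an internal pipe. The helper name is made up, error
 * handling is trimmed for brevity, and it assumes a libc that exposes the
 * splice(2) wrapper (glibc does with _GNU_SOURCE; otherwise syscall()
 * would be needed):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	// Copy an already-open file descriptor to stdout through a pipe.
 *	static int splice_fd_to_stdout(int in_fd)
 *	{
 *		int pfd[2];
 *		ssize_t n;
 *
 *		if (pipe(pfd) < 0)
 *			return -1;
 *
 *		while ((n = splice(in_fd, NULL, pfd[1], NULL,
 *				   16384, SPLICE_F_MOVE)) > 0) {
 *			// Drain what we just put into the pipe.
 *			while (n > 0) {
 *				ssize_t m = splice(pfd[0], NULL, STDOUT_FILENO,
 *						   NULL, n, SPLICE_F_MOVE);
 *				if (m <= 0)
 *					return -1;
 *				n -= m;
 *			}
 *		}
 *
 *		close(pfd[0]);
 *		close(pfd[1]);
 *		return n < 0 ? -1 : 0;
 *	}
 */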
20 #include <linux/fs.h>
21 #include <linux/file.h>
22 #include <linux/pagemap.h>
23 #include <linux/pipe_fs_i.h>
24 #include <linux/mm_inline.h>
25 #include <linux/swap.h>
26 #include <linux/writeback.h>
27 #include <linux/buffer_head.h>
28 #include <linux/module.h>
29 #include <linux/syscalls.h>
30
31 /*
32  * Passed to the actors
33  */
34 struct splice_desc {
35         unsigned int len, total_len;    /* current and remaining length */
36         unsigned int flags;             /* splice flags */
37         struct file *file;              /* file to read/write */
38         loff_t pos;                     /* file position */
39 };
40
41 /*
42  * Attempt to steal a page from a pipe buffer. This should perhaps go into
43  * a vm helper function, it's already simplified quite a bit by the
44  * addition of remove_mapping(). If success is returned, the caller may
45  * attempt to reuse this page for another destination.
46  */
47 static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
48                                      struct pipe_buffer *buf)
49 {
50         struct page *page = buf->page;
51         struct address_space *mapping = page_mapping(page);
52
53         WARN_ON(!PageLocked(page));
54         WARN_ON(!PageUptodate(page));
55
56         /*
57          * At least for ext2 with nobh option, we need to wait on writeback
58          * completing on this page, since we'll remove it from the pagecache.
59          * Otherwise truncate won't wait on the page, allowing the disk
60          * blocks to be reused by someone else before we actually wrote our
61          * data to them. fs corruption ensues.
62          */
63         wait_on_page_writeback(page);
64
65         if (PagePrivate(page))
66                 try_to_release_page(page, mapping_gfp_mask(mapping));
67
68         if (!remove_mapping(mapping, page))
69                 return 1;
70
71         buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
72         return 0;
73 }
74
75 static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
76                                         struct pipe_buffer *buf)
77 {
78         page_cache_release(buf->page);
79         buf->page = NULL;
80         buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
81 }
82
83 static void *page_cache_pipe_buf_map(struct file *file,
84                                      struct pipe_inode_info *info,
85                                      struct pipe_buffer *buf)
86 {
87         struct page *page = buf->page;
88         int err;
89
90         if (!PageUptodate(page)) {
91                 lock_page(page);
92
93                 /*
94                  * Page got truncated/unhashed. This will cause a 0-byte
95                  * splice, if this is the first page.
96                  */
97                 if (!page->mapping) {
98                         err = -ENODATA;
99                         goto error;
100                 }
101
102                 /*
103                  * Uh oh, read-error from disk.
104                  */
105                 if (!PageUptodate(page)) {
106                         err = -EIO;
107                         goto error;
108                 }
109
110                 /*
111                  * Page is ok after all, fall through to mapping.
112                  */
113                 unlock_page(page);
114         }
115
116         return kmap(page);
117 error:
118         unlock_page(page);
119         return ERR_PTR(err);
120 }
121
122 static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
123                                       struct pipe_buffer *buf)
124 {
125         kunmap(buf->page);
126 }
127
128 static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
129                                     struct pipe_buffer *buf)
130 {
131         page_cache_get(buf->page);
132 }
133
134 static struct pipe_buf_operations page_cache_pipe_buf_ops = {
135         .can_merge = 0,
136         .map = page_cache_pipe_buf_map,
137         .unmap = page_cache_pipe_buf_unmap,
138         .release = page_cache_pipe_buf_release,
139         .steal = page_cache_pipe_buf_steal,
140         .get = page_cache_pipe_buf_get,
141 };
142
143 /*
144  * Pipe output worker. This sets up our pipe format with the page cache
145  * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
146  */
147 static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
148                             int nr_pages, unsigned long len,
149                             unsigned int offset, unsigned int flags)
150 {
151         int ret, do_wakeup, i;
152
153         ret = 0;
154         do_wakeup = 0;
155         i = 0;
156
157         if (pipe->inode)
158                 mutex_lock(&pipe->inode->i_mutex);
159
160         for (;;) {
161                 if (!pipe->readers) {
162                         send_sig(SIGPIPE, current, 0);
163                         if (!ret)
164                                 ret = -EPIPE;
165                         break;
166                 }
167
168                 if (pipe->nrbufs < PIPE_BUFFERS) {
169                         int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
170                         struct pipe_buffer *buf = pipe->bufs + newbuf;
171                         struct page *page = pages[i++];
172                         unsigned long this_len;
173
174                         this_len = PAGE_CACHE_SIZE - offset;
175                         if (this_len > len)
176                                 this_len = len;
177
178                         buf->page = page;
179                         buf->offset = offset;
180                         buf->len = this_len;
181                         buf->ops = &page_cache_pipe_buf_ops;
182                         pipe->nrbufs++;
183                         if (pipe->inode)
184                                 do_wakeup = 1;
185
186                         ret += this_len;
187                         len -= this_len;
188                         offset = 0;
189                         if (!--nr_pages)
190                                 break;
191                         if (!len)
192                                 break;
193                         if (pipe->nrbufs < PIPE_BUFFERS)
194                                 continue;
195
196                         break;
197                 }
198
199                 if (flags & SPLICE_F_NONBLOCK) {
200                         if (!ret)
201                                 ret = -EAGAIN;
202                         break;
203                 }
204
205                 if (signal_pending(current)) {
206                         if (!ret)
207                                 ret = -ERESTARTSYS;
208                         break;
209                 }
210
211                 if (do_wakeup) {
212                         smp_mb();
213                         if (waitqueue_active(&pipe->wait))
214                                 wake_up_interruptible_sync(&pipe->wait);
215                         kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
216                         do_wakeup = 0;
217                 }
218
219                 pipe->waiting_writers++;
220                 pipe_wait(pipe);
221                 pipe->waiting_writers--;
222         }
223
224         if (pipe->inode)
225                 mutex_unlock(&pipe->inode->i_mutex);
226
227         if (do_wakeup) {
228                 smp_mb();
229                 if (waitqueue_active(&pipe->wait))
230                         wake_up_interruptible(&pipe->wait);
231                 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
232         }
233
234         while (i < nr_pages)
235                 page_cache_release(pages[i++]);
236
237         return ret;
238 }
239
240 static int
241 __generic_file_splice_read(struct file *in, loff_t *ppos,
242                            struct pipe_inode_info *pipe, size_t len,
243                            unsigned int flags)
244 {
245         struct address_space *mapping = in->f_mapping;
246         unsigned int loff, offset, nr_pages;
247         struct page *pages[PIPE_BUFFERS];
248         struct page *page;
249         pgoff_t index, end_index;
250         loff_t isize;
251         size_t bytes;
252         int i, error;
253
254         index = *ppos >> PAGE_CACHE_SHIFT;
255         loff = offset = *ppos & ~PAGE_CACHE_MASK;
256         nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
257
258         if (nr_pages > PIPE_BUFFERS)
259                 nr_pages = PIPE_BUFFERS;
260
261         /*
262          * Initiate read-ahead on this page range. However, don't call into
263          * read-ahead for a single page at a non-zero offset (we are likely
264          * doing a small-chunk splice and the page is already there).
265          */
266         if (!offset || nr_pages > 1)
267                 do_page_cache_readahead(mapping, in, index, nr_pages);
268
269         /*
270          * Now fill in the holes:
271          */
272         error = 0;
273         bytes = 0;
274         for (i = 0; i < nr_pages; i++, index++) {
275 find_page:
276                 /*
277                  * lookup the page for this index
278                  */
279                 page = find_get_page(mapping, index);
280                 if (!page) {
281                         /*
282                          * page didn't exist, allocate one
283                          */
284                         page = page_cache_alloc_cold(mapping);
285                         if (!page)
286                                 break;
287
288                         error = add_to_page_cache_lru(page, mapping, index,
289                                                 mapping_gfp_mask(mapping));
290                         if (unlikely(error)) {
291                                 page_cache_release(page);
292                                 break;
293                         }
294
295                         goto readpage;
296                 }
297
298                 /*
299                  * If the page isn't uptodate, we may need to start io on it
300                  */
301                 if (!PageUptodate(page)) {
302                         /*
303                          * If in nonblock mode then don't block on waiting
304                          * for an in-flight IO page
305                          */
306                         if (flags & SPLICE_F_NONBLOCK)
307                                 break;
308
309                         lock_page(page);
310
311                         /*
312                          * page was truncated, stop here. if this isn't the
313                          * first page, we'll just complete what we already
314                          * added
315                          */
316                         if (!page->mapping) {
317                                 unlock_page(page);
318                                 page_cache_release(page);
319                                 break;
320                         }
321                         /*
322                          * page was already under io and is now done, great
323                          */
324                         if (PageUptodate(page)) {
325                                 unlock_page(page);
326                                 goto fill_it;
327                         }
328
329 readpage:
330                         /*
331                          * need to read in the page
332                          */
333                         error = mapping->a_ops->readpage(in, page);
334
335                         if (unlikely(error)) {
336                                 page_cache_release(page);
337                                 if (error == AOP_TRUNCATED_PAGE)
338                                         goto find_page;
339                                 break;
340                         }
341
342                         /*
343                          * i_size must be checked after ->readpage().
344                          */
345                         isize = i_size_read(mapping->host);
346                         end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
347                         if (unlikely(!isize || index > end_index)) {
348                                 page_cache_release(page);
349                                 break;
350                         }
351
352                         /*
353                          * if this is the last page, see if we need to shrink
354                          * the length and stop
355                          */
356                         if (end_index == index) {
357                                 loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
358                                 if (bytes + loff > isize) {
359                                         page_cache_release(page);
360                                         break;
361                                 }
362                                 /*
363                                  * force quit after adding this page
364                                  */
365                                 nr_pages = i;
366                         }
367                 }
368 fill_it:
369                 pages[i] = page;
370                 bytes += PAGE_CACHE_SIZE - loff;
371                 loff = 0;
372         }
373
374         if (i)
375                 return move_to_pipe(pipe, pages, i, bytes, offset, flags);
376
377         return error;
378 }
379
380 /**
381  * generic_file_splice_read - splice data from file to a pipe
382  * @in:         file to splice from
383  * @pipe:       pipe to splice to
384  * @len:        number of bytes to splice
385  * @flags:      splice modifier flags
386  *
387  * Will read pages from given file and fill them into a pipe.
388  */
389 ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
390                                  struct pipe_inode_info *pipe, size_t len,
391                                  unsigned int flags)
392 {
393         ssize_t spliced;
394         int ret;
395
396         ret = 0;
397         spliced = 0;
398
399         while (len) {
400                 ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
401
402                 if (ret < 0)
403                         break;
404                 else if (!ret) {
405                         if (spliced)
406                                 break;
407                         if (flags & SPLICE_F_NONBLOCK) {
408                                 ret = -EAGAIN;
409                                 break;
410                         }
411                 }
412
413                 *ppos += ret;
414                 len -= ret;
415                 spliced += ret;
416         }
417
418         if (spliced)
419                 return spliced;
420
421         return ret;
422 }
423
424 EXPORT_SYMBOL(generic_file_splice_read);
425
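/*
 * Illustrative sketch (editor's note, not part of this file): what the
 * SPLICE_F_NONBLOCK handling above looks like from user space. With the
 * flag set, generic_file_splice_read() returns -EAGAIN instead of sleeping
 * (e.g. when the pipe is full or a page is still under IO), so the caller
 * can wait in poll() and retry. The helper name is made up and the glibc
 * splice(2) wrapper is assumed:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <errno.h>
 *
 *	// Pull up to 'len' bytes of the regular file 'fd' into the write
 *	// end of a pipe without sleeping inside splice() itself.
 *	static ssize_t splice_file_nonblock(int fd, int pipe_wr, size_t len)
 *	{
 *		struct pollfd pfd = { .fd = pipe_wr, .events = POLLOUT };
 *		ssize_t n;
 *
 *		for (;;) {
 *			n = splice(fd, NULL, pipe_wr, NULL, len,
 *				   SPLICE_F_NONBLOCK);
 *			if (n >= 0 || errno != EAGAIN)
 *				return n;
 *			// Pipe full (or data not ready yet): wait, then retry.
 *			poll(&pfd, 1, -1);
 *		}
 *	}
 */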
426 /*
427  * Send 'sd->len' bytes from the pipe buffer to the socket 'sd->file',
428  * at position 'sd->pos', using sendpage().
429  */
430 static int pipe_to_sendpage(struct pipe_inode_info *info,
431                             struct pipe_buffer *buf, struct splice_desc *sd)
432 {
433         struct file *file = sd->file;
434         loff_t pos = sd->pos;
435         unsigned int offset;
436         ssize_t ret;
437         void *ptr;
438         int more;
439
440         /*
441          * Sub-optimal, but we are limited by the pipe ->map. We don't
442          * need a kmap'ed buffer here, we just want to make sure we
443          * have the page pinned if the pipe page originates from the
444          * page cache.
445          */
446         ptr = buf->ops->map(file, info, buf);
447         if (IS_ERR(ptr))
448                 return PTR_ERR(ptr);
449
450         offset = pos & ~PAGE_CACHE_MASK;
451         more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
452
453         ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);
454
455         buf->ops->unmap(info, buf);
456         if (ret == sd->len)
457                 return 0;
458
459         return -EIO;
460 }
461
462 /*
463  * This is a little more tricky than the file -> pipe splicing. There are
464  * basically three cases:
465  *
466  *      - Destination page already exists in the address space and there
467  *        are users of it. For that case we have no other option than
468  *        copying the data. Tough luck.
469  *      - Destination page already exists in the address space, but there
470  *        are no users of it. Make sure it's uptodate, then drop it. Fall
471  *        through to last case.
472  *      - Destination page does not exist, we can add the pipe page to
473  *        the page cache and avoid the copy.
474  *
475  * If asked to move pages to the output file (SPLICE_F_MOVE is set in
476  * sd->flags), we attempt to migrate pages from the pipe to the output
477  * file address space page cache. This is possible if no one else has
478  * the pipe page referenced outside of the pipe and page cache. If
479  * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
480  * a new page in the output file page cache and fill/dirty that.
481  */
482 static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
483                         struct splice_desc *sd)
484 {
485         struct file *file = sd->file;
486         struct address_space *mapping = file->f_mapping;
487         gfp_t gfp_mask = mapping_gfp_mask(mapping);
488         unsigned int offset;
489         struct page *page;
490         pgoff_t index;
491         char *src;
492         int ret;
493
494         /*
495          * make sure the data in this buffer is uptodate
496          */
497         src = buf->ops->map(file, info, buf);
498         if (IS_ERR(src))
499                 return PTR_ERR(src);
500
501         index = sd->pos >> PAGE_CACHE_SHIFT;
502         offset = sd->pos & ~PAGE_CACHE_MASK;
503
504         /*
505          * Reuse buf page, if SPLICE_F_MOVE is set.
506          */
507         if (sd->flags & SPLICE_F_MOVE) {
508                 /*
509                  * If steal succeeds, buf->page is now pruned from the vm
510                  * side (LRU and page cache) and we can reuse it.
511                  */
512                 if (buf->ops->steal(info, buf))
513                         goto find_page;
514
515                 /*
516                  * this will also set the page locked
517                  */
518                 page = buf->page;
519                 if (add_to_page_cache(page, mapping, index, gfp_mask))
520                         goto find_page;
521
522                 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
523                         lru_cache_add(page);
524         } else {
525 find_page:
526                 ret = -ENOMEM;
527                 page = find_or_create_page(mapping, index, gfp_mask);
528                 if (!page)
529                         goto out_nomem;
530
531                 /*
532                  * If the page is uptodate, it is also locked. If it isn't
533                  * uptodate, we can mark it uptodate if we are filling the
534                  * full page. Otherwise we need to read it in first...
535                  */
536                 if (!PageUptodate(page)) {
537                         if (sd->len < PAGE_CACHE_SIZE) {
538                                 ret = mapping->a_ops->readpage(file, page);
539                                 if (unlikely(ret))
540                                         goto out;
541
542                                 lock_page(page);
543
544                                 if (!PageUptodate(page)) {
545                                         /*
546                                          * Page got invalidated, repeat.
547                                          */
548                                         if (!page->mapping) {
549                                                 unlock_page(page);
550                                                 page_cache_release(page);
551                                                 goto find_page;
552                                         }
553                                         ret = -EIO;
554                                         goto out;
555                                 }
556                         } else {
557                                 WARN_ON(!PageLocked(page));
558                                 SetPageUptodate(page);
559                         }
560                 }
561         }
562
563         ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
564         if (ret == AOP_TRUNCATED_PAGE) {
565                 page_cache_release(page);
566                 goto find_page;
567         } else if (ret)
568                 goto out;
569
570         if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
571                 char *dst = kmap_atomic(page, KM_USER0);
572
573                 memcpy(dst + offset, src + buf->offset, sd->len);
574                 flush_dcache_page(page);
575                 kunmap_atomic(dst, KM_USER0);
576         }
577
578         ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
579         if (ret == AOP_TRUNCATED_PAGE) {
580                 page_cache_release(page);
581                 goto find_page;
582         } else if (ret)
583                 goto out;
584
585         mark_page_accessed(page);
586         balance_dirty_pages_ratelimited(mapping);
587 out:
588         if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
589                 page_cache_release(page);
590                 unlock_page(page);
591         }
592 out_nomem:
593         buf->ops->unmap(info, buf);
594         return ret;
595 }
596
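/*
 * Illustrative sketch (editor's note, not part of this file): from user
 * space the SPLICE_F_MOVE path above is requested like this; the flag is
 * only a hint, and pipe_to_file() silently falls back to copying whenever
 * the pipe page cannot be stolen. The helper name is made up and the glibc
 * splice(2) wrapper is assumed:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/types.h>
 *	#include <fcntl.h>
 *
 *	// Write 'len' bytes from a pipe into 'file_fd' starting at *off.
 *	static int splice_pipe_to_file(int pipe_rd, int file_fd,
 *				       loff_t *off, size_t len)
 *	{
 *		while (len) {
 *			ssize_t n = splice(pipe_rd, NULL, file_fd, off,
 *					   len, SPLICE_F_MOVE);
 *			if (n <= 0)
 *				return n;	// 0: no more data, <0: error
 *			len -= n;
 *		}
 *		return 0;
 *	}
 */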
597 typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
598                            struct splice_desc *);
599
600 /*
601  * Pipe input worker. Most of this logic works like a regular pipe, the
602  * key here is the 'actor' worker passed in that actually moves the data
603  * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
604  */
605 static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
606                               loff_t *ppos, size_t len, unsigned int flags,
607                               splice_actor *actor)
608 {
609         int ret, do_wakeup, err;
610         struct splice_desc sd;
611
612         ret = 0;
613         do_wakeup = 0;
614
615         sd.total_len = len;
616         sd.flags = flags;
617         sd.file = out;
618         sd.pos = *ppos;
619
620         if (pipe->inode)
621                 mutex_lock(&pipe->inode->i_mutex);
622
623         for (;;) {
624                 if (pipe->nrbufs) {
625                         struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
626                         struct pipe_buf_operations *ops = buf->ops;
627
628                         sd.len = buf->len;
629                         if (sd.len > sd.total_len)
630                                 sd.len = sd.total_len;
631
632                         err = actor(pipe, buf, &sd);
633                         if (err) {
634                                 if (!ret && err != -ENODATA)
635                                         ret = err;
636
637                                 break;
638                         }
639
640                         ret += sd.len;
641                         buf->offset += sd.len;
642                         buf->len -= sd.len;
643
644                         if (!buf->len) {
645                                 buf->ops = NULL;
646                                 ops->release(pipe, buf);
647                                 pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
648                                 pipe->nrbufs--;
649                                 if (pipe->inode)
650                                         do_wakeup = 1;
651                         }
652
653                         sd.pos += sd.len;
654                         sd.total_len -= sd.len;
655                         if (!sd.total_len)
656                                 break;
657                 }
658
659                 if (pipe->nrbufs)
660                         continue;
661                 if (!pipe->writers)
662                         break;
663                 if (!pipe->waiting_writers) {
664                         if (ret)
665                                 break;
666                 }
667
668                 if (flags & SPLICE_F_NONBLOCK) {
669                         if (!ret)
670                                 ret = -EAGAIN;
671                         break;
672                 }
673
674                 if (signal_pending(current)) {
675                         if (!ret)
676                                 ret = -ERESTARTSYS;
677                         break;
678                 }
679
680                 if (do_wakeup) {
681                         smp_mb();
682                         if (waitqueue_active(&pipe->wait))
683                                 wake_up_interruptible_sync(&pipe->wait);
684                         kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
685                         do_wakeup = 0;
686                 }
687
688                 pipe_wait(pipe);
689         }
690
691         if (pipe->inode)
692                 mutex_unlock(&pipe->inode->i_mutex);
693
694         if (do_wakeup) {
695                 smp_mb();
696                 if (waitqueue_active(&pipe->wait))
697                         wake_up_interruptible(&pipe->wait);
698                 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
699         }
700
701         return ret;
702 }
703
704 /**
705  * generic_file_splice_write - splice data from a pipe to a file
706  * @pipe:       pipe info
707  * @out:        file to write to
708  * @len:        number of bytes to splice
709  * @flags:      splice modifier flags
710  *
711  * Will either move or copy pages (determined by @flags options) from
712  * the given pipe inode to the given file.
713  *
714  */
715 ssize_t
716 generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
717                           loff_t *ppos, size_t len, unsigned int flags)
718 {
719         struct address_space *mapping = out->f_mapping;
720         ssize_t ret;
721
722         ret = move_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
723
724         /*
725          * If file or inode is SYNC and we actually wrote some data, sync it.
726          */
727         if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
728             && ret > 0) {
729                 struct inode *inode = mapping->host;
730                 int err;
731
732                 mutex_lock(&inode->i_mutex);
733                 err = generic_osync_inode(mapping->host, mapping,
734                                           OSYNC_METADATA|OSYNC_DATA);
735                 mutex_unlock(&inode->i_mutex);
736
737                 if (err)
738                         ret = err;
739         }
740
741         return ret;
742 }
743
744 EXPORT_SYMBOL(generic_file_splice_write);
745
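/*
 * Illustrative sketch (editor's note, not part of this file): a filesystem
 * that wants splice support typically just wires the generic helpers above
 * into its file_operations; "examplefs" below is a made-up name, not an
 * existing driver, and the unrelated hooks are omitted:
 *
 *	static struct file_operations examplefs_file_operations = {
 *		// ... .read, .write, .mmap etc. elided ...
 *		.splice_read	= generic_file_splice_read,
 *		.splice_write	= generic_file_splice_write,
 *	};
 */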
746 /**
747  * generic_splice_sendpage - splice data from a pipe to a socket
748  * @pipe:       pipe to splice data from
749  * @out:        socket to write to
750  * @len:        number of bytes to splice
751  * @flags:      splice modifier flags
752  *
753  * Will send @len bytes from the pipe to a network socket. No data copying
754  * is involved.
755  *
756  */
757 ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
758                                 loff_t *ppos, size_t len, unsigned int flags)
759 {
760         return move_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
761 }
762
763 EXPORT_SYMBOL(generic_splice_sendpage);
764
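/*
 * Illustrative sketch (editor's note, not part of this file): splicing pipe
 * contents straight to a connected TCP socket, using SPLICE_F_MORE as a
 * hint that further data follows (comparable to MSG_MORE on send()). The
 * helper name is made up and the glibc splice(2) wrapper is assumed:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	// Send 'len' bytes from a pipe to 'sock_fd'; 'more_to_come' tells
 *	// the network layer not to push a partial frame right away.
 *	static int splice_pipe_to_socket(int pipe_rd, int sock_fd,
 *					 size_t len, int more_to_come)
 *	{
 *		unsigned int flags = more_to_come ? SPLICE_F_MORE : 0;
 *
 *		while (len) {
 *			ssize_t n = splice(pipe_rd, NULL, sock_fd, NULL,
 *					   len, flags);
 *			if (n <= 0)
 *				return -1;
 *			len -= n;
 *		}
 *		return 0;
 *	}
 */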
765 /*
766  * Attempt to initiate a splice from pipe to file.
767  */
768 static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
769                            loff_t *ppos, size_t len, unsigned int flags)
770 {
771         int ret;
772
773         if (unlikely(!out->f_op || !out->f_op->splice_write))
774                 return -EINVAL;
775
776         if (unlikely(!(out->f_mode & FMODE_WRITE)))
777                 return -EBADF;
778
779         ret = rw_verify_area(WRITE, out, ppos, len);
780         if (unlikely(ret < 0))
781                 return ret;
782
783         return out->f_op->splice_write(pipe, out, ppos, len, flags);
784 }
785
786 /*
787  * Attempt to initiate a splice from a file to a pipe.
788  */
789 static long do_splice_to(struct file *in, loff_t *ppos,
790                          struct pipe_inode_info *pipe, size_t len,
791                          unsigned int flags)
792 {
793         loff_t isize, left;
794         int ret;
795
796         if (unlikely(!in->f_op || !in->f_op->splice_read))
797                 return -EINVAL;
798
799         if (unlikely(!(in->f_mode & FMODE_READ)))
800                 return -EBADF;
801
802         ret = rw_verify_area(READ, in, ppos, len);
803         if (unlikely(ret < 0))
804                 return ret;
805
806         isize = i_size_read(in->f_mapping->host);
807         if (unlikely(*ppos >= isize))
808                 return 0;
809
810         left = isize - *ppos;
811         if (unlikely(left < len))
812                 len = left;
813
814         return in->f_op->splice_read(in, ppos, pipe, len, flags);
815 }
816
817 long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
818                       size_t len, unsigned int flags)
819 {
820         struct pipe_inode_info *pipe;
821         long ret, bytes;
822         loff_t out_off;
823         umode_t i_mode;
824         int i;
825
826         /*
827          * We require the input being a regular file, as we don't want to
828          * randomly drop data for eg socket -> socket splicing. Use the
829          * piped splicing for that!
830          */
831         i_mode = in->f_dentry->d_inode->i_mode;
832         if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
833                 return -EINVAL;
834
835         /*
836          * neither in nor out is a pipe, set up an internal pipe attached to
837          * 'out' and transfer the wanted data from 'in' to 'out' through that
838          */
839         pipe = current->splice_pipe;
840         if (unlikely(!pipe)) {
841                 pipe = alloc_pipe_info(NULL);
842                 if (!pipe)
843                         return -ENOMEM;
844
845                 /*
846                  * We don't have an immediate reader, but we'll read the stuff
847                  * out of the pipe right after the move_to_pipe(). So set
848                  * PIPE_READERS appropriately.
849                  */
850                 pipe->readers = 1;
851
852                 current->splice_pipe = pipe;
853         }
854
855         /*
856          * Do the splice.
857          */
858         ret = 0;
859         bytes = 0;
860         out_off = 0;
861
862         while (len) {
863                 size_t read_len, max_read_len;
864
865                 /*
866                  * Do at most PIPE_BUFFERS pages worth of transfer:
867                  */
868                 max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));
869
870                 ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
871                 if (unlikely(ret < 0))
872                         goto out_release;
873
874                 read_len = ret;
875
876                 /*
877                  * NOTE: nonblocking mode only applies to the input. We
878                  * must not do the output in nonblocking mode as then we
879                  * could get stuck data in the internal pipe:
880                  */
881                 ret = do_splice_from(pipe, out, &out_off, read_len,
882                                      flags & ~SPLICE_F_NONBLOCK);
883                 if (unlikely(ret < 0))
884                         goto out_release;
885
886                 bytes += ret;
887                 len -= ret;
888
889                 /*
890                  * In nonblocking mode, if we got back a short read then
891                  * that was due either to an IO error or to the
892                  * pagecache entry not being there. In the IO error case
893                  * the _next_ splice attempt will produce a clean IO error
894                  * return value (not a short read), so in both cases it's
895                  * correct to break out of the loop here:
896                  */
897                 if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
898                         break;
899         }
900
901         pipe->nrbufs = pipe->curbuf = 0;
902
903         return bytes;
904
905 out_release:
906         /*
907          * If we did an incomplete transfer we must release
908          * the pipe buffers in question:
909          */
910         for (i = 0; i < PIPE_BUFFERS; i++) {
911                 struct pipe_buffer *buf = pipe->bufs + i;
912
913                 if (buf->ops) {
914                         buf->ops->release(pipe, buf);
915                         buf->ops = NULL;
916                 }
917         }
918         pipe->nrbufs = pipe->curbuf = 0;
919
920         /*
921          * If we transferred some data, return the number of bytes:
922          */
923         if (bytes > 0)
924                 return bytes;
925
926         return ret;
927 }
928
929 EXPORT_SYMBOL(do_splice_direct);
930
931 /*
932  * Determine where to splice to/from.
933  */
934 static long do_splice(struct file *in, loff_t __user *off_in,
935                       struct file *out, loff_t __user *off_out,
936                       size_t len, unsigned int flags)
937 {
938         struct pipe_inode_info *pipe;
939         loff_t offset, *off;
940
941         pipe = in->f_dentry->d_inode->i_pipe;
942         if (pipe) {
943                 if (off_in)
944                         return -ESPIPE;
945                 if (off_out) {
946                         if (out->f_op->llseek == no_llseek)
947                                 return -EINVAL;
948                         if (copy_from_user(&offset, off_out, sizeof(loff_t)))
949                                 return -EFAULT;
950                         off = &offset;
951                 } else
952                         off = &out->f_pos;
953
954                 return do_splice_from(pipe, out, off, len, flags);
955         }
956
957         pipe = out->f_dentry->d_inode->i_pipe;
958         if (pipe) {
959                 if (off_out)
960                         return -ESPIPE;
961                 if (off_in) {
962                         if (in->f_op->llseek == no_llseek)
963                                 return -EINVAL;
964                         if (copy_from_user(&offset, off_in, sizeof(loff_t)))
965                                 return -EFAULT;
966                         off = &offset;
967                 } else
968                         off = &in->f_pos;
969
970                 return do_splice_to(in, off, pipe, len, flags);
971         }
972
973         return -EINVAL;
974 }
975
976 asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
977                            int fd_out, loff_t __user *off_out,
978                            size_t len, unsigned int flags)
979 {
980         long error;
981         struct file *in, *out;
982         int fput_in, fput_out;
983
984         if (unlikely(!len))
985                 return 0;
986
987         error = -EBADF;
988         in = fget_light(fd_in, &fput_in);
989         if (in) {
990                 if (in->f_mode & FMODE_READ) {
991                         out = fget_light(fd_out, &fput_out);
992                         if (out) {
993                                 if (out->f_mode & FMODE_WRITE)
994                                         error = do_splice(in, off_in,
995                                                           out, off_out,
996                                                           len, flags);
997                                 fput_light(out, fput_out);
998                         }
999                 }
1000
1001                 fput_light(in, fput_in);
1002         }
1003
1004         return error;
1005 }
1006
1007 /*
1008  * Link contents of ipipe to opipe.
1009  */
1010 static int link_pipe(struct pipe_inode_info *ipipe,
1011                      struct pipe_inode_info *opipe,
1012                      size_t len, unsigned int flags)
1013 {
1014         struct pipe_buffer *ibuf, *obuf;
1015         int ret = 0, do_wakeup = 0, i;
1016
1017         /*
1018          * Potential ABBA deadlock, work around it by ordering lock
1019          * grabbing by inode address. Otherwise two different processes
1020          * could deadlock (one doing tee from A -> B, the other from B -> A).
1021          */
1022         if (ipipe->inode < opipe->inode) {
1023                 mutex_lock(&ipipe->inode->i_mutex);
1024                 mutex_lock(&opipe->inode->i_mutex);
1025         } else {
1026                 mutex_lock(&opipe->inode->i_mutex);
1027                 mutex_lock(&ipipe->inode->i_mutex);
1028         }
1029
1030         for (i = 0;; i++) {
1031                 if (!opipe->readers) {
1032                         send_sig(SIGPIPE, current, 0);
1033                         if (!ret)
1034                                 ret = -EPIPE;
1035                         break;
1036                 }
1037                 if (ipipe->nrbufs - i) {
1038                         ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
1039
1040                         /*
1041                          * If we have room, fill this buffer
1042                          */
1043                         if (opipe->nrbufs < PIPE_BUFFERS) {
1044                                 int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);
1045
1046                                 /*
1047                                  * Get a reference to this pipe buffer,
1048                                  * so we can copy the contents over.
1049                                  */
1050                                 ibuf->ops->get(ipipe, ibuf);
1051
1052                                 obuf = opipe->bufs + nbuf;
1053                                 *obuf = *ibuf;
1054
1055                                 if (obuf->len > len)
1056                                         obuf->len = len;
1057
1058                                 opipe->nrbufs++;
1059                                 do_wakeup = 1;
1060                                 ret += obuf->len;
1061                                 len -= obuf->len;
1062
1063                                 if (!len)
1064                                         break;
1065                                 if (opipe->nrbufs < PIPE_BUFFERS)
1066                                         continue;
1067                         }
1068
1069                         /*
1070                          * We have input available, but no output room.
1071                          * If we already copied data, return that.
1072                          */
1073                         if (flags & SPLICE_F_NONBLOCK) {
1074                                 if (!ret)
1075                                         ret = -EAGAIN;
1076                                 break;
1077                         }
1078                         if (signal_pending(current)) {
1079                                 if (!ret)
1080                                         ret = -ERESTARTSYS;
1081                                 break;
1082                         }
1083                         if (do_wakeup) {
1084                                 smp_mb();
1085                                 if (waitqueue_active(&opipe->wait))
1086                                         wake_up_interruptible(&opipe->wait);
1087                                 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1088                                 do_wakeup = 0;
1089                         }
1090
1091                         opipe->waiting_writers++;
1092                         pipe_wait(opipe);
1093                         opipe->waiting_writers--;
1094                         continue;
1095                 }
1096
1097                 /*
1098                  * No input buffers; do the usual checks for available
1099                  * writers and blocking, and wait if necessary
1100                  */
1101                 if (!ipipe->writers)
1102                         break;
1103                 if (!ipipe->waiting_writers) {
1104                         if (ret)
1105                                 break;
1106                 }
1107                 if (flags & SPLICE_F_NONBLOCK) {
1108                         if (!ret)
1109                                 ret = -EAGAIN;
1110                         break;
1111                 }
1112                 if (signal_pending(current)) {
1113                         if (!ret)
1114                                 ret = -ERESTARTSYS;
1115                         break;
1116                 }
1117
1118                 if (waitqueue_active(&ipipe->wait))
1119                         wake_up_interruptible_sync(&ipipe->wait);
1120                 kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);
1121
1122                 pipe_wait(ipipe);
1123         }
1124
1125         mutex_unlock(&ipipe->inode->i_mutex);
1126         mutex_unlock(&opipe->inode->i_mutex);
1127
1128         if (do_wakeup) {
1129                 smp_mb();
1130                 if (waitqueue_active(&opipe->wait))
1131                         wake_up_interruptible(&opipe->wait);
1132                 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1133         }
1134
1135         return ret;
1136 }
1137
1138 /*
1139  * This is a tee(1) implementation that works on pipes. It doesn't copy
1140  * any data, it simply references the 'in' pages on the 'out' pipe.
1141  * The 'flags' used are the SPLICE_F_* variants, currently the only
1142  * applicable one is SPLICE_F_NONBLOCK.
1143  */
1144 static long do_tee(struct file *in, struct file *out, size_t len,
1145                    unsigned int flags)
1146 {
1147         struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
1148         struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;
1149
1150         /*
1151          * Link ipipe to the output pipe, consuming as we go along.
1152          */
1153         if (ipipe && opipe)
1154                 return link_pipe(ipipe, opipe, len, flags);
1155
1156         return -EINVAL;
1157 }
1158
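/*
 * Illustrative sketch (editor's note, not part of this file): the classic
 * use of tee(2) is a filter that passes a pipe through unmodified while
 * logging a copy, much like tee(1). stdin and stdout must both be pipes.
 * The helper name is made up; glibc wrappers for tee(2) and splice(2)
 * are assumed:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <limits.h>
 *
 *	// Duplicate stdin to stdout while also writing it to 'log_fd'.
 *	static int tee_stdin_to_stdout_and_log(int log_fd)
 *	{
 *		for (;;) {
 *			ssize_t n = tee(STDIN_FILENO, STDOUT_FILENO,
 *					INT_MAX, 0);
 *			if (n == 0)
 *				return 0;	// no more writers on stdin
 *			if (n < 0)
 *				return -1;
 *
 *			// tee() only duplicated the data; consume it from
 *			// stdin by splicing it into the log file.
 *			while (n > 0) {
 *				ssize_t m = splice(STDIN_FILENO, NULL, log_fd,
 *						   NULL, n, SPLICE_F_MOVE);
 *				if (m <= 0)
 *					return -1;
 *				n -= m;
 *			}
 *		}
 *	}
 */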
1159 asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
1160 {
1161         struct file *in;
1162         int error, fput_in;
1163
1164         if (unlikely(!len))
1165                 return 0;
1166
1167         error = -EBADF;
1168         in = fget_light(fdin, &fput_in);
1169         if (in) {
1170                 if (in->f_mode & FMODE_READ) {
1171                         int fput_out;
1172                         struct file *out = fget_light(fdout, &fput_out);
1173
1174                         if (out) {
1175                                 if (out->f_mode & FMODE_WRITE)
1176                                         error = do_tee(in, out, len, flags);
1177                                 fput_light(out, fput_out);
1178                         }
1179                 }
1180                 fput_light(in, fput_in);
1181         }
1182
1183         return error;
1184 }