[PATCH] splice: fix offset problems
[pandora-kernel.git] / fs / splice.c
1 /*
2  * "splice": joining two ropes together by interweaving their strands.
3  *
4  * This is the "extended pipe" functionality, where a pipe is used as
5  * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6  * buffer that you can use to transfer data from one end to the other.
7  *
8  * The traditional unix read/write is extended with a "splice()" operation
9  * that transfers data buffers to or from a pipe buffer.
10  *
11  * Named by Larry McVoy, original implementation from Linus, extended by
12  * Jens to support splicing to files, network, direct splicing, etc and
13  * fixing lots of bugs.
14  *
15  * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
16  * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17  * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
18  *
19  */
20 #include <linux/fs.h>
21 #include <linux/file.h>
22 #include <linux/pagemap.h>
23 #include <linux/pipe_fs_i.h>
24 #include <linux/mm_inline.h>
25 #include <linux/swap.h>
26 #include <linux/writeback.h>
27 #include <linux/buffer_head.h>
28 #include <linux/module.h>
29 #include <linux/syscalls.h>
30
31 /*
32  * Passed to the actors
33  */
34 struct splice_desc {
35         unsigned int len, total_len;    /* current and remaining length */
36         unsigned int flags;             /* splice flags */
37         struct file *file;              /* file to read/write */
38         loff_t pos;                     /* file position */
39 };
40
41 /*
42  * Attempt to steal a page from a pipe buffer. This should perhaps go into
43  * a vm helper function, it's already simplified quite a bit by the
44  * addition of remove_mapping(). If success is returned, the caller may
45  * attempt to reuse this page for another destination.
46  */
/*
 * Try to take ownership of the page cache page backing @buf, so it can
 * be inserted directly into a destination address space without copying.
 *
 * Returns 0 on success: the page has been removed from the page cache
 * and is returned LOCKED, with STOLEN|LRU recorded in buf->flags.
 * Returns 1 on failure: the page is unlocked and the caller must fall
 * back to copying the data.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
                                     struct pipe_buffer *buf)
{
        struct page *page = buf->page;
        struct address_space *mapping = page_mapping(page);

        lock_page(page);

        WARN_ON(!PageUptodate(page));

        /*
         * At least for ext2 with nobh option, we need to wait on writeback
         * completing on this page, since we'll remove it from the pagecache.
         * Otherwise truncate won't wait on the page, allowing the disk
         * blocks to be reused by someone else before we actually wrote our
         * data to them. fs corruption ensues.
         */
        wait_on_page_writeback(page);

        /*
         * Ask the filesystem to drop any private state (e.g. buffer
         * heads) so remove_mapping() below can succeed.
         */
        if (PagePrivate(page))
                try_to_release_page(page, mapping_gfp_mask(mapping));

        if (!remove_mapping(mapping, page)) {
                unlock_page(page);
                return 1;
        }

        buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
        return 0;
}
77
78 static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
79                                         struct pipe_buffer *buf)
80 {
81         page_cache_release(buf->page);
82         buf->page = NULL;
83         buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
84 }
85
/*
 * Map the pipe buffer's page into kernel address space for access.
 * If the page is not uptodate yet, wait for in-flight read I/O (via
 * lock_page) and re-check its state before mapping.
 *
 * Returns the kmap'ed address, or an ERR_PTR: -ENODATA if the page got
 * truncated away, -EIO on a read error. Undo with ->unmap().
 */
static void *page_cache_pipe_buf_map(struct file *file,
                                     struct pipe_inode_info *info,
                                     struct pipe_buffer *buf)
{
        struct page *page = buf->page;
        int err;

        if (!PageUptodate(page)) {
                /* Serialize against read completion and truncate. */
                lock_page(page);

                /*
                 * Page got truncated/unhashed. This will cause a 0-byte
                 * splice, if this is the first page.
                 */
                if (!page->mapping) {
                        err = -ENODATA;
                        goto error;
                }

                /*
                 * Uh oh, read-error from disk.
                 */
                if (!PageUptodate(page)) {
                        err = -EIO;
                        goto error;
                }

                /*
                 * Page is ok afterall, fall through to mapping.
                 */
                unlock_page(page);
        }

        return kmap(page);
error:
        unlock_page(page);
        return ERR_PTR(err);
}
124
125 static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
126                                       struct pipe_buffer *buf)
127 {
128         kunmap(buf->page);
129 }
130
131 static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
132                                     struct pipe_buffer *buf)
133 {
134         page_cache_get(buf->page);
135 }
136
137 static struct pipe_buf_operations page_cache_pipe_buf_ops = {
138         .can_merge = 0,
139         .map = page_cache_pipe_buf_map,
140         .unmap = page_cache_pipe_buf_unmap,
141         .release = page_cache_pipe_buf_release,
142         .steal = page_cache_pipe_buf_steal,
143         .get = page_cache_pipe_buf_get,
144 };
145
146 /*
147  * Pipe output worker. This sets up our pipe format with the page cache
148  * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
149  */
/*
 * Insert up to @nr_pages pages into @pipe as page-cache pipe buffers,
 * starting @offset bytes into the first page and moving at most @len
 * bytes in total. Returns the number of bytes queued, or a negative
 * errno if nothing could be queued. Pages that do not fit have their
 * references dropped before returning.
 */
static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
                            int nr_pages, unsigned long len,
                            unsigned int offset, unsigned int flags)
{
        int ret, do_wakeup, i;

        ret = 0;
        do_wakeup = 0;
        i = 0;

        /* Internal pipes have no inode and need no i_mutex locking. */
        if (pipe->inode)
                mutex_lock(&pipe->inode->i_mutex);

        for (;;) {
                /* No readers left: behave like a write to a broken pipe. */
                if (!pipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }

                if (pipe->nrbufs < PIPE_BUFFERS) {
                        /* Next free slot in the circular buffer array. */
                        int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
                        struct pipe_buffer *buf = pipe->bufs + newbuf;
                        struct page *page = pages[i++];
                        unsigned long this_len;

                        /* Only the first page can start at a non-zero offset. */
                        this_len = PAGE_CACHE_SIZE - offset;
                        if (this_len > len)
                                this_len = len;

                        buf->page = page;
                        buf->offset = offset;
                        buf->len = this_len;
                        buf->ops = &page_cache_pipe_buf_ops;
                        pipe->nrbufs++;
                        if (pipe->inode)
                                do_wakeup = 1;

                        ret += this_len;
                        len -= this_len;
                        offset = 0;
                        if (!--nr_pages)
                                break;
                        if (!len)
                                break;
                        if (pipe->nrbufs < PIPE_BUFFERS)
                                continue;

                        break;
                }

                /* Pipe is full and we must not sleep. */
                if (flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                /* Wake sleeping readers before we go to sleep ourselves. */
                if (do_wakeup) {
                        smp_mb();
                        if (waitqueue_active(&pipe->wait))
                                wake_up_interruptible_sync(&pipe->wait);
                        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                        do_wakeup = 0;
                }

                pipe->waiting_writers++;
                pipe_wait(pipe);
                pipe->waiting_writers--;
        }

        if (pipe->inode)
                mutex_unlock(&pipe->inode->i_mutex);

        /* Final reader wakeup, issued after dropping the mutex. */
        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }

        /* Release references on any pages we could not queue. */
        while (i < nr_pages)
                page_cache_release(pages[i++]);

        return ret;
}
242
243 static int
244 __generic_file_splice_read(struct file *in, loff_t *ppos,
245                            struct pipe_inode_info *pipe, size_t len,
246                            unsigned int flags)
247 {
248         struct address_space *mapping = in->f_mapping;
249         unsigned int loff, offset, nr_pages;
250         struct page *pages[PIPE_BUFFERS];
251         struct page *page;
252         pgoff_t index, end_index;
253         loff_t isize;
254         size_t bytes;
255         int i, error;
256
257         index = *ppos >> PAGE_CACHE_SHIFT;
258         loff = offset = *ppos & ~PAGE_CACHE_MASK;
259         nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
260
261         if (nr_pages > PIPE_BUFFERS)
262                 nr_pages = PIPE_BUFFERS;
263
264         /*
265          * Initiate read-ahead on this page range. however, don't call into
266          * read-ahead if this is a non-zero offset (we are likely doing small
267          * chunk splice and the page is already there) for a single page.
268          */
269         if (!offset || nr_pages > 1)
270                 do_page_cache_readahead(mapping, in, index, nr_pages);
271
272         /*
273          * Now fill in the holes:
274          */
275         error = 0;
276         bytes = 0;
277         for (i = 0; i < nr_pages; i++, index++) {
278                 unsigned int this_len;
279
280                 if (!len)
281                         break;
282
283                 /*
284                  * this_len is the max we'll use from this page
285                  */
286                 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
287 find_page:
288                 /*
289                  * lookup the page for this index
290                  */
291                 page = find_get_page(mapping, index);
292                 if (!page) {
293                         /*
294                          * page didn't exist, allocate one
295                          */
296                         page = page_cache_alloc_cold(mapping);
297                         if (!page)
298                                 break;
299
300                         error = add_to_page_cache_lru(page, mapping, index,
301                                                 mapping_gfp_mask(mapping));
302                         if (unlikely(error)) {
303                                 page_cache_release(page);
304                                 break;
305                         }
306
307                         goto readpage;
308                 }
309
310                 /*
311                  * If the page isn't uptodate, we may need to start io on it
312                  */
313                 if (!PageUptodate(page)) {
314                         /*
315                          * If in nonblock mode then dont block on waiting
316                          * for an in-flight io page
317                          */
318                         if (flags & SPLICE_F_NONBLOCK)
319                                 break;
320
321                         lock_page(page);
322
323                         /*
324                          * page was truncated, stop here. if this isn't the
325                          * first page, we'll just complete what we already
326                          * added
327                          */
328                         if (!page->mapping) {
329                                 unlock_page(page);
330                                 page_cache_release(page);
331                                 break;
332                         }
333                         /*
334                          * page was already under io and is now done, great
335                          */
336                         if (PageUptodate(page)) {
337                                 unlock_page(page);
338                                 goto fill_it;
339                         }
340
341 readpage:
342                         /*
343                          * need to read in the page
344                          */
345                         error = mapping->a_ops->readpage(in, page);
346
347                         if (unlikely(error)) {
348                                 page_cache_release(page);
349                                 if (error == AOP_TRUNCATED_PAGE)
350                                         goto find_page;
351                                 break;
352                         }
353
354                         /*
355                          * i_size must be checked after ->readpage().
356                          */
357                         isize = i_size_read(mapping->host);
358                         end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
359                         if (unlikely(!isize || index > end_index)) {
360                                 page_cache_release(page);
361                                 break;
362                         }
363
364                         /*
365                          * if this is the last page, see if we need to shrink
366                          * the length and stop
367                          */
368                         if (end_index == index) {
369                                 loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
370                                 if (bytes + loff > isize) {
371                                         page_cache_release(page);
372                                         break;
373                                 }
374                                 /*
375                                  * force quit after adding this page
376                                  */
377                                 nr_pages = i;
378                                 this_len = min(this_len, loff);
379                         }
380                 }
381 fill_it:
382                 pages[i] = page;
383                 bytes += this_len;
384                 len -= this_len;
385                 loff = 0;
386         }
387
388         if (i)
389                 return move_to_pipe(pipe, pages, i, bytes, offset, flags);
390
391         return error;
392 }
393
394 /**
395  * generic_file_splice_read - splice data from file to a pipe
396  * @in:         file to splice from
397  * @pipe:       pipe to splice to
398  * @len:        number of bytes to splice
399  * @flags:      splice modifier flags
400  *
401  * Will read pages from given file and fill them into a pipe.
402  */
403 ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
404                                  struct pipe_inode_info *pipe, size_t len,
405                                  unsigned int flags)
406 {
407         ssize_t spliced;
408         int ret;
409
410         ret = 0;
411         spliced = 0;
412
413         while (len) {
414                 ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
415
416                 if (ret < 0)
417                         break;
418                 else if (!ret) {
419                         if (spliced)
420                                 break;
421                         if (flags & SPLICE_F_NONBLOCK) {
422                                 ret = -EAGAIN;
423                                 break;
424                         }
425                 }
426
427                 *ppos += ret;
428                 len -= ret;
429                 spliced += ret;
430         }
431
432         if (spliced)
433                 return spliced;
434
435         return ret;
436 }
437
438 EXPORT_SYMBOL(generic_file_splice_read);
439
440 /*
441  * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
442  * using sendpage(). Return the number of bytes sent.
443  */
444 static int pipe_to_sendpage(struct pipe_inode_info *info,
445                             struct pipe_buffer *buf, struct splice_desc *sd)
446 {
447         struct file *file = sd->file;
448         loff_t pos = sd->pos;
449         ssize_t ret;
450         void *ptr;
451         int more;
452
453         /*
454          * Sub-optimal, but we are limited by the pipe ->map. We don't
455          * need a kmap'ed buffer here, we just want to make sure we
456          * have the page pinned if the pipe page originates from the
457          * page cache.
458          */
459         ptr = buf->ops->map(file, info, buf);
460         if (IS_ERR(ptr))
461                 return PTR_ERR(ptr);
462
463         more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
464
465         ret = file->f_op->sendpage(file, buf->page, buf->offset, sd->len,
466                                    &pos, more);
467
468         buf->ops->unmap(info, buf);
469         return ret;
470 }
471
472 /*
473  * This is a little more tricky than the file -> pipe splicing. There are
474  * basically three cases:
475  *
476  *      - Destination page already exists in the address space and there
477  *        are users of it. For that case we have no other option than
478  *        copying the data. Tough luck.
479  *      - Destination page already exists in the address space, but there
480  *        are no users of it. Make sure it's uptodate, then drop it. Fall
481  *        through to last case.
482  *      - Destination page does not exist, we can add the pipe page to
483  *        the page cache and avoid the copy.
484  *
485  * If asked to move pages to the output file (SPLICE_F_MOVE is set in
486  * sd->flags), we attempt to migrate pages from the pipe to the output
487  * file address space page cache. This is possible if no one else has
488  * the pipe page referenced outside of the pipe and page cache. If
489  * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
490  * a new page in the output file page cache and fill/dirty that.
491  */
/*
 * Actor that moves one pipe buffer's worth of data into the page cache
 * of 'sd->file' at position 'sd->pos', either by stealing the pipe page
 * (SPLICE_F_MOVE) or by copying into a (possibly newly allocated)
 * destination page. Returns bytes written or a negative errno.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        struct file *file = sd->file;
        struct address_space *mapping = file->f_mapping;
        gfp_t gfp_mask = mapping_gfp_mask(mapping);
        unsigned int offset, this_len;
        struct page *page;
        pgoff_t index;
        char *src;
        int ret;

        /*
         * make sure the data in this buffer is uptodate
         */
        src = buf->ops->map(file, info, buf);
        if (IS_ERR(src))
                return PTR_ERR(src);

        index = sd->pos >> PAGE_CACHE_SHIFT;
        offset = sd->pos & ~PAGE_CACHE_MASK;

        /*
         * Clamp the write so it does not cross a destination page boundary.
         */
        this_len = sd->len;
        if (this_len + offset > PAGE_CACHE_SIZE)
                this_len = PAGE_CACHE_SIZE - offset;

        /*
         * Reuse buf page, if SPLICE_F_MOVE is set.
         */
        if (sd->flags & SPLICE_F_MOVE) {
                /*
                 * If steal succeeds, buf->page is now pruned from the vm
                 * side (LRU and page cache) and we can reuse it. The page
                 * will also be locked on successful return.
                 */
                if (buf->ops->steal(info, buf))
                        goto find_page;

                page = buf->page;
                if (add_to_page_cache(page, mapping, index, gfp_mask))
                        goto find_page;

                if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                        lru_cache_add(page);
        } else {
find_page:
                page = find_lock_page(mapping, index);
                if (!page) {
                        ret = -ENOMEM;
                        page = page_cache_alloc_cold(mapping);
                        if (unlikely(!page))
                                goto out_nomem;

                        /*
                         * This will also lock the page
                         */
                        ret = add_to_page_cache_lru(page, mapping, index,
                                                    gfp_mask);
                        if (unlikely(ret))
                                goto out;
                }

                /*
                 * We get here with the page locked. If the page is also
                 * uptodate, we don't need to do more. If it isn't, we
                 * may need to bring it in if we are not going to overwrite
                 * the full page.
                 */
                if (!PageUptodate(page)) {
                        if (this_len < PAGE_CACHE_SIZE) {
                                ret = mapping->a_ops->readpage(file, page);
                                if (unlikely(ret))
                                        goto out;

                                lock_page(page);

                                if (!PageUptodate(page)) {
                                        /*
                                         * Page got invalidated, repeat.
                                         */
                                        if (!page->mapping) {
                                                unlock_page(page);
                                                page_cache_release(page);
                                                goto find_page;
                                        }
                                        ret = -EIO;
                                        goto out;
                                }
                        } else
                                /* Full-page overwrite: no need to read first. */
                                SetPageUptodate(page);
                }
        }

        ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
        if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        } else if (ret)
                goto out;

        /*
         * A stolen page already contains the data; otherwise copy it in
         * from the mapped pipe buffer.
         */
        if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
                char *dst = kmap_atomic(page, KM_USER0);

                memcpy(dst + offset, src + buf->offset, this_len);
                flush_dcache_page(page);
                kunmap_atomic(dst, KM_USER0);
        }

        ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
        if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        } else if (ret)
                goto out;

        /*
         * Return the number of bytes written.
         */
        ret = this_len;
        mark_page_accessed(page);
        balance_dirty_pages_ratelimited(mapping);
out:
        /*
         * NOTE(review): the reference is dropped before unlock_page();
         * this appears to rely on the page cache still holding its own
         * reference — confirm ordering versus concurrent truncate.
         */
        if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
                page_cache_release(page);

        unlock_page(page);
out_nomem:
        buf->ops->unmap(info, buf);
        return ret;
}
622
623 typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
624                            struct splice_desc *);
625
626 /*
627  * Pipe input worker. Most of this logic works like a regular pipe, the
628  * key here is the 'actor' worker passed in that actually moves the data
629  * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
630  */
/*
 * Consume up to @len bytes from @pipe, feeding each pipe buffer to
 * @actor (pipe_to_file or pipe_to_sendpage) which moves the data to
 * @out starting at *@ppos. Returns bytes consumed or a negative errno;
 * *@ppos is not advanced here.
 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
                              loff_t *ppos, size_t len, unsigned int flags,
                              splice_actor *actor)
{
        int ret, do_wakeup, err;
        struct splice_desc sd;

        ret = 0;
        do_wakeup = 0;

        sd.total_len = len;
        sd.flags = flags;
        sd.file = out;
        sd.pos = *ppos;

        /* Internal pipes have no inode and need no i_mutex locking. */
        if (pipe->inode)
                mutex_lock(&pipe->inode->i_mutex);

        for (;;) {
                if (pipe->nrbufs) {
                        struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
                        struct pipe_buf_operations *ops = buf->ops;

                        /* Never hand the actor more than is left to splice. */
                        sd.len = buf->len;
                        if (sd.len > sd.total_len)
                                sd.len = sd.total_len;

                        err = actor(pipe, buf, &sd);
                        if (err <= 0) {
                                /* -ENODATA is a benign "nothing here" signal. */
                                if (!ret && err != -ENODATA)
                                        ret = err;

                                break;
                        }

                        /* Account for a (possibly partial) transfer. */
                        ret += err;
                        buf->offset += err;
                        buf->len -= err;

                        sd.len -= err;
                        sd.pos += err;
                        sd.total_len -= err;
                        if (sd.len)
                                continue;

                        /* Buffer fully consumed: release it and advance. */
                        if (!buf->len) {
                                buf->ops = NULL;
                                ops->release(pipe, buf);
                                pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
                                pipe->nrbufs--;
                                if (pipe->inode)
                                        do_wakeup = 1;
                        }

                        if (!sd.total_len)
                                break;
                }

                if (pipe->nrbufs)
                        continue;
                if (!pipe->writers)
                        break;
                /* No writer is about to refill the pipe; stop if we have data. */
                if (!pipe->waiting_writers) {
                        if (ret)
                                break;
                }

                if (flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                /* Wake sleeping writers before we go to sleep ourselves. */
                if (do_wakeup) {
                        smp_mb();
                        if (waitqueue_active(&pipe->wait))
                                wake_up_interruptible_sync(&pipe->wait);
                        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
                        do_wakeup = 0;
                }

                pipe_wait(pipe);
        }

        if (pipe->inode)
                mutex_unlock(&pipe->inode->i_mutex);

        /* Final writer wakeup, issued after dropping the mutex. */
        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }

        return ret;
}
733
734 /**
735  * generic_file_splice_write - splice data from a pipe to a file
736  * @pipe:       pipe info
737  * @out:        file to write to
738  * @len:        number of bytes to splice
739  * @flags:      splice modifier flags
740  *
741  * Will either move or copy pages (determined by @flags options) from
742  * the given pipe inode to the given file.
743  *
744  */
745 ssize_t
746 generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
747                           loff_t *ppos, size_t len, unsigned int flags)
748 {
749         struct address_space *mapping = out->f_mapping;
750         ssize_t ret;
751
752         ret = move_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
753         if (ret > 0) {
754                 struct inode *inode = mapping->host;
755
756                 *ppos += ret;
757
758                 /*
759                  * If file or inode is SYNC and we actually wrote some data,
760                  * sync it.
761                  */
762                 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
763                         int err;
764
765                         mutex_lock(&inode->i_mutex);
766                         err = generic_osync_inode(inode, mapping,
767                                                   OSYNC_METADATA|OSYNC_DATA);
768                         mutex_unlock(&inode->i_mutex);
769
770                         if (err)
771                                 ret = err;
772                 }
773         }
774
775         return ret;
776 }
777
778 EXPORT_SYMBOL(generic_file_splice_write);
779
780 /**
781  * generic_splice_sendpage - splice data from a pipe to a socket
782  * @inode:      pipe inode
783  * @out:        socket to write to
784  * @len:        number of bytes to splice
785  * @flags:      splice modifier flags
786  *
787  * Will send @len bytes from the pipe to a network socket. No data copying
788  * is involved.
789  *
790  */
791 ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
792                                 loff_t *ppos, size_t len, unsigned int flags)
793 {
794         return move_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
795 }
796
797 EXPORT_SYMBOL(generic_splice_sendpage);
798
799 /*
800  * Attempt to initiate a splice from pipe to file.
801  */
802 static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
803                            loff_t *ppos, size_t len, unsigned int flags)
804 {
805         int ret;
806
807         if (unlikely(!out->f_op || !out->f_op->splice_write))
808                 return -EINVAL;
809
810         if (unlikely(!(out->f_mode & FMODE_WRITE)))
811                 return -EBADF;
812
813         ret = rw_verify_area(WRITE, out, ppos, len);
814         if (unlikely(ret < 0))
815                 return ret;
816
817         return out->f_op->splice_write(pipe, out, ppos, len, flags);
818 }
819
820 /*
821  * Attempt to initiate a splice from a file to a pipe.
822  */
823 static long do_splice_to(struct file *in, loff_t *ppos,
824                          struct pipe_inode_info *pipe, size_t len,
825                          unsigned int flags)
826 {
827         loff_t isize, left;
828         int ret;
829
830         if (unlikely(!in->f_op || !in->f_op->splice_read))
831                 return -EINVAL;
832
833         if (unlikely(!(in->f_mode & FMODE_READ)))
834                 return -EBADF;
835
836         ret = rw_verify_area(READ, in, ppos, len);
837         if (unlikely(ret < 0))
838                 return ret;
839
840         isize = i_size_read(in->f_mapping->host);
841         if (unlikely(*ppos >= isize))
842                 return 0;
843         
844         left = isize - *ppos;
845         if (unlikely(left < len))
846                 len = left;
847
848         return in->f_op->splice_read(in, ppos, pipe, len, flags);
849 }
850
/*
 * Splice data directly from 'in' to 'out' using a per-task internal
 * pipe (current->splice_pipe) as the intermediate buffer: repeatedly
 * splice up to PIPE_BUFFERS pages from 'in' into the pipe, then from
 * the pipe into 'out', until 'len' bytes are done or an error/short
 * read stops us.
 *
 * *ppos is the input offset and is advanced by do_splice_to(); the
 * output is written starting at offset 0 of 'out' (out_off).
 *
 * Returns the number of bytes transferred, or a negative errno if
 * nothing was transferred.
 */
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
                      size_t len, unsigned int flags)
{
        struct pipe_inode_info *pipe;
        long ret, bytes;
        loff_t out_off;
        umode_t i_mode;
        int i;

        /*
         * We require the input being a regular file, as we don't want to
         * randomly drop data for eg socket -> socket splicing. Use the
         * piped splicing for that!
         */
        i_mode = in->f_dentry->d_inode->i_mode;
        if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
                return -EINVAL;

        /*
         * neither in nor out is a pipe, setup an internal pipe attached to
         * 'out' and transfer the wanted data from 'in' to 'out' through that
         */
        pipe = current->splice_pipe;
        if (unlikely(!pipe)) {
                /* First direct splice for this task: allocate and cache the pipe */
                pipe = alloc_pipe_info(NULL);
                if (!pipe)
                        return -ENOMEM;

                /*
                 * We don't have an immediate reader, but we'll read the stuff
                 * out of the pipe right after the move_to_pipe(). So set
                 * PIPE_READERS appropriately.
                 */
                pipe->readers = 1;

                current->splice_pipe = pipe;
        }

        /*
         * Do the splice.
         */
        ret = 0;
        bytes = 0;
        out_off = 0;

        while (len) {
                size_t read_len, max_read_len;

                /*
                 * Do at most PIPE_BUFFERS pages worth of transfer:
                 */
                max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

                ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
                if (unlikely(ret < 0))
                        goto out_release;

                read_len = ret;

                /*
                 * NOTE: nonblocking mode only applies to the input. We
                 * must not do the output in nonblocking mode as then we
                 * could get stuck data in the internal pipe:
                 */
                ret = do_splice_from(pipe, out, &out_off, read_len,
                                     flags & ~SPLICE_F_NONBLOCK);
                if (unlikely(ret < 0))
                        goto out_release;

                bytes += ret;
                len -= ret;

                /*
                 * In nonblocking mode, if we got back a short read then
                 * that was due to either an IO error or due to the
                 * pagecache entry not being there. In the IO error case
                 * the _next_ splice attempt will produce a clean IO error
                 * return value (not a short read), so in both cases it's
                 * correct to break out of the loop here:
                 */
                if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
                        break;
        }

        /* Success: reset the internal pipe for the next direct splice */
        pipe->nrbufs = pipe->curbuf = 0;

        return bytes;

out_release:
        /*
         * If we did an incomplete transfer we must release
         * the pipe buffers in question:
         */
        for (i = 0; i < PIPE_BUFFERS; i++) {
                struct pipe_buffer *buf = pipe->bufs + i;

                if (buf->ops) {
                        buf->ops->release(pipe, buf);
                        buf->ops = NULL;
                }
        }
        pipe->nrbufs = pipe->curbuf = 0;

        /*
         * If we transferred some data, return the number of bytes:
         */
        if (bytes > 0)
                return bytes;

        return ret;
}
962
963 EXPORT_SYMBOL(do_splice_direct);
964
965 /*
966  * Determine where to splice to/from.
967  */
968 static long do_splice(struct file *in, loff_t __user *off_in,
969                       struct file *out, loff_t __user *off_out,
970                       size_t len, unsigned int flags)
971 {
972         struct pipe_inode_info *pipe;
973         loff_t offset, *off;
974         long ret;
975
976         pipe = in->f_dentry->d_inode->i_pipe;
977         if (pipe) {
978                 if (off_in)
979                         return -ESPIPE;
980                 if (off_out) {
981                         if (out->f_op->llseek == no_llseek)
982                                 return -EINVAL;
983                         if (copy_from_user(&offset, off_out, sizeof(loff_t)))
984                                 return -EFAULT;
985                         off = &offset;
986                 } else
987                         off = &out->f_pos;
988
989                 ret = do_splice_from(pipe, out, off, len, flags);
990
991                 if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
992                         ret = -EFAULT;
993
994                 return ret;
995         }
996
997         pipe = out->f_dentry->d_inode->i_pipe;
998         if (pipe) {
999                 if (off_out)
1000                         return -ESPIPE;
1001                 if (off_in) {
1002                         if (in->f_op->llseek == no_llseek)
1003                                 return -EINVAL;
1004                         if (copy_from_user(&offset, off_in, sizeof(loff_t)))
1005                                 return -EFAULT;
1006                         off = &offset;
1007                 } else
1008                         off = &in->f_pos;
1009
1010                 ret = do_splice_to(in, off, pipe, len, flags);
1011
1012                 if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
1013                         ret = -EFAULT;
1014
1015                 return ret;
1016         }
1017
1018         return -EINVAL;
1019 }
1020
1021 asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
1022                            int fd_out, loff_t __user *off_out,
1023                            size_t len, unsigned int flags)
1024 {
1025         long error;
1026         struct file *in, *out;
1027         int fput_in, fput_out;
1028
1029         if (unlikely(!len))
1030                 return 0;
1031
1032         error = -EBADF;
1033         in = fget_light(fd_in, &fput_in);
1034         if (in) {
1035                 if (in->f_mode & FMODE_READ) {
1036                         out = fget_light(fd_out, &fput_out);
1037                         if (out) {
1038                                 if (out->f_mode & FMODE_WRITE)
1039                                         error = do_splice(in, off_in,
1040                                                           out, off_out,
1041                                                           len, flags);
1042                                 fput_light(out, fput_out);
1043                         }
1044                 }
1045
1046                 fput_light(in, fput_in);
1047         }
1048
1049         return error;
1050 }
1051
/*
 * Link contents of ipipe to opipe.
 *
 * Backend for tee(): duplicates up to 'len' bytes worth of buffer
 * references from 'ipipe' onto 'opipe' without copying any data -
 * each duplicated pipe_buffer takes an extra reference via ->get().
 * Note that ipipe->curbuf/nrbufs are never advanced here, so the
 * input pipe's contents are left in place for its readers.
 *
 * Returns the number of bytes linked, or a negative errno if nothing
 * was linked (-EPIPE no readers, -EAGAIN would block, -ERESTARTSYS
 * signal pending).
 */
static int link_pipe(struct pipe_inode_info *ipipe,
                     struct pipe_inode_info *opipe,
                     size_t len, unsigned int flags)
{
        struct pipe_buffer *ibuf, *obuf;
        int ret, do_wakeup, i, ipipe_first;

        ret = do_wakeup = ipipe_first = 0;

        /*
         * Potential ABBA deadlock, work around it by ordering lock
         * grabbing by inode address. Otherwise two different processes
         * could deadlock (one doing tee from A -> B, the other from B -> A).
         * ipipe_first records which lock was taken first, so we know
         * which side may safely sleep (and thus drop its lock) below.
         */
        if (ipipe->inode < opipe->inode) {
                ipipe_first = 1;
                mutex_lock(&ipipe->inode->i_mutex);
                mutex_lock(&opipe->inode->i_mutex);
        } else {
                mutex_lock(&opipe->inode->i_mutex);
                mutex_lock(&ipipe->inode->i_mutex);
        }

        for (i = 0;; i++) {
                /* No readers left on the output: raise SIGPIPE, as write() would */
                if (!opipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }
                /* Any input buffers beyond the 'i' we already linked? */
                if (ipipe->nrbufs - i) {
                        ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));

                        /*
                         * If we have room, fill this buffer
                         */
                        if (opipe->nrbufs < PIPE_BUFFERS) {
                                int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

                                /*
                                 * Get a reference to this pipe buffer,
                                 * so we can copy the contents over.
                                 */
                                ibuf->ops->get(ipipe, ibuf);

                                /* Share the page: copy the descriptor, not the data */
                                obuf = opipe->bufs + nbuf;
                                *obuf = *ibuf;

                                /* Don't link more than the caller asked for */
                                if (obuf->len > len)
                                        obuf->len = len;

                                opipe->nrbufs++;
                                do_wakeup = 1;
                                ret += obuf->len;
                                len -= obuf->len;

                                if (!len)
                                        break;
                                if (opipe->nrbufs < PIPE_BUFFERS)
                                        continue;
                        }

                        /*
                         * We have input available, but no output room.
                         * If we already copied data, return that. If we
                         * need to drop the opipe lock, it must be ordered
                         * last to avoid deadlocks.
                         */
                        if ((flags & SPLICE_F_NONBLOCK) || !ipipe_first) {
                                if (!ret)
                                        ret = -EAGAIN;
                                break;
                        }
                        if (signal_pending(current)) {
                                if (!ret)
                                        ret = -ERESTARTSYS;
                                break;
                        }
                        /* Wake output readers before we go to sleep on opipe */
                        if (do_wakeup) {
                                smp_mb();
                                if (waitqueue_active(&opipe->wait))
                                        wake_up_interruptible(&opipe->wait);
                                kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
                                do_wakeup = 0;
                        }

                        opipe->waiting_writers++;
                        pipe_wait(opipe);
                        opipe->waiting_writers--;
                        continue;
                }

                /*
                 * No input buffers, do the usual checks for available
                 * writers and blocking and wait if necessary
                 */
                if (!ipipe->writers)
                        break;
                if (!ipipe->waiting_writers) {
                        if (ret)
                                break;
                }
                /*
                 * pipe_wait() drops the ipipe mutex. To avoid deadlocks
                 * with another process, we can only safely do that if
                 * the ipipe lock is ordered last.
                 */
                if ((flags & SPLICE_F_NONBLOCK) || ipipe_first) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                /* Prod input-side writers, then wait for more input data */
                if (waitqueue_active(&ipipe->wait))
                        wake_up_interruptible_sync(&ipipe->wait);
                kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);

                pipe_wait(ipipe);
        }

        mutex_unlock(&ipipe->inode->i_mutex);
        mutex_unlock(&opipe->inode->i_mutex);

        /* Final wakeup of output readers if we linked anything since the last one */
        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&opipe->wait))
                        wake_up_interruptible(&opipe->wait);
                kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
        }

        return ret;
}
1192
1193 /*
1194  * This is a tee(1) implementation that works on pipes. It doesn't copy
1195  * any data, it simply references the 'in' pages on the 'out' pipe.
1196  * The 'flags' used are the SPLICE_F_* variants, currently the only
1197  * applicable one is SPLICE_F_NONBLOCK.
1198  */
1199 static long do_tee(struct file *in, struct file *out, size_t len,
1200                    unsigned int flags)
1201 {
1202         struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
1203         struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;
1204
1205         /*
1206          * Link ipipe to the two output pipes, consuming as we go along.
1207          */
1208         if (ipipe && opipe)
1209                 return link_pipe(ipipe, opipe, len, flags);
1210
1211         return -EINVAL;
1212 }
1213
1214 asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
1215 {
1216         struct file *in;
1217         int error, fput_in;
1218
1219         if (unlikely(!len))
1220                 return 0;
1221
1222         error = -EBADF;
1223         in = fget_light(fdin, &fput_in);
1224         if (in) {
1225                 if (in->f_mode & FMODE_READ) {
1226                         int fput_out;
1227                         struct file *out = fget_light(fdout, &fput_out);
1228
1229                         if (out) {
1230                                 if (out->f_mode & FMODE_WRITE)
1231                                         error = do_tee(in, out, len, flags);
1232                                 fput_light(out, fput_out);
1233                         }
1234                 }
1235                 fput_light(in, fput_in);
1236         }
1237
1238         return error;
1239 }