/*
 * [PATCH] splice: offset fixes
 * (from pandora-kernel.git — fs/splice.c)
 */
1 /*
2  * "splice": joining two ropes together by interweaving their strands.
3  *
4  * This is the "extended pipe" functionality, where a pipe is used as
5  * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6  * buffer that you can use to transfer data from one end to the other.
7  *
8  * The traditional unix read/write is extended with a "splice()" operation
9  * that transfers data buffers to or from a pipe buffer.
10  *
11  * Named by Larry McVoy, original implementation from Linus, extended by
12  * Jens to support splicing to files, network, direct splicing, etc and
13  * fixing lots of bugs.
14  *
15  * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
16  * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17  * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
18  *
19  */
20 #include <linux/fs.h>
21 #include <linux/file.h>
22 #include <linux/pagemap.h>
23 #include <linux/pipe_fs_i.h>
24 #include <linux/mm_inline.h>
25 #include <linux/swap.h>
26 #include <linux/writeback.h>
27 #include <linux/buffer_head.h>
28 #include <linux/module.h>
29 #include <linux/syscalls.h>
30
/*
 * Passed to the actors (pipe_to_file/pipe_to_sendpage): per-call state
 * describing how much of the current pipe buffer to move and where.
 */
struct splice_desc {
	unsigned int len, total_len;	/* current and remaining length */
	unsigned int flags;		/* splice flags */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};
40
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 *
 * Returns 0 on success (the page has been unhashed from its mapping and
 * the buffer is flagged STOLEN|LRU), 1 on failure.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	/* caller is expected to hand us the page locked and uptodate */
	WARN_ON(!PageLocked(page));
	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate wont wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	/*
	 * NOTE(review): the return value of try_to_release_page() is
	 * ignored here; if it fails, remove_mapping() below is relied on
	 * to refuse the still-private page — verify.
	 */
	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}
74
75 static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
76                                         struct pipe_buffer *buf)
77 {
78         page_cache_release(buf->page);
79         buf->page = NULL;
80         buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
81 }
82
/*
 * Map the buffer's page for CPU access. If the page is not yet uptodate
 * (read IO still in flight), lock it to wait for completion and re-check.
 * Returns a kmap()'ed address on success, or ERR_PTR(-ENODATA) if the
 * page was truncated, ERR_PTR(-EIO) on a read error.
 */
static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		/* lock_page() waits for any in-flight read to finish */
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok afterall, fall through to mapping.
		 */
		unlock_page(page);
	}

	return kmap(page);
error:
	unlock_page(page);
	return ERR_PTR(err);
}
121
/*
 * Undo the kmap() done in page_cache_pipe_buf_map().
 */
static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	kunmap(buf->page);
}
127
/*
 * Take an additional reference on the page backing this pipe buffer.
 */
static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
				    struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
133
/*
 * Buffer operations for pipe buffers whose pages originate from the
 * page cache (as set up by move_to_pipe()). can_merge is 0: new data
 * is never appended in place to these buffers.
 */
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = page_cache_pipe_buf_get,
};
142
143 /*
144  * Pipe output worker. This sets up our pipe format with the page cache
145  * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
146  */
147 static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
148                             int nr_pages, unsigned long len,
149                             unsigned int offset, unsigned int flags)
150 {
151         int ret, do_wakeup, i;
152
153         ret = 0;
154         do_wakeup = 0;
155         i = 0;
156
157         if (pipe->inode)
158                 mutex_lock(&pipe->inode->i_mutex);
159
160         for (;;) {
161                 if (!pipe->readers) {
162                         send_sig(SIGPIPE, current, 0);
163                         if (!ret)
164                                 ret = -EPIPE;
165                         break;
166                 }
167
168                 if (pipe->nrbufs < PIPE_BUFFERS) {
169                         int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
170                         struct pipe_buffer *buf = pipe->bufs + newbuf;
171                         struct page *page = pages[i++];
172                         unsigned long this_len;
173
174                         this_len = PAGE_CACHE_SIZE - offset;
175                         if (this_len > len)
176                                 this_len = len;
177
178                         buf->page = page;
179                         buf->offset = offset;
180                         buf->len = this_len;
181                         buf->ops = &page_cache_pipe_buf_ops;
182                         pipe->nrbufs++;
183                         if (pipe->inode)
184                                 do_wakeup = 1;
185
186                         ret += this_len;
187                         len -= this_len;
188                         offset = 0;
189                         if (!--nr_pages)
190                                 break;
191                         if (!len)
192                                 break;
193                         if (pipe->nrbufs < PIPE_BUFFERS)
194                                 continue;
195
196                         break;
197                 }
198
199                 if (flags & SPLICE_F_NONBLOCK) {
200                         if (!ret)
201                                 ret = -EAGAIN;
202                         break;
203                 }
204
205                 if (signal_pending(current)) {
206                         if (!ret)
207                                 ret = -ERESTARTSYS;
208                         break;
209                 }
210
211                 if (do_wakeup) {
212                         smp_mb();
213                         if (waitqueue_active(&pipe->wait))
214                                 wake_up_interruptible_sync(&pipe->wait);
215                         kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
216                         do_wakeup = 0;
217                 }
218
219                 pipe->waiting_writers++;
220                 pipe_wait(pipe);
221                 pipe->waiting_writers--;
222         }
223
224         if (pipe->inode)
225                 mutex_unlock(&pipe->inode->i_mutex);
226
227         if (do_wakeup) {
228                 smp_mb();
229                 if (waitqueue_active(&pipe->wait))
230                         wake_up_interruptible(&pipe->wait);
231                 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
232         }
233
234         while (i < nr_pages)
235                 page_cache_release(pages[i++]);
236
237         return ret;
238 }
239
240 static int
241 __generic_file_splice_read(struct file *in, loff_t *ppos,
242                            struct pipe_inode_info *pipe, size_t len,
243                            unsigned int flags)
244 {
245         struct address_space *mapping = in->f_mapping;
246         unsigned int loff, offset, nr_pages;
247         struct page *pages[PIPE_BUFFERS];
248         struct page *page;
249         pgoff_t index, end_index;
250         loff_t isize;
251         size_t bytes;
252         int i, error;
253
254         index = *ppos >> PAGE_CACHE_SHIFT;
255         loff = offset = *ppos & ~PAGE_CACHE_MASK;
256         nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
257
258         if (nr_pages > PIPE_BUFFERS)
259                 nr_pages = PIPE_BUFFERS;
260
261         /*
262          * Initiate read-ahead on this page range. however, don't call into
263          * read-ahead if this is a non-zero offset (we are likely doing small
264          * chunk splice and the page is already there) for a single page.
265          */
266         if (!offset || nr_pages > 1)
267                 do_page_cache_readahead(mapping, in, index, nr_pages);
268
269         /*
270          * Now fill in the holes:
271          */
272         error = 0;
273         bytes = 0;
274         for (i = 0; i < nr_pages; i++, index++) {
275 find_page:
276                 /*
277                  * lookup the page for this index
278                  */
279                 page = find_get_page(mapping, index);
280                 if (!page) {
281                         /*
282                          * page didn't exist, allocate one
283                          */
284                         page = page_cache_alloc_cold(mapping);
285                         if (!page)
286                                 break;
287
288                         error = add_to_page_cache_lru(page, mapping, index,
289                                                 mapping_gfp_mask(mapping));
290                         if (unlikely(error)) {
291                                 page_cache_release(page);
292                                 break;
293                         }
294
295                         goto readpage;
296                 }
297
298                 /*
299                  * If the page isn't uptodate, we may need to start io on it
300                  */
301                 if (!PageUptodate(page)) {
302                         /*
303                          * If in nonblock mode then dont block on waiting
304                          * for an in-flight io page
305                          */
306                         if (flags & SPLICE_F_NONBLOCK)
307                                 break;
308
309                         lock_page(page);
310
311                         /*
312                          * page was truncated, stop here. if this isn't the
313                          * first page, we'll just complete what we already
314                          * added
315                          */
316                         if (!page->mapping) {
317                                 unlock_page(page);
318                                 page_cache_release(page);
319                                 break;
320                         }
321                         /*
322                          * page was already under io and is now done, great
323                          */
324                         if (PageUptodate(page)) {
325                                 unlock_page(page);
326                                 goto fill_it;
327                         }
328
329 readpage:
330                         /*
331                          * need to read in the page
332                          */
333                         error = mapping->a_ops->readpage(in, page);
334
335                         if (unlikely(error)) {
336                                 page_cache_release(page);
337                                 if (error == AOP_TRUNCATED_PAGE)
338                                         goto find_page;
339                                 break;
340                         }
341
342                         /*
343                          * i_size must be checked after ->readpage().
344                          */
345                         isize = i_size_read(mapping->host);
346                         end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
347                         if (unlikely(!isize || index > end_index)) {
348                                 page_cache_release(page);
349                                 break;
350                         }
351
352                         /*
353                          * if this is the last page, see if we need to shrink
354                          * the length and stop
355                          */
356                         if (end_index == index) {
357                                 loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
358                                 if (bytes + loff > isize) {
359                                         page_cache_release(page);
360                                         break;
361                                 }
362                                 /*
363                                  * force quit after adding this page
364                                  */
365                                 nr_pages = i;
366                         }
367                 }
368 fill_it:
369                 pages[i] = page;
370                 bytes += PAGE_CACHE_SIZE - loff;
371                 loff = 0;
372         }
373
374         if (i)
375                 return move_to_pipe(pipe, pages, i, bytes, offset, flags);
376
377         return error;
378 }
379
/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @ppos:	position in @in to read from, advanced by the amount spliced
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe. Returns the
 * number of bytes spliced, or a negative errno if nothing was spliced.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;

	while (len) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

		if (ret < 0)
			break;
		else if (!ret) {
			/*
			 * Nothing moved. Return what we already spliced, or
			 * -EAGAIN in nonblocking mode.
			 *
			 * NOTE(review): in blocking mode with spliced == 0 we
			 * fall through with ret == 0 and retry immediately,
			 * which can busy-loop if no progress is possible —
			 * verify callers clamp len to i_size first (as
			 * do_splice_to() does).
			 */
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}

		/* made progress: advance the file position and keep going */
		*ppos += ret;
		len -= ret;
		spliced += ret;
	}

	/* report bytes moved if any, otherwise the last status/error */
	if (spliced)
		return spliced;

	return ret;
}
423
424 EXPORT_SYMBOL(generic_file_splice_read);
425
426 /*
427  * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
428  * using sendpage().
429  */
430 static int pipe_to_sendpage(struct pipe_inode_info *info,
431                             struct pipe_buffer *buf, struct splice_desc *sd)
432 {
433         struct file *file = sd->file;
434         loff_t pos = sd->pos;
435         unsigned int offset;
436         ssize_t ret;
437         void *ptr;
438         int more;
439
440         /*
441          * Sub-optimal, but we are limited by the pipe ->map. We don't
442          * need a kmap'ed buffer here, we just want to make sure we
443          * have the page pinned if the pipe page originates from the
444          * page cache.
445          */
446         ptr = buf->ops->map(file, info, buf);
447         if (IS_ERR(ptr))
448                 return PTR_ERR(ptr);
449
450         offset = pos & ~PAGE_CACHE_MASK;
451         more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
452
453         ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos,more);
454
455         buf->ops->unmap(info, buf);
456         if (ret == sd->len)
457                 return 0;
458
459         return -EIO;
460 }
461
462 /*
463  * This is a little more tricky than the file -> pipe splicing. There are
464  * basically three cases:
465  *
466  *      - Destination page already exists in the address space and there
467  *        are users of it. For that case we have no other option that
468  *        copying the data. Tough luck.
469  *      - Destination page already exists in the address space, but there
470  *        are no users of it. Make sure it's uptodate, then drop it. Fall
471  *        through to last case.
472  *      - Destination page does not exist, we can add the pipe page to
473  *        the page cache and avoid the copy.
474  *
475  * If asked to move pages to the output file (SPLICE_F_MOVE is set in
476  * sd->flags), we attempt to migrate pages from the pipe to the output
477  * file address space page cache. This is possible if no one else has
478  * the pipe page referenced outside of the pipe and page cache. If
479  * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
480  * a new page in the output file page cache and fill/dirty that.
481  */
482 static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
483                         struct splice_desc *sd)
484 {
485         struct file *file = sd->file;
486         struct address_space *mapping = file->f_mapping;
487         gfp_t gfp_mask = mapping_gfp_mask(mapping);
488         unsigned int offset;
489         struct page *page;
490         pgoff_t index;
491         char *src;
492         int ret;
493
494         /*
495          * make sure the data in this buffer is uptodate
496          */
497         src = buf->ops->map(file, info, buf);
498         if (IS_ERR(src))
499                 return PTR_ERR(src);
500
501         index = sd->pos >> PAGE_CACHE_SHIFT;
502         offset = sd->pos & ~PAGE_CACHE_MASK;
503
504         /*
505          * Reuse buf page, if SPLICE_F_MOVE is set.
506          */
507         if (sd->flags & SPLICE_F_MOVE) {
508                 /*
509                  * If steal succeeds, buf->page is now pruned from the vm
510                  * side (LRU and page cache) and we can reuse it.
511                  */
512                 if (buf->ops->steal(info, buf))
513                         goto find_page;
514
515                 /*
516                  * this will also set the page locked
517                  */
518                 page = buf->page;
519                 if (add_to_page_cache(page, mapping, index, gfp_mask))
520                         goto find_page;
521
522                 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
523                         lru_cache_add(page);
524         } else {
525 find_page:
526                 ret = -ENOMEM;
527                 page = find_or_create_page(mapping, index, gfp_mask);
528                 if (!page)
529                         goto out_nomem;
530
531                 /*
532                  * If the page is uptodate, it is also locked. If it isn't
533                  * uptodate, we can mark it uptodate if we are filling the
534                  * full page. Otherwise we need to read it in first...
535                  */
536                 if (!PageUptodate(page)) {
537                         if (sd->len < PAGE_CACHE_SIZE) {
538                                 ret = mapping->a_ops->readpage(file, page);
539                                 if (unlikely(ret))
540                                         goto out;
541
542                                 lock_page(page);
543
544                                 if (!PageUptodate(page)) {
545                                         /*
546                                          * Page got invalidated, repeat.
547                                          */
548                                         if (!page->mapping) {
549                                                 unlock_page(page);
550                                                 page_cache_release(page);
551                                                 goto find_page;
552                                         }
553                                         ret = -EIO;
554                                         goto out;
555                                 }
556                         } else {
557                                 WARN_ON(!PageLocked(page));
558                                 SetPageUptodate(page);
559                         }
560                 }
561         }
562
563         ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
564         if (ret == AOP_TRUNCATED_PAGE) {
565                 page_cache_release(page);
566                 goto find_page;
567         } else if (ret)
568                 goto out;
569
570         if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
571                 char *dst = kmap_atomic(page, KM_USER0);
572
573                 memcpy(dst + offset, src + buf->offset, sd->len);
574                 flush_dcache_page(page);
575                 kunmap_atomic(dst, KM_USER0);
576         }
577
578         ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
579         if (ret == AOP_TRUNCATED_PAGE) {
580                 page_cache_release(page);
581                 goto find_page;
582         } else if (ret)
583                 goto out;
584
585         mark_page_accessed(page);
586         balance_dirty_pages_ratelimited(mapping);
587 out:
588         if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
589                 page_cache_release(page);
590                 unlock_page(page);
591         }
592 out_nomem:
593         buf->ops->unmap(info, buf);
594         return ret;
595 }
596
/*
 * An actor moves the data of one pipe buffer to its destination.
 * Returns 0 on success — move_from_pipe() then treats sd->len bytes as
 * consumed — or a negative errno (-ENODATA means "no more data" and is
 * not reported to the caller as an error).
 */
typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);
599
/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 *
 * Consumes up to 'len' bytes from 'pipe', invoking 'actor' once per
 * chunk. Returns the number of bytes moved, or a negative errno if
 * nothing was moved. Does not update *ppos; callers do that.
 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			      loff_t *ppos, size_t len, unsigned int flags,
			      splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = *ppos;

	/* anonymous pipes (no backing inode) need no locking */
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			/* never hand the actor more than was asked for */
			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err) {
				/* -ENODATA means "done", not an error */
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			/* actor consumed sd.len bytes of this buffer */
			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;

			if (!buf->len) {
				/* buffer fully drained: retire the slot */
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		/* more buffered data? keep draining before sleeping */
		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/*
			 * no pending writers: return what we have rather
			 * than waiting for data that may never come
			 */
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			/* wake writers before sleeping so the pipe refills */
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}
703
704 /**
705  * generic_file_splice_write - splice data from a pipe to a file
706  * @pipe:       pipe info
707  * @out:        file to write to
708  * @len:        number of bytes to splice
709  * @flags:      splice modifier flags
710  *
711  * Will either move or copy pages (determined by @flags options) from
712  * the given pipe inode to the given file.
713  *
714  */
715 ssize_t
716 generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
717                           loff_t *ppos, size_t len, unsigned int flags)
718 {
719         struct address_space *mapping = out->f_mapping;
720         ssize_t ret;
721
722         ret = move_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
723         if (ret > 0) {
724                 struct inode *inode = mapping->host;
725
726                 *ppos += ret;
727
728                 /*
729                  * If file or inode is SYNC and we actually wrote some data,
730                  * sync it.
731                  */
732                 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
733                         int err;
734
735                         mutex_lock(&inode->i_mutex);
736                         err = generic_osync_inode(inode, mapping,
737                                                   OSYNC_METADATA|OSYNC_DATA);
738                         mutex_unlock(&inode->i_mutex);
739
740                         if (err)
741                                 ret = err;
742                 }
743         }
744
745         return ret;
746 }
747
748 EXPORT_SYMBOL(generic_file_splice_write);
749
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @ppos:	position, passed through to move_from_pipe()
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return move_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}
766
767 EXPORT_SYMBOL(generic_splice_sendpage);
768
769 /*
770  * Attempt to initiate a splice from pipe to file.
771  */
772 static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
773                            loff_t *ppos, size_t len, unsigned int flags)
774 {
775         int ret;
776
777         if (unlikely(!out->f_op || !out->f_op->splice_write))
778                 return -EINVAL;
779
780         if (unlikely(!(out->f_mode & FMODE_WRITE)))
781                 return -EBADF;
782
783         ret = rw_verify_area(WRITE, out, ppos, len);
784         if (unlikely(ret < 0))
785                 return ret;
786
787         return out->f_op->splice_write(pipe, out, ppos, len, flags);
788 }
789
790 /*
791  * Attempt to initiate a splice from a file to a pipe.
792  */
793 static long do_splice_to(struct file *in, loff_t *ppos,
794                          struct pipe_inode_info *pipe, size_t len,
795                          unsigned int flags)
796 {
797         loff_t isize, left;
798         int ret;
799
800         if (unlikely(!in->f_op || !in->f_op->splice_read))
801                 return -EINVAL;
802
803         if (unlikely(!(in->f_mode & FMODE_READ)))
804                 return -EBADF;
805
806         ret = rw_verify_area(READ, in, ppos, len);
807         if (unlikely(ret < 0))
808                 return ret;
809
810         isize = i_size_read(in->f_mapping->host);
811         if (unlikely(*ppos >= isize))
812                 return 0;
813         
814         left = isize - *ppos;
815         if (unlikely(left < len))
816                 len = left;
817
818         return in->f_op->splice_read(in, ppos, pipe, len, flags);
819 }
820
/*
 * do_splice_direct - splice data between two non-pipe file descriptors
 * @in:		file to read from (must be S_ISREG or S_ISBLK)
 * @ppos:	position in @in to start reading at; advanced by do_splice_to()
 * @out:	file to write the data to
 * @len:	number of bytes to transfer
 * @flags:	SPLICE_F_* modifier flags
 *
 * Transfers data from @in to @out through a kernel-internal pipe that is
 * cached on the calling task (current->splice_pipe), so neither endpoint
 * has to be a pipe. This is the backend for the in-kernel sendfile path.
 *
 * Returns the number of bytes moved, or a negative errno. On a partial
 * failure, any bytes already moved are returned instead of the error.
 */
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	loff_t out_off;
	umode_t i_mode;
	int i;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		/*
		 * Lazily allocate the per-task pipe on first use; it is
		 * cached on current and reused by later calls (presumably
		 * torn down at task exit — not visible here).
		 */
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the move_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 *
	 * NOTE(review): out_off starts at 0, so the output side always
	 * begins writing at offset 0 of @out rather than at out->f_pos —
	 * confirm this matches the semantics callers (sendfile) expect.
	 */
	ret = 0;
	bytes = 0;
	out_off = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, &out_off, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	/* Whole transfer done: reset the cached pipe for the next caller. */
	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}
932
933 EXPORT_SYMBOL(do_splice_direct);
934
935 /*
936  * Determine where to splice to/from.
937  */
938 static long do_splice(struct file *in, loff_t __user *off_in,
939                       struct file *out, loff_t __user *off_out,
940                       size_t len, unsigned int flags)
941 {
942         struct pipe_inode_info *pipe;
943         loff_t offset, *off;
944         long ret;
945
946         pipe = in->f_dentry->d_inode->i_pipe;
947         if (pipe) {
948                 if (off_in)
949                         return -ESPIPE;
950                 if (off_out) {
951                         if (out->f_op->llseek == no_llseek)
952                                 return -EINVAL;
953                         if (copy_from_user(&offset, off_out, sizeof(loff_t)))
954                                 return -EFAULT;
955                         off = &offset;
956                 } else
957                         off = &out->f_pos;
958
959                 ret = do_splice_from(pipe, out, off, len, flags);
960
961                 if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
962                         ret = -EFAULT;
963
964                 return ret;
965         }
966
967         pipe = out->f_dentry->d_inode->i_pipe;
968         if (pipe) {
969                 if (off_out)
970                         return -ESPIPE;
971                 if (off_in) {
972                         if (in->f_op->llseek == no_llseek)
973                                 return -EINVAL;
974                         if (copy_from_user(&offset, off_in, sizeof(loff_t)))
975                                 return -EFAULT;
976                         off = &offset;
977                 } else
978                         off = &in->f_pos;
979
980                 ret = do_splice_to(in, off, pipe, len, flags);
981
982                 if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
983                         ret = -EFAULT;
984
985                 return ret;
986         }
987
988         return -EINVAL;
989 }
990
991 asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
992                            int fd_out, loff_t __user *off_out,
993                            size_t len, unsigned int flags)
994 {
995         long error;
996         struct file *in, *out;
997         int fput_in, fput_out;
998
999         if (unlikely(!len))
1000                 return 0;
1001
1002         error = -EBADF;
1003         in = fget_light(fd_in, &fput_in);
1004         if (in) {
1005                 if (in->f_mode & FMODE_READ) {
1006                         out = fget_light(fd_out, &fput_out);
1007                         if (out) {
1008                                 if (out->f_mode & FMODE_WRITE)
1009                                         error = do_splice(in, off_in,
1010                                                           out, off_out,
1011                                                           len, flags);
1012                                 fput_light(out, fput_out);
1013                         }
1014                 }
1015
1016                 fput_light(in, fput_in);
1017         }
1018
1019         return error;
1020 }
1021
/*
 * Link contents of ipipe to opipe: duplicate up to @len bytes worth of
 * ipipe's buffers into opipe by grabbing references to the pages, without
 * consuming the data from ipipe (this backs the tee(2) syscall).
 *
 * Returns the number of bytes linked, or a negative errno if nothing
 * was linked (-EPIPE, -EAGAIN, -ERESTARTSYS).
 *
 * NOTE(review): assumes ipipe != opipe. If both point at the same pipe,
 * the address-ordered locking below takes the same i_mutex twice and
 * self-deadlocks — verify that callers never pass the same pipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret, do_wakeup, i, ipipe_first;

	ret = do_wakeup = ipipe_first = 0;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	if (ipipe->inode < opipe->inode) {
		ipipe_first = 1;
		mutex_lock(&ipipe->inode->i_mutex);
		mutex_lock(&opipe->inode->i_mutex);
	} else {
		mutex_lock(&opipe->inode->i_mutex);
		mutex_lock(&ipipe->inode->i_mutex);
	}

	/*
	 * 'i' indexes the next ipipe buffer to link (relative to curbuf).
	 * NOTE(review): the for(;;i++) increments 'i' on *every* iteration,
	 * including the continue after waiting for output room below — it
	 * looks like that path could skip an input buffer; confirm.
	 */
	for (i = 0;; i++) {
		/* no readers left on the output pipe: raise SIGPIPE */
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		if (ipipe->nrbufs - i) {
			ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));

			/*
			 * If we have room, fill this buffer
			 */
			if (opipe->nrbufs < PIPE_BUFFERS) {
				int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

				/*
				 * Get a reference to this pipe buffer,
				 * so we can copy the contents over.
				 */
				ibuf->ops->get(ipipe, ibuf);

				/* share the page: copy the descriptor only */
				obuf = opipe->bufs + nbuf;
				*obuf = *ibuf;

				/* trim the copy to the bytes still wanted */
				if (obuf->len > len)
					obuf->len = len;

				opipe->nrbufs++;
				do_wakeup = 1;
				ret += obuf->len;
				len -= obuf->len;

				if (!len)
					break;
				if (opipe->nrbufs < PIPE_BUFFERS)
					continue;
			}

			/*
			 * We have input available, but no output room.
			 * If we already copied data, return that. If we
			 * need to drop the opipe lock, it must be ordered
			 * last to avoid deadlocks.
			 */
			if ((flags & SPLICE_F_NONBLOCK) || !ipipe_first) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			/* wake readers of opipe before we sleep on it */
			if (do_wakeup) {
				smp_mb();
				if (waitqueue_active(&opipe->wait))
					wake_up_interruptible(&opipe->wait);
				kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
				do_wakeup = 0;
			}

			opipe->waiting_writers++;
			pipe_wait(opipe);
			opipe->waiting_writers--;
			continue;
		}

		/*
		 * No input buffers, do the usual checks for available
		 * writers and blocking and wait if necessary
		 */
		if (!ipipe->writers)
			break;
		if (!ipipe->waiting_writers) {
			if (ret)
				break;
		}
		/*
		 * pipe_wait() drops the ipipe mutex. To avoid deadlocks
		 * with another process, we can only safely do that if
		 * the ipipe lock is ordered last.
		 */
		if ((flags & SPLICE_F_NONBLOCK) || ipipe_first) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* nudge writers of ipipe so data can arrive */
		if (waitqueue_active(&ipipe->wait))
			wake_up_interruptible_sync(&ipipe->wait);
		kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);

		pipe_wait(ipipe);
	}

	mutex_unlock(&ipipe->inode->i_mutex);
	mutex_unlock(&opipe->inode->i_mutex);

	/* final wakeup for opipe readers, done outside the locks */
	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}
1162
1163 /*
1164  * This is a tee(1) implementation that works on pipes. It doesn't copy
1165  * any data, it simply references the 'in' pages on the 'out' pipe.
1166  * The 'flags' used are the SPLICE_F_* variants, currently the only
1167  * applicable one is SPLICE_F_NONBLOCK.
1168  */
1169 static long do_tee(struct file *in, struct file *out, size_t len,
1170                    unsigned int flags)
1171 {
1172         struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
1173         struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;
1174
1175         /*
1176          * Link ipipe to the two output pipes, consuming as we go along.
1177          */
1178         if (ipipe && opipe)
1179                 return link_pipe(ipipe, opipe, len, flags);
1180
1181         return -EINVAL;
1182 }
1183
1184 asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
1185 {
1186         struct file *in;
1187         int error, fput_in;
1188
1189         if (unlikely(!len))
1190                 return 0;
1191
1192         error = -EBADF;
1193         in = fget_light(fdin, &fput_in);
1194         if (in) {
1195                 if (in->f_mode & FMODE_READ) {
1196                         int fput_out;
1197                         struct file *out = fget_light(fdout, &fput_out);
1198
1199                         if (out) {
1200                                 if (out->f_mode & FMODE_WRITE)
1201                                         error = do_tee(in, out, len, flags);
1202                                 fput_light(out, fput_out);
1203                         }
1204                 }
1205                 fput_light(in, fput_in);
1206         }
1207
1208         return error;
1209 }