[PATCH] splice: speedup __generic_file_splice_read
[pandora-kernel.git] fs/splice.c
1 /*
2  * "splice": joining two ropes together by interweaving their strands.
3  *
4  * This is the "extended pipe" functionality, where a pipe is used as
5  * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6  * buffer that you can use to transfer data from one end to the other.
7  *
8  * The traditional unix read/write is extended with a "splice()" operation
9  * that transfers data buffers to or from a pipe buffer.
10  *
11  * Named by Larry McVoy, original implementation from Linus, extended by
12  * Jens to support splicing to files and to fix the initial implementation
13  * bugs.
14  *
15  * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
16  * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
17  *
18  */
19 #include <linux/fs.h>
20 #include <linux/file.h>
21 #include <linux/pagemap.h>
22 #include <linux/pipe_fs_i.h>
23 #include <linux/mm_inline.h>
24 #include <linux/swap.h>
25 #include <linux/writeback.h>
26 #include <linux/buffer_head.h>
27 #include <linux/module.h>
28 #include <linux/syscalls.h>
29
30 /*
31  * Passed to the actors
32  */
33 struct splice_desc {
34         unsigned int len, total_len;    /* current and remaining length */
35         unsigned int flags;             /* splice flags */
36         struct file *file;              /* file to read/write */
37         loff_t pos;                     /* file position */
38 };
39
40 /*
41  * Attempt to steal a page from a pipe buffer. This should perhaps go into
42  * a vm helper function, it's already simplified quite a bit by the
43  * addition of remove_mapping(). If success is returned, the caller may
44  * attempt to reuse this page for another destination.
45  */
46 static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
47                                      struct pipe_buffer *buf)
48 {
49         struct page *page = buf->page;
50         struct address_space *mapping = page_mapping(page);
51
52         WARN_ON(!PageLocked(page));
53         WARN_ON(!PageUptodate(page));
54
55         /*
56          * At least for ext2 with nobh option, we need to wait on writeback
57          * completing on this page, since we'll remove it from the pagecache.
58          * Otherwise truncate won't wait on the page, allowing the disk
59          * blocks to be reused by someone else before we actually wrote our
60          * data to them. Filesystem corruption ensues.
61          */
62         wait_on_page_writeback(page);
63
64         if (PagePrivate(page))
65                 try_to_release_page(page, mapping_gfp_mask(mapping));
66
67         if (!remove_mapping(mapping, page))
68                 return 1;
69
70         buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
71         return 0;
72 }
73
74 static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
75                                         struct pipe_buffer *buf)
76 {
77         page_cache_release(buf->page);
78         buf->page = NULL;
79         buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
80 }
81
82 static void *page_cache_pipe_buf_map(struct file *file,
83                                      struct pipe_inode_info *info,
84                                      struct pipe_buffer *buf)
85 {
86         struct page *page = buf->page;
87         int err;
88
89         if (!PageUptodate(page)) {
90                 lock_page(page);
91
92                 /*
93                  * Page got truncated/unhashed. This will cause a 0-byte
94                  * splice if this is the first page
95                  */
96                 if (!page->mapping) {
97                         err = -ENODATA;
98                         goto error;
99                 }
100
101                 /*
102                  * uh oh, read-error from disk
103                  */
104                 if (!PageUptodate(page)) {
105                         err = -EIO;
106                         goto error;
107                 }
108
109                 /*
110                  * page is ok after all, fall through to mapping
111                  */
112                 unlock_page(page);
113         }
114
115         return kmap(page);
116 error:
117         unlock_page(page);
118         return ERR_PTR(err);
119 }
120
121 static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
122                                       struct pipe_buffer *buf)
123 {
124         kunmap(buf->page);
125 }
126
127 static struct pipe_buf_operations page_cache_pipe_buf_ops = {
128         .can_merge = 0,
129         .map = page_cache_pipe_buf_map,
130         .unmap = page_cache_pipe_buf_unmap,
131         .release = page_cache_pipe_buf_release,
132         .steal = page_cache_pipe_buf_steal,
133 };
134
135 /*
136  * Pipe output worker. This sets up our pipe format with the page cache
137  * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
138  */
139 static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
140                             int nr_pages, unsigned long offset,
141                             unsigned long len, unsigned int flags)
142 {
143         int ret, do_wakeup, i;
144
145         ret = 0;
146         do_wakeup = 0;
147         i = 0;
148
149         if (pipe->inode)
150                 mutex_lock(&pipe->inode->i_mutex);
151
152         for (;;) {
153                 int bufs;
154
155                 if (!pipe->readers) {
156                         send_sig(SIGPIPE, current, 0);
157                         if (!ret)
158                                 ret = -EPIPE;
159                         break;
160                 }
161
162                 bufs = pipe->nrbufs;
163                 if (bufs < PIPE_BUFFERS) {
164                         int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS - 1);
165                         struct pipe_buffer *buf = pipe->bufs + newbuf;
166                         struct page *page = pages[i++];
167                         unsigned long this_len;
168
169                         this_len = PAGE_CACHE_SIZE - offset;
170                         if (this_len > len)
171                                 this_len = len;
172
173                         buf->page = page;
174                         buf->offset = offset;
175                         buf->len = this_len;
176                         buf->ops = &page_cache_pipe_buf_ops;
177                         pipe->nrbufs = ++bufs;
178                         do_wakeup = 1;
179
180                         ret += this_len;
181                         len -= this_len;
182                         offset = 0;
183                         if (!--nr_pages)
184                                 break;
185                         if (!len)
186                                 break;
187                         if (bufs < PIPE_BUFFERS)
188                                 continue;
189
190                         break;
191                 }
192
193                 if (flags & SPLICE_F_NONBLOCK) {
194                         if (!ret)
195                                 ret = -EAGAIN;
196                         break;
197                 }
198
199                 if (signal_pending(current)) {
200                         if (!ret)
201                                 ret = -ERESTARTSYS;
202                         break;
203                 }
204
205                 if (do_wakeup) {
206                         smp_mb();
207                         if (waitqueue_active(&pipe->wait))
208                                 wake_up_interruptible_sync(&pipe->wait);
209                         kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
210                         do_wakeup = 0;
211                 }
212
213                 pipe->waiting_writers++;
214                 pipe_wait(pipe);
215                 pipe->waiting_writers--;
216         }
217
218         if (pipe->inode)
219                 mutex_unlock(&pipe->inode->i_mutex);
220
221         if (do_wakeup) {
222                 smp_mb();
223                 if (waitqueue_active(&pipe->wait))
224                         wake_up_interruptible(&pipe->wait);
225                 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
226         }
227
228         while (i < nr_pages)
229                 page_cache_release(pages[i++]);
230
231         return ret;
232 }
233
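The newbuf computation above, (pipe->curbuf + bufs) & (PIPE_BUFFERS - 1), treats the pipe's buffer array as a power-of-two ring: the next free slot simply wraps past the end of the array. A minimal standalone sketch of that index arithmetic, assuming the PIPE_BUFFERS value of 16 used by pipes of this vintage (illustration only, not part of the kernel source):

#include <stdio.h>

#define PIPE_BUFFERS 16   /* assumption: power-of-two ring size, as in pipe_fs_i.h */

int main(void)
{
        int curbuf = 14;      /* index of the oldest occupied slot */
        int nrbufs = 3;       /* slots currently in use: 14, 15, 0 */
        int i;

        /* the slots a writer would fill next wrap around the end of the array */
        for (i = 0; i < 4; i++) {
                int newbuf = (curbuf + nrbufs + i) & (PIPE_BUFFERS - 1);
                printf("next free slot %d -> array index %d\n", i, newbuf);
        }
        return 0;
}
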
234 static int
235 __generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
236                            size_t len, unsigned int flags)
237 {
238         struct address_space *mapping = in->f_mapping;
239         unsigned int offset, nr_pages;
240         struct page *pages[PIPE_BUFFERS];
241         struct page *page;
242         pgoff_t index;
243         int i, error;
244
245         index = in->f_pos >> PAGE_CACHE_SHIFT;
246         offset = in->f_pos & ~PAGE_CACHE_MASK;
247         nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
248
249         if (nr_pages > PIPE_BUFFERS)
250                 nr_pages = PIPE_BUFFERS;
251
252         /*
253          * Initiate read-ahead on this page range. However, don't call into
254          * read-ahead for a single page at a non-zero offset (we are likely
255          * doing a small-chunk splice and the page is already there).
256          */
257         if (!offset || nr_pages > 1)
258                 do_page_cache_readahead(mapping, in, index, nr_pages);
259
260         /*
261          * now fill in the holes
262          */
263         error = 0;
264         for (i = 0; i < nr_pages; i++, index++) {
265 find_page:
266                 /*
267                  * lookup the page for this index
268                  */
269                 page = find_get_page(mapping, index);
270                 if (!page) {
271                         /*
272                          * If in nonblock mode then don't block on
273                          * readpage (we've kicked readahead so there
274                          * will be asynchronous progress):
275                          */
276                         if (flags & SPLICE_F_NONBLOCK)
277                                 break;
278
279                         /*
280                          * page didn't exist, allocate one
281                          */
282                         page = page_cache_alloc_cold(mapping);
283                         if (!page)
284                                 break;
285
286                         error = add_to_page_cache_lru(page, mapping, index,
287                                                 mapping_gfp_mask(mapping));
288                         if (unlikely(error)) {
289                                 page_cache_release(page);
290                                 break;
291                         }
292
293                         goto readpage;
294                 }
295
296                 /*
297                  * If the page isn't uptodate, we may need to start io on it
298                  */
299                 if (!PageUptodate(page)) {
300                         lock_page(page);
301
302                         /*
303                          * Page was truncated, stop here. If this isn't the
304                          * first page, we'll just complete what we already
305                          * added.
306                          */
307                         if (!page->mapping) {
308                                 unlock_page(page);
309                                 page_cache_release(page);
310                                 break;
311                         }
312                         /*
313                          * page was already under I/O and is now done, great
314                          */
315                         if (PageUptodate(page)) {
316                                 unlock_page(page);
317                                 goto fill_it;
318                         }
319
320 readpage:
321                         /*
322                          * need to read in the page
323                          */
324                         error = mapping->a_ops->readpage(in, page);
325
326                         if (unlikely(error)) {
327                                 page_cache_release(page);
328                                 if (error == AOP_TRUNCATED_PAGE)
329                                         goto find_page;
330                                 break;
331                         }
332                 }
333 fill_it:
334                 pages[i] = page;
335         }
336
337         if (i)
338                 return move_to_pipe(pipe, pages, i, offset, len, flags);
339
340         return error;
341 }
342
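The index/offset/nr_pages setup at the top of __generic_file_splice_read() is ordinary page arithmetic on the file position and requested length. A standalone sketch with made-up numbers, assuming 4096-byte pages (PAGE_CACHE_SHIFT of 12); the values in the comments are what the program prints:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12                         /* assumption: 4 KiB pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK  (~(PAGE_CACHE_SIZE - 1))

int main(void)
{
        unsigned long long f_pos = 5000;    /* hypothetical file position */
        unsigned long len = 10000;          /* hypothetical splice length */

        unsigned long long index = f_pos >> PAGE_CACHE_SHIFT;          /* 1 */
        unsigned long offset = f_pos & ~PAGE_CACHE_MASK;               /* 904 */
        unsigned long nr_pages =
                (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; /* 3 */

        printf("index=%llu offset=%lu nr_pages=%lu\n", index, offset, nr_pages);
        return 0;
}
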
343 /**
344  * generic_file_splice_read - splice data from file to a pipe
345  * @in:         file to splice from
346  * @pipe:       pipe to splice to
347  * @len:        number of bytes to splice
348  * @flags:      splice modifier flags
349  *
350  * Will read pages from the given file and fill them into a pipe.
351  *
352  */
353 ssize_t generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
354                                  size_t len, unsigned int flags)
355 {
356         ssize_t spliced;
357         int ret;
358
359         ret = 0;
360         spliced = 0;
361
362         while (len) {
363                 ret = __generic_file_splice_read(in, pipe, len, flags);
364
365                 if (ret <= 0)
366                         break;
367
368                 in->f_pos += ret;
369                 len -= ret;
370                 spliced += ret;
371
372                 if (!(flags & SPLICE_F_NONBLOCK))
373                         continue;
374                 ret = -EAGAIN;
375                 break;
376         }
377
378         if (spliced)
379                 return spliced;
380
381         return ret;
382 }
383
384 EXPORT_SYMBOL(generic_file_splice_read);
385
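generic_file_splice_read() serves the file-to-pipe direction of splice(). A hypothetical userspace sketch of that direction, assuming a libc that provides the splice(2) wrapper and SPLICE_F_MOVE; the file name and sizes are made up and error handling is minimal:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        int pipefd[2];
        ssize_t n;
        int fd = open("/etc/hostname", O_RDONLY);   /* hypothetical input file */

        if (fd < 0 || pipe(pipefd) < 0)
                return 1;

        /* file -> pipe: the kernel-side path is generic_file_splice_read() */
        n = splice(fd, NULL, pipefd[1], NULL, sizeof(buf), SPLICE_F_MOVE);
        if (n <= 0)
                return 1;

        /* drain the pipe with a plain read, just to show the data arrived */
        n = read(pipefd[0], buf, (size_t)n);
        if (n > 0)
                write(STDOUT_FILENO, buf, (size_t)n);
        return 0;
}
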
386 /*
387  * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
388  * using sendpage().
389  */
390 static int pipe_to_sendpage(struct pipe_inode_info *info,
391                             struct pipe_buffer *buf, struct splice_desc *sd)
392 {
393         struct file *file = sd->file;
394         loff_t pos = sd->pos;
395         unsigned int offset;
396         ssize_t ret;
397         void *ptr;
398         int more;
399
400         /*
401          * sub-optimal, but we are limited by the pipe ->map. we don't
402          * need a kmap'ed buffer here, we just want to make sure we
403          * have the page pinned if the pipe page originates from the
404          * page cache
405          */
406         ptr = buf->ops->map(file, info, buf);
407         if (IS_ERR(ptr))
408                 return PTR_ERR(ptr);
409
410         offset = pos & ~PAGE_CACHE_MASK;
411         more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
412
413         ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);
414
415         buf->ops->unmap(info, buf);
416         if (ret == sd->len)
417                 return 0;
418
419         return -EIO;
420 }
421
422 /*
423  * This is a little more tricky than the file -> pipe splicing. There are
424  * basically three cases:
425  *
426  *      - Destination page already exists in the address space and there
427  *        are users of it. For that case we have no other option than
428  *        copying the data. Tough luck.
429  *      - Destination page already exists in the address space, but there
430  *        are no users of it. Make sure it's uptodate, then drop it. Fall
431  *        through to last case.
432  *      - Destination page does not exist, we can add the pipe page to
433  *        the page cache and avoid the copy.
434  *
435  * If asked to move pages to the output file (SPLICE_F_MOVE is set in
436  * sd->flags), we attempt to migrate pages from the pipe to the output
437  * file address space page cache. This is possible if no one else has
438  * the pipe page referenced outside of the pipe and page cache. If
439  * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
440  * a new page in the output file page cache and fill/dirty that.
441  */
442 static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
443                         struct splice_desc *sd)
444 {
445         struct file *file = sd->file;
446         struct address_space *mapping = file->f_mapping;
447         gfp_t gfp_mask = mapping_gfp_mask(mapping);
448         unsigned int offset;
449         struct page *page;
450         pgoff_t index;
451         char *src;
452         int ret;
453
454         /*
455          * make sure the data in this buffer is uptodate
456          */
457         src = buf->ops->map(file, info, buf);
458         if (IS_ERR(src))
459                 return PTR_ERR(src);
460
461         index = sd->pos >> PAGE_CACHE_SHIFT;
462         offset = sd->pos & ~PAGE_CACHE_MASK;
463
464         /*
465          * reuse buf page, if SPLICE_F_MOVE is set
466          */
467         if (sd->flags & SPLICE_F_MOVE) {
468                 /*
469                  * If steal succeeds, buf->page is now pruned from the vm
470                  * side (LRU and page cache) and we can reuse it.
471                  */
472                 if (buf->ops->steal(info, buf))
473                         goto find_page;
474
475                 /*
476                  * this will also set the page locked
477                  */
478                 page = buf->page;
479                 if (add_to_page_cache(page, mapping, index, gfp_mask))
480                         goto find_page;
481
482                 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
483                         lru_cache_add(page);
484         } else {
485 find_page:
486                 ret = -ENOMEM;
487                 page = find_or_create_page(mapping, index, gfp_mask);
488                 if (!page)
489                         goto out_nomem;
490
491                 /*
492                  * If the page is uptodate, it is also locked. If it isn't
493                  * uptodate, we can mark it uptodate if we are filling the
494                  * full page. Otherwise we need to read it in first...
495                  */
496                 if (!PageUptodate(page)) {
497                         if (sd->len < PAGE_CACHE_SIZE) {
498                                 ret = mapping->a_ops->readpage(file, page);
499                                 if (unlikely(ret))
500                                         goto out;
501
502                                 lock_page(page);
503
504                                 if (!PageUptodate(page)) {
505                                         /*
506                                          * page got invalidated, repeat
507                                          */
508                                         if (!page->mapping) {
509                                                 unlock_page(page);
510                                                 page_cache_release(page);
511                                                 goto find_page;
512                                         }
513                                         ret = -EIO;
514                                         goto out;
515                                 }
516                         } else {
517                                 WARN_ON(!PageLocked(page));
518                                 SetPageUptodate(page);
519                         }
520                 }
521         }
522
523         ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
524         if (ret == AOP_TRUNCATED_PAGE) {
525                 page_cache_release(page);
526                 goto find_page;
527         } else if (ret)
528                 goto out;
529
530         if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
531                 char *dst = kmap_atomic(page, KM_USER0);
532
533                 memcpy(dst + offset, src + buf->offset, sd->len);
534                 flush_dcache_page(page);
535                 kunmap_atomic(dst, KM_USER0);
536         }
537
538         ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
539         if (ret == AOP_TRUNCATED_PAGE) {
540                 page_cache_release(page);
541                 goto find_page;
542         } else if (ret)
543                 goto out;
544
545         mark_page_accessed(page);
546         balance_dirty_pages_ratelimited(mapping);
547 out:
548         if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
549                 page_cache_release(page);
550                 unlock_page(page);
551         }
552 out_nomem:
553         buf->ops->unmap(info, buf);
554         return ret;
555 }
556
557 typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
558                            struct splice_desc *);
559
560 /*
561  * Pipe input worker. Most of this logic works like a regular pipe; the
562  * key here is the 'actor' worker passed in that actually moves the data
563  * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
564  */
565 static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
566                               size_t len, unsigned int flags,
567                               splice_actor *actor)
568 {
569         int ret, do_wakeup, err;
570         struct splice_desc sd;
571
572         ret = 0;
573         do_wakeup = 0;
574
575         sd.total_len = len;
576         sd.flags = flags;
577         sd.file = out;
578         sd.pos = out->f_pos;
579
580         if (pipe->inode)
581                 mutex_lock(&pipe->inode->i_mutex);
582
583         for (;;) {
584                 int bufs = pipe->nrbufs;
585
586                 if (bufs) {
587                         int curbuf = pipe->curbuf;
588                         struct pipe_buffer *buf = pipe->bufs + curbuf;
589                         struct pipe_buf_operations *ops = buf->ops;
590
591                         sd.len = buf->len;
592                         if (sd.len > sd.total_len)
593                                 sd.len = sd.total_len;
594
595                         err = actor(pipe, buf, &sd);
596                         if (err) {
597                                 if (!ret && err != -ENODATA)
598                                         ret = err;
599
600                                 break;
601                         }
602
603                         ret += sd.len;
604                         buf->offset += sd.len;
605                         buf->len -= sd.len;
606                         if (!buf->len) {
607                                 buf->ops = NULL;
608                                 ops->release(pipe, buf);
609                                 curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
610                                 pipe->curbuf = curbuf;
611                                 pipe->nrbufs = --bufs;
612                                 do_wakeup = 1;
613                         }
614
615                         sd.pos += sd.len;
616                         sd.total_len -= sd.len;
617                         if (!sd.total_len)
618                                 break;
619                 }
620
621                 if (bufs)
622                         continue;
623                 if (!pipe->writers)
624                         break;
625                 if (!pipe->waiting_writers) {
626                         if (ret)
627                                 break;
628                 }
629
630                 if (flags & SPLICE_F_NONBLOCK) {
631                         if (!ret)
632                                 ret = -EAGAIN;
633                         break;
634                 }
635
636                 if (signal_pending(current)) {
637                         if (!ret)
638                                 ret = -ERESTARTSYS;
639                         break;
640                 }
641
642                 if (do_wakeup) {
643                         smp_mb();
644                         if (waitqueue_active(&pipe->wait))
645                                 wake_up_interruptible_sync(&pipe->wait);
646                         kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
647                         do_wakeup = 0;
648                 }
649
650                 pipe_wait(pipe);
651         }
652
653         if (pipe->inode)
654                 mutex_unlock(&pipe->inode->i_mutex);
655
656         if (do_wakeup) {
657                 smp_mb();
658                 if (waitqueue_active(&pipe->wait))
659                         wake_up_interruptible(&pipe->wait);
660                 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
661         }
662
663         mutex_lock(&out->f_mapping->host->i_mutex);
664         out->f_pos = sd.pos;
665         mutex_unlock(&out->f_mapping->host->i_mutex);
666         return ret;
667
668 }
669
670 /**
671  * generic_file_splice_write - splice data from a pipe to a file
672  * @pipe:       pipe info
673  * @out:        file to write to
674  * @len:        number of bytes to splice
675  * @flags:      splice modifier flags
676  *
677  * Will either move or copy pages (determined by @flags options) from
678  * the given pipe inode to the given file.
679  *
680  */
681 ssize_t
682 generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
683                           size_t len, unsigned int flags)
684 {
685         struct address_space *mapping = out->f_mapping;
686         ssize_t ret;
687
688         ret = move_from_pipe(pipe, out, len, flags, pipe_to_file);
689
690         /*
691          * if file or inode is SYNC and we actually wrote some data, sync it
692          */
693         if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
694             && ret > 0) {
695                 struct inode *inode = mapping->host;
696                 int err;
697
698                 mutex_lock(&inode->i_mutex);
699                 err = generic_osync_inode(mapping->host, mapping,
700                                                 OSYNC_METADATA|OSYNC_DATA);
701                 mutex_unlock(&inode->i_mutex);
702
703                 if (err)
704                         ret = err;
705         }
706
707         return ret;
708 }
709
710 EXPORT_SYMBOL(generic_file_splice_write);
711
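generic_file_splice_write() covers the opposite direction: pipe contents are written into the destination file's page cache through pipe_to_file(). A hypothetical userspace sketch, again assuming the splice(2) wrapper is available; the path and message are made up:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char msg[] = "hello from a pipe\n";
        int pipefd[2];
        ssize_t n;
        int fd = open("/tmp/splice-demo.txt",        /* hypothetical output file */
                      O_WRONLY | O_CREAT | O_TRUNC, 0644);

        if (fd < 0 || pipe(pipefd) < 0)
                return 1;

        /* stage some data in the pipe with an ordinary write */
        if (write(pipefd[1], msg, strlen(msg)) < 0)
                return 1;

        /* pipe -> file: served on the kernel side by generic_file_splice_write() */
        n = splice(pipefd[0], NULL, fd, NULL, strlen(msg), SPLICE_F_MOVE);
        if (n < 0) {
                perror("splice");
                return 1;
        }
        fprintf(stderr, "spliced %zd bytes into the file\n", n);
        return 0;
}
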
712 /**
713  * generic_splice_sendpage - splice data from a pipe to a socket
714  * @pipe:       pipe to splice from
715  * @out:        socket to write to
716  * @len:        number of bytes to splice
717  * @flags:      splice modifier flags
718  *
719  * Will send @len bytes from the pipe to a network socket. No data copying
720  * is involved.
721  *
722  */
723 ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
724                                 size_t len, unsigned int flags)
725 {
726         return move_from_pipe(pipe, out, len, flags, pipe_to_sendpage);
727 }
728
729 EXPORT_SYMBOL(generic_splice_sendpage);
730
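generic_splice_sendpage() is the pipe-to-socket path: each pipe buffer page is handed to the socket's sendpage() without being copied through userspace. A self-contained sketch over a loopback TCP connection, assuming the splice(2) wrapper is available; error handling is omitted for brevity:

#define _GNU_SOURCE
#include <arpa/inet.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in addr = { .sin_family = AF_INET };
        socklen_t alen = sizeof(addr);
        const char msg[] = "spliced straight to a socket\n";
        char buf[64];
        int lsock, csock, peer, pipefd[2];
        ssize_t n;

        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

        /* loopback TCP pair: listener, connecting side, accepted side */
        lsock = socket(AF_INET, SOCK_STREAM, 0);
        bind(lsock, (struct sockaddr *)&addr, sizeof(addr));    /* port 0: any */
        listen(lsock, 1);
        getsockname(lsock, (struct sockaddr *)&addr, &alen);
        csock = socket(AF_INET, SOCK_STREAM, 0);
        connect(csock, (struct sockaddr *)&addr, sizeof(addr));
        peer = accept(lsock, NULL, NULL);

        pipe(pipefd);
        write(pipefd[1], msg, strlen(msg));

        /* pipe -> socket: pages go to the socket's sendpage, no user copy */
        n = splice(pipefd[0], NULL, csock, NULL, strlen(msg), SPLICE_F_MORE);
        fprintf(stderr, "spliced %zd bytes to the socket\n", n);

        /* read it back on the other end just to show it arrived */
        n = read(peer, buf, sizeof(buf));
        if (n > 0)
                write(STDOUT_FILENO, buf, (size_t)n);

        close(peer);
        close(csock);
        close(lsock);
        return 0;
}
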
731 /*
732  * Attempt to initiate a splice from pipe to file.
733  */
734 static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
735                            size_t len, unsigned int flags)
736 {
737         loff_t pos;
738         int ret;
739
740         if (!out->f_op || !out->f_op->splice_write)
741                 return -EINVAL;
742
743         if (!(out->f_mode & FMODE_WRITE))
744                 return -EBADF;
745
746         pos = out->f_pos;
747
748         ret = rw_verify_area(WRITE, out, &pos, len);
749         if (unlikely(ret < 0))
750                 return ret;
751
752         return out->f_op->splice_write(pipe, out, len, flags);
753 }
754
755 /*
756  * Attempt to initiate a splice from a file to a pipe.
757  */
758 static long do_splice_to(struct file *in, struct pipe_inode_info *pipe,
759                          size_t len, unsigned int flags)
760 {
761         loff_t pos, isize, left;
762         int ret;
763
764         if (!in->f_op || !in->f_op->splice_read)
765                 return -EINVAL;
766
767         if (!(in->f_mode & FMODE_READ))
768                 return -EBADF;
769
770         pos = in->f_pos;
771
772         ret = rw_verify_area(READ, in, &pos, len);
773         if (unlikely(ret < 0))
774                 return ret;
775
776         isize = i_size_read(in->f_mapping->host);
777         if (unlikely(in->f_pos >= isize))
778                 return 0;
779
780         left = isize - in->f_pos;
781         if (left < len)
782                 len = left;
783
784         return in->f_op->splice_read(in, pipe, len, flags);
785 }
786
787 long do_splice_direct(struct file *in, struct file *out, size_t len,
788                       unsigned int flags)
789 {
790         struct pipe_inode_info *pipe;
791         long ret, bytes;
792         umode_t i_mode;
793         int i;
794
795         /*
796          * We require the input to be a regular file, as we don't want to
797          * randomly drop data for e.g. socket -> socket splicing. Use the
798          * piped splicing for that!
799          */
800         i_mode = in->f_dentry->d_inode->i_mode;
801         if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
802                 return -EINVAL;
803
804         /*
805          * Neither in nor out is a pipe; set up an internal pipe attached to
806          * 'out' and transfer the wanted data from 'in' to 'out' through it.
807          */
808         pipe = current->splice_pipe;
809         if (!pipe) {
810                 pipe = alloc_pipe_info(NULL);
811                 if (!pipe)
812                         return -ENOMEM;
813
814                 /*
815                  * We don't have an immediate reader, but we'll read the stuff
816                  * out of the pipe right after the move_to_pipe(). So set
817                  * PIPE_READERS appropriately.
818                  * pipe->readers appropriately.
819                 pipe->readers = 1;
820
821                 current->splice_pipe = pipe;
822         }
823
824         /*
825          * do the splice
826          */
827         ret = 0;
828         bytes = 0;
829
830         while (len) {
831                 size_t read_len, max_read_len;
832
833                 /*
834                  * Do at most PIPE_BUFFERS pages worth of transfer:
835                  */
836                 max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));
837
838                 ret = do_splice_to(in, pipe, max_read_len, flags);
839                 if (unlikely(ret < 0))
840                         goto out_release;
841
842                 read_len = ret;
843
844                 /*
845                  * NOTE: nonblocking mode only applies to the input. We
846                  * must not do the output in nonblocking mode as then we
847                  * could get stuck data in the internal pipe:
848                  */
849                 ret = do_splice_from(pipe, out, read_len,
850                                      flags & ~SPLICE_F_NONBLOCK);
851                 if (unlikely(ret < 0))
852                         goto out_release;
853
854                 bytes += ret;
855                 len -= ret;
856
857                 /*
858                  * In nonblocking mode, if we got back a short read then
859                  * that was due to either an IO error or due to the
860                  * pagecache entry not being there. In the IO error case
861                  * the _next_ splice attempt will produce a clean IO error
862                  * return value (not a short read), so in both cases it's
863                  * correct to break out of the loop here:
864                  */
865                 if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
866                         break;
867         }
868
869         pipe->nrbufs = pipe->curbuf = 0;
870
871         return bytes;
872
873 out_release:
874         /*
875          * If we did an incomplete transfer we must release
876          * the pipe buffers in question:
877          */
878         for (i = 0; i < PIPE_BUFFERS; i++) {
879                 struct pipe_buffer *buf = pipe->bufs + i;
880
881                 if (buf->ops) {
882                         buf->ops->release(pipe, buf);
883                         buf->ops = NULL;
884                 }
885         }
886         pipe->nrbufs = pipe->curbuf = 0;
887
888         /*
889          * If we transferred some data, return the number of bytes:
890          */
891         if (bytes > 0)
892                 return bytes;
893
894         return ret;
895 }
896
897 EXPORT_SYMBOL(do_splice_direct);
898
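do_splice_direct() is not reachable through splice(2) itself (do_splice() below requires one side to be a pipe); it transfers file-to-file through a per-task internal pipe kept in current->splice_pipe. A hypothetical userspace equivalent of the same pattern, with the intermediate pipe made explicit:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Copy file -> file through an explicit pipe, one chunk at a time,
 * mirroring what do_splice_direct() does in kernel space.
 */
static int copy_via_pipe(int in_fd, int out_fd)
{
        int pipefd[2];
        ssize_t moved;

        if (pipe(pipefd) < 0)
                return -1;

        for (;;) {
                moved = splice(in_fd, NULL, pipefd[1], NULL, 65536, SPLICE_F_MOVE);
                if (moved <= 0)
                        break;          /* 0 = EOF, negative = error */

                /* drain everything just queued so the pipe can never fill up */
                while (moved > 0) {
                        ssize_t out = splice(pipefd[0], NULL, out_fd, NULL,
                                             (size_t)moved, SPLICE_F_MOVE);
                        if (out <= 0) {
                                moved = -1;
                                break;
                        }
                        moved -= out;
                }
                if (moved < 0)
                        break;
        }

        close(pipefd[0]);
        close(pipefd[1]);
        return moved < 0 ? -1 : 0;
}

int main(int argc, char **argv)
{
        int in_fd, out_fd;

        if (argc != 3)
                return 1;
        in_fd = open(argv[1], O_RDONLY);
        out_fd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (in_fd < 0 || out_fd < 0)
                return 1;
        return copy_via_pipe(in_fd, out_fd) ? 1 : 0;
}
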
899 /*
900  * Determine where to splice to/from.
901  */
902 static long do_splice(struct file *in, loff_t __user *off_in,
903                       struct file *out, loff_t __user *off_out,
904                       size_t len, unsigned int flags)
905 {
906         struct pipe_inode_info *pipe;
907
908         pipe = in->f_dentry->d_inode->i_pipe;
909         if (pipe) {
910                 if (off_in)
911                         return -ESPIPE;
912                 if (off_out) {
913                         if (out->f_op->llseek == no_llseek)
914                                 return -EINVAL;
915                         if (copy_from_user(&out->f_pos, off_out,
916                                            sizeof(loff_t)))
917                                 return -EFAULT;
918                 }
919
920                 return do_splice_from(pipe, out, len, flags);
921         }
922
923         pipe = out->f_dentry->d_inode->i_pipe;
924         if (pipe) {
925                 if (off_out)
926                         return -ESPIPE;
927                 if (off_in) {
928                         if (in->f_op->llseek == no_llseek)
929                                 return -EINVAL;
930                         if (copy_from_user(&in->f_pos, off_in, sizeof(loff_t)))
931                                 return -EFAULT;
932                 }
933
934                 return do_splice_to(in, pipe, len, flags);
935         }
936
937         return -EINVAL;
938 }
939
940 asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
941                            int fd_out, loff_t __user *off_out,
942                            size_t len, unsigned int flags)
943 {
944         long error;
945         struct file *in, *out;
946         int fput_in, fput_out;
947
948         if (unlikely(!len))
949                 return 0;
950
951         error = -EBADF;
952         in = fget_light(fd_in, &fput_in);
953         if (in) {
954                 if (in->f_mode & FMODE_READ) {
955                         out = fget_light(fd_out, &fput_out);
956                         if (out) {
957                                 if (out->f_mode & FMODE_WRITE)
958                                         error = do_splice(in, off_in,
959                                                           out, off_out,
960                                                           len, flags);
961                                 fput_light(out, fput_out);
962                         }
963                 }
964
965                 fput_light(in, fput_in);
966         }
967
968         return error;
969 }
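
sys_splice() itself only checks file modes; the offset rules live in do_splice() above: an offset pointer is accepted for the non-pipe side and rejected with -ESPIPE for the pipe side, and in this version the offset is copied in from userspace but the updated position is not copied back. A hypothetical userspace sketch of those rules, assuming the splice(2) wrapper; the input file is made up:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
        loff_t off = 16;              /* hypothetical starting offset in the file */
        int pipefd[2];
        ssize_t n;
        int fd = open("/etc/passwd", O_RDONLY);     /* hypothetical input file */

        if (fd < 0 || pipe(pipefd) < 0)
                return 1;

        /* an offset pointer is only accepted for the non-pipe side */
        n = splice(fd, &off, pipefd[1], NULL, 1024, 0);
        if (n < 0)
                perror("splice from file");

        /* an offset for the pipe side is rejected with ESPIPE */
        n = splice(pipefd[0], &off, STDOUT_FILENO, NULL, 1024, 0);
        if (n < 0 && errno == ESPIPE)
                fprintf(stderr, "offset on the pipe side: ESPIPE, as expected\n");

        return 0;
}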