[PATCH] splice: unlikely() optimizations
[pandora-kernel.git] / fs / splice.c
/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files and to fix the initial implementation
 * bugs.
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
 *
 */
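/*
 * Illustrative userspace sketch (not part of this file): the splice(2)
 * system call described above, used to push data from a regular file
 * through a pipe and on to a socket without copying it through user
 * space. It assumes a libc that exposes a splice() wrapper (e.g. glibc
 * via <fcntl.h> with _GNU_SOURCE); descriptor names and error handling
 * are simplified.
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      static int file_to_socket(int file_fd, int sock_fd, size_t len)
 *      {
 *              int pfd[2];
 *              ssize_t n = 0;
 *
 *              if (pipe(pfd) < 0)
 *                      return -1;
 *
 *              while (len) {
 *                      // file -> pipe: fill the in-kernel pipe buffer
 *                      n = splice(file_fd, NULL, pfd[1], NULL, len,
 *                                 SPLICE_F_MOVE | SPLICE_F_MORE);
 *                      if (n <= 0)
 *                              break;
 *                      // pipe -> socket: drain what we just spliced in
 *                      if (splice(pfd[0], NULL, sock_fd, NULL, n,
 *                                 SPLICE_F_MOVE | SPLICE_F_MORE) < 0) {
 *                              n = -1;
 *                              break;
 *                      }
 *                      len -= n;
 *              }
 *
 *              close(pfd[0]);
 *              close(pfd[1]);
 *              return n < 0 ? -1 : 0;
 *      }
 */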
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>

/*
 * Passed to the actors
 */
struct splice_desc {
        unsigned int len, total_len;    /* current and remaining length */
        unsigned int flags;             /* splice flags */
        struct file *file;              /* file to read/write */
        loff_t pos;                     /* file position */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function; it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
                                     struct pipe_buffer *buf)
{
        struct page *page = buf->page;
        struct address_space *mapping = page_mapping(page);

        WARN_ON(!PageLocked(page));
        WARN_ON(!PageUptodate(page));

        /*
         * At least for ext2 with nobh option, we need to wait on writeback
         * completing on this page, since we'll remove it from the pagecache.
         * Otherwise truncate won't wait on the page, allowing the disk
         * blocks to be reused by someone else before we actually wrote our
         * data to them. fs corruption ensues.
         */
        wait_on_page_writeback(page);

        if (PagePrivate(page))
                try_to_release_page(page, mapping_gfp_mask(mapping));

        if (!remove_mapping(mapping, page))
                return 1;

        buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
        return 0;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
                                        struct pipe_buffer *buf)
{
        page_cache_release(buf->page);
        buf->page = NULL;
        buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}

static void *page_cache_pipe_buf_map(struct file *file,
                                     struct pipe_inode_info *info,
                                     struct pipe_buffer *buf)
{
        struct page *page = buf->page;
        int err;

        if (!PageUptodate(page)) {
                lock_page(page);

                /*
                 * Page got truncated/unhashed. This will cause a 0-byte
                 * splice if this is the first page.
                 */
                if (!page->mapping) {
                        err = -ENODATA;
                        goto error;
                }

                /*
                 * uh oh, read-error from disk
                 */
                if (!PageUptodate(page)) {
                        err = -EIO;
                        goto error;
                }

                /*
                 * page is OK after all, fall through to mapping
                 */
                unlock_page(page);
        }

        return kmap(page);
error:
        unlock_page(page);
        return ERR_PTR(err);
}

static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
                                      struct pipe_buffer *buf)
{
        kunmap(buf->page);
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
        .can_merge = 0,
        .map = page_cache_pipe_buf_map,
        .unmap = page_cache_pipe_buf_unmap,
        .release = page_cache_pipe_buf_release,
        .steal = page_cache_pipe_buf_steal,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
                            int nr_pages, unsigned long offset,
                            unsigned long len, unsigned int flags)
{
        int ret, do_wakeup, i;

        ret = 0;
        do_wakeup = 0;
        i = 0;

        if (pipe->inode)
                mutex_lock(&pipe->inode->i_mutex);

        for (;;) {
                if (!pipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }

                if (pipe->nrbufs < PIPE_BUFFERS) {
                        int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
                        struct pipe_buffer *buf = pipe->bufs + newbuf;
                        struct page *page = pages[i++];
                        unsigned long this_len;

                        this_len = PAGE_CACHE_SIZE - offset;
                        if (this_len > len)
                                this_len = len;

                        buf->page = page;
                        buf->offset = offset;
                        buf->len = this_len;
                        buf->ops = &page_cache_pipe_buf_ops;
                        pipe->nrbufs++;
                        if (pipe->inode)
                                do_wakeup = 1;

                        ret += this_len;
                        len -= this_len;
                        offset = 0;
                        if (!--nr_pages)
                                break;
                        if (!len)
                                break;
                        if (pipe->nrbufs < PIPE_BUFFERS)
                                continue;

                        break;
                }

                if (flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (do_wakeup) {
                        smp_mb();
                        if (waitqueue_active(&pipe->wait))
                                wake_up_interruptible_sync(&pipe->wait);
                        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                        do_wakeup = 0;
                }

                pipe->waiting_writers++;
                pipe_wait(pipe);
                pipe->waiting_writers--;
        }

        if (pipe->inode)
                mutex_unlock(&pipe->inode->i_mutex);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }

        while (i < nr_pages)
                page_cache_release(pages[i++]);

        return ret;
}

static int
__generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
                           size_t len, unsigned int flags)
{
        struct address_space *mapping = in->f_mapping;
        unsigned int offset, nr_pages;
        struct page *pages[PIPE_BUFFERS];
        struct page *page;
        pgoff_t index;
        int i, error;

        index = in->f_pos >> PAGE_CACHE_SHIFT;
        offset = in->f_pos & ~PAGE_CACHE_MASK;
        nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        if (nr_pages > PIPE_BUFFERS)
                nr_pages = PIPE_BUFFERS;

        /*
         * Initiate read-ahead on this page range. However, don't call into
         * read-ahead for a single page at a non-zero offset (we are likely
         * doing a small-chunk splice and the page is already there).
         */
        if (!offset || nr_pages > 1)
                do_page_cache_readahead(mapping, in, index, nr_pages);

        /*
         * now fill in the holes
         */
        error = 0;
        for (i = 0; i < nr_pages; i++, index++) {
find_page:
                /*
                 * lookup the page for this index
                 */
                page = find_get_page(mapping, index);
                if (!page) {
                        /*
                         * If in nonblock mode then don't block on
                         * readpage (we've kicked readahead so there
                         * will be asynchronous progress):
                         */
                        if (flags & SPLICE_F_NONBLOCK)
                                break;

                        /*
                         * page didn't exist, allocate one
                         */
                        page = page_cache_alloc_cold(mapping);
                        if (!page)
                                break;

                        error = add_to_page_cache_lru(page, mapping, index,
                                                mapping_gfp_mask(mapping));
                        if (unlikely(error)) {
                                page_cache_release(page);
                                break;
                        }

                        goto readpage;
                }

                /*
                 * If the page isn't uptodate, we may need to start io on it
                 */
                if (!PageUptodate(page)) {
                        lock_page(page);

                        /*
                         * Page was truncated, stop here. If this isn't the
                         * first page, we'll just complete what we already
                         * added.
                         */
                        if (!page->mapping) {
                                unlock_page(page);
                                page_cache_release(page);
                                break;
                        }
                        /*
                         * page was already under io and is now done, great
                         */
                        if (PageUptodate(page)) {
                                unlock_page(page);
                                goto fill_it;
                        }

readpage:
                        /*
                         * need to read in the page
                         */
                        error = mapping->a_ops->readpage(in, page);

                        if (unlikely(error)) {
                                page_cache_release(page);
                                if (error == AOP_TRUNCATED_PAGE)
                                        goto find_page;
                                break;
                        }
                }
fill_it:
                pages[i] = page;
        }

        if (i)
                return move_to_pipe(pipe, pages, i, offset, len, flags);

        return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:         file to splice from
 * @pipe:       pipe to splice to
 * @len:        number of bytes to splice
 * @flags:      splice modifier flags
 *
 * Will read pages from the given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
                                 size_t len, unsigned int flags)
{
        ssize_t spliced;
        int ret;

        ret = 0;
        spliced = 0;

        while (len) {
                ret = __generic_file_splice_read(in, pipe, len, flags);

                if (ret <= 0)
                        break;

                in->f_pos += ret;
                len -= ret;
                spliced += ret;

                if (!(flags & SPLICE_F_NONBLOCK))
                        continue;
                ret = -EAGAIN;
                break;
        }

        if (spliced)
                return spliced;

        return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);
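
/*
 * Illustrative wiring sketch (an assumption, not taken from this file):
 * a filesystem whose regular file data lives in the page cache can
 * typically point its ->splice_read file operation at the generic
 * helper above. The names below are only for illustration.
 *
 *      static struct file_operations example_file_operations = {
 *              .read           = generic_file_read,
 *              .write          = generic_file_write,
 *              .mmap           = generic_file_mmap,
 *              .splice_read    = generic_file_splice_read,
 *      };
 */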

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage().
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
                            struct pipe_buffer *buf, struct splice_desc *sd)
{
        struct file *file = sd->file;
        loff_t pos = sd->pos;
        unsigned int offset;
        ssize_t ret;
        void *ptr;
        int more;

        /*
         * sub-optimal, but we are limited by the pipe ->map. we don't
         * need a kmap'ed buffer here, we just want to make sure we
         * have the page pinned if the pipe page originates from the
         * page cache
         */
        ptr = buf->ops->map(file, info, buf);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        offset = pos & ~PAGE_CACHE_MASK;
        more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

        ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);

        buf->ops->unmap(info, buf);
        if (ret == sd->len)
                return 0;

        return -EIO;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *      - Destination page already exists in the address space and there
 *        are users of it. For that case we have no other option than
 *        copying the data. Tough luck.
 *      - Destination page already exists in the address space, but there
 *        are no users of it. Make sure it's uptodate, then drop it. Fall
 *        through to last case.
 *      - Destination page does not exist, we can add the pipe page to
 *        the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        struct file *file = sd->file;
        struct address_space *mapping = file->f_mapping;
        gfp_t gfp_mask = mapping_gfp_mask(mapping);
        unsigned int offset;
        struct page *page;
        pgoff_t index;
        char *src;
        int ret;

        /*
         * make sure the data in this buffer is uptodate
         */
        src = buf->ops->map(file, info, buf);
        if (IS_ERR(src))
                return PTR_ERR(src);

        index = sd->pos >> PAGE_CACHE_SHIFT;
        offset = sd->pos & ~PAGE_CACHE_MASK;

        /*
         * reuse buf page, if SPLICE_F_MOVE is set
         */
        if (sd->flags & SPLICE_F_MOVE) {
                /*
                 * If steal succeeds, buf->page is now pruned from the vm
                 * side (LRU and page cache) and we can reuse it.
                 */
                if (buf->ops->steal(info, buf))
                        goto find_page;

                /*
                 * this will also set the page locked
                 */
                page = buf->page;
                if (add_to_page_cache(page, mapping, index, gfp_mask))
                        goto find_page;

                if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                        lru_cache_add(page);
        } else {
find_page:
                ret = -ENOMEM;
                page = find_or_create_page(mapping, index, gfp_mask);
                if (!page)
                        goto out_nomem;

                /*
                 * If the page is uptodate, it is also locked. If it isn't
                 * uptodate, we can mark it uptodate if we are filling the
                 * full page. Otherwise we need to read it in first...
                 */
                if (!PageUptodate(page)) {
                        if (sd->len < PAGE_CACHE_SIZE) {
                                ret = mapping->a_ops->readpage(file, page);
                                if (unlikely(ret))
                                        goto out;

                                lock_page(page);

                                if (!PageUptodate(page)) {
                                        /*
                                         * page got invalidated, repeat
                                         */
                                        if (!page->mapping) {
                                                unlock_page(page);
                                                page_cache_release(page);
                                                goto find_page;
                                        }
                                        ret = -EIO;
                                        goto out;
                                }
                        } else {
                                WARN_ON(!PageLocked(page));
                                SetPageUptodate(page);
                        }
                }
        }

        ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
        if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        } else if (ret)
                goto out;

        if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
                char *dst = kmap_atomic(page, KM_USER0);

                memcpy(dst + offset, src + buf->offset, sd->len);
                flush_dcache_page(page);
                kunmap_atomic(dst, KM_USER0);
        }

        ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
        if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        } else if (ret)
                goto out;

        mark_page_accessed(page);
        balance_dirty_pages_ratelimited(mapping);
out:
        if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
                page_cache_release(page);
                unlock_page(page);
        }
out_nomem:
        buf->ops->unmap(info, buf);
        return ret;
}

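/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * requesting the SPLICE_F_MOVE path handled by pipe_to_file() above by
 * splicing pipe contents into a regular file. The flag is only a hint;
 * pipe_to_file() falls back to copying whenever the page cannot be
 * stolen. Descriptor names are hypothetical.
 *
 *      ssize_t drain_pipe_to_file(int pipe_fd, int file_fd, size_t len)
 *      {
 *              return splice(pipe_fd, NULL, file_fd, NULL, len,
 *                            SPLICE_F_MOVE);
 *      }
 */
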
typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
                           struct splice_desc *);

/*
 * Pipe input worker. Most of this logic works like a regular pipe; the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
                              size_t len, unsigned int flags,
                              splice_actor *actor)
{
        int ret, do_wakeup, err;
        struct splice_desc sd;

        ret = 0;
        do_wakeup = 0;

        sd.total_len = len;
        sd.flags = flags;
        sd.file = out;
        sd.pos = out->f_pos;

        if (pipe->inode)
                mutex_lock(&pipe->inode->i_mutex);

        for (;;) {
                if (pipe->nrbufs) {
                        struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
                        struct pipe_buf_operations *ops = buf->ops;

                        sd.len = buf->len;
                        if (sd.len > sd.total_len)
                                sd.len = sd.total_len;

                        err = actor(pipe, buf, &sd);
                        if (err) {
                                if (!ret && err != -ENODATA)
                                        ret = err;

                                break;
                        }

                        ret += sd.len;
                        buf->offset += sd.len;
                        buf->len -= sd.len;
                        if (!buf->len) {
                                buf->ops = NULL;
                                ops->release(pipe, buf);
                                pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
                                pipe->nrbufs--;
                                if (pipe->inode)
                                        do_wakeup = 1;
                        }

                        sd.pos += sd.len;
                        sd.total_len -= sd.len;
                        if (!sd.total_len)
                                break;
                }

                if (pipe->nrbufs)
                        continue;
                if (!pipe->writers)
                        break;
                if (!pipe->waiting_writers) {
                        if (ret)
                                break;
                }

                if (flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (do_wakeup) {
                        smp_mb();
                        if (waitqueue_active(&pipe->wait))
                                wake_up_interruptible_sync(&pipe->wait);
                        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
                        do_wakeup = 0;
                }

                pipe_wait(pipe);
        }

        if (pipe->inode)
                mutex_unlock(&pipe->inode->i_mutex);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }

        out->f_pos = sd.pos;
        return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:       pipe info
 * @out:        file to write to
 * @len:        number of bytes to splice
 * @flags:      splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
                          size_t len, unsigned int flags)
{
        struct address_space *mapping = out->f_mapping;
        ssize_t ret;

        ret = move_from_pipe(pipe, out, len, flags, pipe_to_file);

        /*
         * if file or inode is SYNC and we actually wrote some data, sync it
         */
        if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
            && ret > 0) {
                struct inode *inode = mapping->host;
                int err;

                mutex_lock(&inode->i_mutex);
                err = generic_osync_inode(mapping->host, mapping,
                                          OSYNC_METADATA|OSYNC_DATA);
                mutex_unlock(&inode->i_mutex);

                if (err)
                        ret = err;
        }

        return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:       pipe to splice from
 * @out:        socket to write to
 * @len:        number of bytes to splice
 * @flags:      splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
                                size_t len, unsigned int flags)
{
        return move_from_pipe(pipe, out, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
                           size_t len, unsigned int flags)
{
        loff_t pos;
        int ret;

        if (unlikely(!out->f_op || !out->f_op->splice_write))
                return -EINVAL;

        if (unlikely(!(out->f_mode & FMODE_WRITE)))
                return -EBADF;

        pos = out->f_pos;

        ret = rw_verify_area(WRITE, out, &pos, len);
        if (unlikely(ret < 0))
                return ret;

        return out->f_op->splice_write(pipe, out, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, struct pipe_inode_info *pipe,
                         size_t len, unsigned int flags)
{
        loff_t pos, isize, left;
        int ret;

        if (unlikely(!in->f_op || !in->f_op->splice_read))
                return -EINVAL;

        if (unlikely(!(in->f_mode & FMODE_READ)))
                return -EBADF;

        pos = in->f_pos;

        ret = rw_verify_area(READ, in, &pos, len);
        if (unlikely(ret < 0))
                return ret;

        isize = i_size_read(in->f_mapping->host);
        if (unlikely(in->f_pos >= isize))
                return 0;

        left = isize - in->f_pos;
        if (unlikely(left < len))
                len = left;

        return in->f_op->splice_read(in, pipe, len, flags);
}
779 long do_splice_direct(struct file *in, struct file *out, size_t len,
780                       unsigned int flags)
781 {
782         struct pipe_inode_info *pipe;
783         long ret, bytes;
784         umode_t i_mode;
785         int i;
786
787         /*
788          * We require the input being a regular file, as we don't want to
789          * randomly drop data for eg socket -> socket splicing. Use the
790          * piped splicing for that!
791          */
792         i_mode = in->f_dentry->d_inode->i_mode;
793         if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
794                 return -EINVAL;
795
796         /*
797          * neither in nor out is a pipe, setup an internal pipe attached to
798          * 'out' and transfer the wanted data from 'in' to 'out' through that
799          */
800         pipe = current->splice_pipe;
801         if (unlikely(!pipe)) {
802                 pipe = alloc_pipe_info(NULL);
803                 if (!pipe)
804                         return -ENOMEM;
805
806                 /*
807                  * We don't have an immediate reader, but we'll read the stuff
808                  * out of the pipe right after the move_to_pipe(). So set
809                  * PIPE_READERS appropriately.
810                  */
811                 pipe->readers = 1;
812
813                 current->splice_pipe = pipe;
814         }
815
816         /*
817          * do the splice
818          */
819         ret = 0;
820         bytes = 0;
821
822         while (len) {
823                 size_t read_len, max_read_len;
824
825                 /*
826                  * Do at most PIPE_BUFFERS pages worth of transfer:
827                  */
828                 max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));
829
830                 ret = do_splice_to(in, pipe, max_read_len, flags);
831                 if (unlikely(ret < 0))
832                         goto out_release;
833
834                 read_len = ret;
835
836                 /*
837                  * NOTE: nonblocking mode only applies to the input. We
838                  * must not do the output in nonblocking mode as then we
839                  * could get stuck data in the internal pipe:
840                  */
841                 ret = do_splice_from(pipe, out, read_len,
842                                      flags & ~SPLICE_F_NONBLOCK);
843                 if (unlikely(ret < 0))
844                         goto out_release;
845
846                 bytes += ret;
847                 len -= ret;
848
849                 /*
850                  * In nonblocking mode, if we got back a short read then
851                  * that was due to either an IO error or due to the
852                  * pagecache entry not being there. In the IO error case
853                  * the _next_ splice attempt will produce a clean IO error
854                  * return value (not a short read), so in both cases it's
855                  * correct to break out of the loop here:
856                  */
857                 if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
858                         break;
859         }
860
861         pipe->nrbufs = pipe->curbuf = 0;
862
863         return bytes;
864
865 out_release:
866         /*
867          * If we did an incomplete transfer we must release
868          * the pipe buffers in question:
869          */
870         for (i = 0; i < PIPE_BUFFERS; i++) {
871                 struct pipe_buffer *buf = pipe->bufs + i;
872
873                 if (buf->ops) {
874                         buf->ops->release(pipe, buf);
875                         buf->ops = NULL;
876                 }
877         }
878         pipe->nrbufs = pipe->curbuf = 0;
879
880         /*
881          * If we transferred some data, return the number of bytes:
882          */
883         if (bytes > 0)
884                 return bytes;
885
886         return ret;
887 }
888
889 EXPORT_SYMBOL(do_splice_direct);

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
                      struct file *out, loff_t __user *off_out,
                      size_t len, unsigned int flags)
{
        struct pipe_inode_info *pipe;

        pipe = in->f_dentry->d_inode->i_pipe;
        if (pipe) {
                if (off_in)
                        return -ESPIPE;
                if (off_out) {
                        if (out->f_op->llseek == no_llseek)
                                return -EINVAL;
                        if (copy_from_user(&out->f_pos, off_out,
                                           sizeof(loff_t)))
                                return -EFAULT;
                }

                return do_splice_from(pipe, out, len, flags);
        }

        pipe = out->f_dentry->d_inode->i_pipe;
        if (pipe) {
                if (off_out)
                        return -ESPIPE;
                if (off_in) {
                        if (in->f_op->llseek == no_llseek)
                                return -EINVAL;
                        if (copy_from_user(&in->f_pos, off_in, sizeof(loff_t)))
                                return -EFAULT;
                }

                return do_splice_to(in, pipe, len, flags);
        }

        return -EINVAL;
}

asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
                           int fd_out, loff_t __user *off_out,
                           size_t len, unsigned int flags)
{
        long error;
        struct file *in, *out;
        int fput_in, fput_out;

        if (unlikely(!len))
                return 0;

        error = -EBADF;
        in = fget_light(fd_in, &fput_in);
        if (in) {
                if (in->f_mode & FMODE_READ) {
                        out = fget_light(fd_out, &fput_out);
                        if (out) {
                                if (out->f_mode & FMODE_WRITE)
                                        error = do_splice(in, off_in,
                                                          out, off_out,
                                                          len, flags);
                                fput_light(out, fput_out);
                        }
                }

                fput_light(in, fput_in);
        }

        return error;
}