[pandora-kernel.git] / fs/ext4/file.c
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

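/*
 * Wait until every pending unwritten-extent conversion on this inode has
 * completed, i.e. until the i_unwritten counter drops to zero.
 */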
void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
                   unsigned long nr_segs, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;
        size_t count = iov_length(iov, nr_segs);
        loff_t final_size = pos + count;

        if (pos >= inode->i_size)
                return 0;

        if ((pos & blockmask) || (final_size & blockmask))
                return 1;

        return 0;
}

static ssize_t
ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
                    unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct blk_plug plug;
        int unaligned_aio = 0;
        ssize_t ret;
        int overwrite = 0;
        size_t length = iov_length(iov, nr_segs);

        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb))
                unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);

        /* Unaligned direct AIO must be serialized; see comment above */
        if (unaligned_aio) {
                mutex_lock(ext4_aio_mutex(inode));
                ext4_unwritten_wait(inode);
        }

        BUG_ON(iocb->ki_pos != pos);

        mutex_lock(&inode->i_mutex);
        blk_start_plug(&plug);

        iocb->private = &overwrite;
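        /*
         * The overwrite flag is published through iocb->private so that
         * ext4's direct IO path can see whether this write only overwrites
         * already-allocated, initialized blocks.
         */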

        /* check whether we do a DIO overwrite or not */
        if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
            !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
                struct ext4_map_blocks map;
                unsigned int blkbits = inode->i_blkbits;
                int err, len;

                map.m_lblk = pos >> blkbits;
                map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
                        - map.m_lblk;
                len = map.m_len;

                err = ext4_map_blocks(NULL, inode, &map, 0);
                /*
                 * 'err == len' means that all of the blocks have been
                 * preallocated, whether or not they are initialized.  To
                 * exclude uninitialized extents, we also need to check
                 * m_flags: if we hit the extent cache, an initialized
                 * extent returns the EXT4_MAP_MAPPED flag, while a real
                 * lookup of an uninitialized extent returns no flags.
                 * Checking EXT4_MAP_MAPPED therefore covers both cases.
                 */
                if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
                        overwrite = 1;
        }

        ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        mutex_unlock(&inode->i_mutex);

        if (ret > 0 || ret == -EIOCBQUEUED) {
                ssize_t err;

                err = generic_write_sync(file, pos, ret);
                if (err < 0 && ret > 0)
                        ret = err;
        }
        blk_finish_plug(&plug);

        if (unaligned_aio)
                mutex_unlock(ext4_aio_mutex(inode));

        return ret;
}

static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
{
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
        ssize_t ret;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
                size_t length = iov_length(iov, nr_segs);

                if (pos > sbi->s_bitmap_maxbytes ||
                    (pos == sbi->s_bitmap_maxbytes && length > 0))
                        return -EFBIG;

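                /*
                 * Clamp the iovec so that the write stops exactly at the
                 * bitmap-mapped file size limit instead of failing outright.
                 */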
                if (pos + length > sbi->s_bitmap_maxbytes) {
                        nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
                                              sbi->s_bitmap_maxbytes - pos);
                }
        }

        if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
                ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
        else
                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

        return ret;
}

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = ext4_page_mkwrite,
        .remap_pages    = generic_file_remap_pages,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct address_space *mapping = file->f_mapping;

        if (!mapping->a_ops->readpage)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &ext4_file_vm_ops;
        return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct path path;
        char buf[64], *cp;

        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

                        handle = ext4_journal_start_sb(sb, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
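                /*
                 * Allocate the jbd2_inode without holding i_lock, then
                 * recheck ei->jinode under the lock; if another opener won
                 * the race and installed its own jbd2_inode, free ours
                 * below.
                 */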
                struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);

                spin_lock(&inode->i_lock);
                if (!ei->jinode) {
                        if (!jinode) {
                                spin_unlock(&inode->i_lock);
                                return -ENOMEM;
                        }
                        ei->jinode = jinode;
                        jbd2_journal_init_jbd_inode(ei->jinode, inode);
                        jinode = NULL;
                }
                spin_unlock(&inode->i_lock);
                if (unlikely(jinode != NULL))
                        jbd2_free_inode(jinode);
        }
        return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), so that SEEK_DATA/SEEK_HOLE can
 * be handled for block-mapped and extent-mapped files in the same function.
 * Once the extent status tree is fully implemented, it will track all
 * extent state for a file and we can use it directly to retrieve the
 * offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When retrieving the offset for SEEK_DATA/SEEK_HOLE, we also need to look
 * at the page cache to check whether there is any data between
 * [startoff, endoff]: if the range contains an unwritten extent, we treat
 * that extent as data or as a hole depending on whether the page cache
 * holds data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     struct ext4_map_blocks *map,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (map->m_lblk + map->m_len) << blkbits;

        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;

        pagevec_init(&pvec, 0);
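        /*
         * Walk the page cache over [index, end], up to PAGEVEC_SIZE pages
         * at a time, looking at buffer heads to decide whether the
         * unwritten range is backed by data.
         */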
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
                        if (whence == SEEK_DATA)
                                break;

                        BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first pass through the loop, or the
                         * current offset is still before the end offset,
                         * there is a hole at this offset.
                         */
                        if (lastoff == startoff || lastoff < endoff)
                                found = 1;
                        break;
                }

                /*
                 * If this is the first pass through the loop and the offset
                 * is smaller than the offset of the first page found, there
                 * is a hole at this offset.
                 */
                if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If the current offset is still within the given
                         * range but this page lies beyond it, the current
                         * offset is a hole.
                         */
                        if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        if (page_has_buffers(page)) {
                                lastoff = page_offset(page);
                                bh = head = page_buffers(page);
                                do {
                                        if (buffer_uptodate(bh) ||
                                            buffer_unwritten(bh)) {
                                                if (whence == SEEK_DATA)
                                                        found = 1;
                                        } else {
                                                if (whence == SEEK_HOLE)
                                                        found = 1;
                                        }
                                        if (found) {
                                                *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                                unlock_page(page);
                                                goto out;
                                        }
                                        lastoff += bh->b_size;
                                        bh = bh->b_this_page;
                                } while (bh != head);
                        }

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * Fewer pages were found than we asked for, so there must
                 * be a hole after the last page we saw.
                 */
                if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
                }

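                /* Continue the scan from the page after the last one seen. */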
                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

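        /*
         * Scan forward one logical block at a time until we find something
         * that counts as data: a written extent, a delayed extent, or an
         * unwritten extent backed by data in the page cache.
         */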
        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        if (last != start)
                                dataoff = last << blkbits;
                        break;
                }

                /*
                 * If there is a delayed extent at this offset, treat it
                 * as data.
                 */
                es.start = last;
                (void)ext4_es_find_extent(inode, &es);
                if (last >= es.start &&
                    last < es.start + es.len) {
                        if (last != start)
                                dataoff = last << blkbits;
                        break;
                }

                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole depending on whether the
                 * page cache has data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                                              &map, &dataoff);
                        if (unwritten)
                                break;
                }

                last++;
                dataoff = last << blkbits;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (dataoff > isize)
                return -ENXIO;

        if (dataoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
                return -EINVAL;
        if (dataoff > maxsize)
                return -EINVAL;

        if (dataoff != file->f_pos) {
                file->f_pos = dataoff;
                file->f_version = 0;
        }

        return dataoff;
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

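        /*
         * Scan forward one logical block at a time, skipping anything that
         * counts as data (written extents, delayed extents, and unwritten
         * extents backed by page-cache data), until a hole is found.
         */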
        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        last += ret;
                        holeoff = last << blkbits;
                        continue;
                }

                /*
                 * If there is a delayed extent at this offset, skip over
                 * this extent.
                 */
                es.start = last;
                (void)ext4_es_find_extent(inode, &es);
                if (last >= es.start &&
                    last < es.start + es.len) {
                        last = es.start + es.len;
                        holeoff = last << blkbits;
                        continue;
                }

                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole depending on whether the
                 * page cache has data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                                              &map, &holeoff);
                        if (!unwritten) {
                                last += ret;
                                holeoff = last << blkbits;
                                continue;
                        }
                }

                /* found a hole */
                break;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (holeoff > isize)
                holeoff = isize;

        if (holeoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
                return -EINVAL;
        if (holeoff > maxsize)
                return -EINVAL;

        if (holeoff != file->f_pos) {
                file->f_pos = holeoff;
                file->f_version = 0;
        }

        return holeoff;
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = ext4_file_write,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
        .get_acl        = ext4_get_acl,
        .fiemap         = ext4_fiemap,
};
