/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"


/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         int write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);
        int total_copied = 0;

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 *
                 * Disable pagefault to avoid recursive lock since
                 * the pages are already locked
                 */
                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
                pagefault_enable();

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);
                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_aio_write to fault the page in */
                if (unlikely(copied == 0))
                        break;

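                /*
                 * a short copy leaves us part way through the current
                 * page.  Bump the offset and retry the same page on the
                 * next pass, otherwise move on to the next prepared page.
                 */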
                if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                if (!pages[i])
                        break;
                /* PageChecked is some magic around finding pages that
                 * have been modified without going through
                 * btrfs_set_page_dirty.  Clear it here.
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct file *file,
                                   struct page **pages,
                                   size_t num_pages,
                                   loff_t pos,
                                   size_t write_bytes)
{
        int err = 0;
        int i;
        struct inode *inode = fdentry(file)->d_inode;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = (write_bytes + pos - start_pos +
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        NULL);
        BUG_ON(err);

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }
        if (end_pos > isize) {
                i_size_write(inode, end_pos);
                /* we've only changed i_size in ram, and we haven't updated
                 * the disk i_size.  There is no need to log the inode
                 * at this time.
                 */
        }
        return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                            int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
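        /*
         * each pass through this loop pulls one overlapping extent_map
         * out of the tree and re-inserts the pieces (if any) that fall
         * outside [start, end], using the split maps allocated at the
         * top of the pass
         */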
        while (1) {
                if (!split)
                        split = alloc_extent_map(GFP_NOFS);
                if (!split2)
                        split2 = alloc_extent_map(GFP_NOFS);

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                remove_extent_mapping(em_tree, em);

                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        split->orig_start = em->orig_start;
                        split->block_start = em->block_start;

                        if (compressed)
                                split->block_len = em->block_len;
                        else
                                split->block_len = split->len;

                        split->bdev = em->bdev;
                        split->flags = flags;
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;

                        if (compressed) {
                                split->block_len = em->block_len;
                                split->block_start = em->block_start;
                                split->orig_start = em->orig_start;
                        } else {
                                split->block_len = split->len;
                                split->block_start = em->block_start + diff;
                                split->orig_start = split->start;
                        }

                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
                        split = NULL;
                }
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
        return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a byte number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
                       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

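        /*
         * the main loop: look up the extent item covering search_start,
         * then walk forward one slot at a time.  Each extent overlapping
         * [start, end) is truncated, split or deleted depending on how
         * it intersects the range (see the diagrams below)
         */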
        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
                                               search_start, -1);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == inode->i_ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid > inode->i_ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                } else {
                        WARN_ON(1);
                        extent_end = search_start;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                search_start = max(key.offset, start);
                if (recow) {
                        btrfs_release_path(root, path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(root, path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset);
                                BUG_ON(ret);
                                *hint_byte = disk_bytenr;
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (disk_bytenr > 0) {
                                inode_sub_bytes(inode, end - key.offset);
                                *hint_byte = disk_bytenr;
                        }
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (disk_bytenr > 0) {
                                inode_sub_bytes(inode, extent_end - start);
                                *hint_byte = disk_bytenr;
                        }
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset);
                                BUG_ON(ret);
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                *hint_byte = disk_bytenr;
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        BUG_ON(ret);

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(root, path);
                        continue;
                }

                BUG_ON(1);
        }

        if (del_nr > 0) {
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                BUG_ON(ret);
        }

        btrfs_free_path(path);
        return ret;
}

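/*
 * helper for btrfs_mark_extent_written: check whether the file extent
 * item in the given leaf slot points at the same disk extent (same
 * bytenr and orig_offset, no compression or other encoding) as the one
 * being split.  On success *start and *end are filled with the
 * neighbour's range so the caller can merge with it.
 */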
static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split
 * into two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;

        btrfs_drop_extent_cache(inode, start, end - 1, 0);

        path = btrfs_alloc_path();
        BUG_ON(!path);
again:
        recow = 0;
        split = start;
        key.objectid = inode->i_ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != inode->i_ino ||
               key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     inode->i_ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     inode->i_ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

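        /*
         * no mergeable neighbour, so split the preallocated extent at
         * 'split': duplicate the item, trim the two halves and take a
         * reference on the disk extent for the new item.  The loop runs
         * twice when [start, end] sits in the middle of the extent.
         */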
        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(root, path);
                        goto again;
                }
                BUG_ON(ret < 0);

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           inode->i_ino, orig_offset);
                BUG_ON(ret);

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             inode->i_ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(root, path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        inode->i_ino, orig_offset);
                BUG_ON(ret);
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             inode->i_ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(root, path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        inode->i_ino, orig_offset);
                BUG_ON(ret);
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                BUG_ON(ret);
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * this gets pages into the page cache and locks them down.  It also
 * properly waits for data=ordered extents to finish before allowing
 * the pages to be modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
                         unsigned long last_index, size_t write_bytes)
{
        struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
        int err = 0;
        u64 start_pos;
        u64 last_pos;

        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

        if (start_pos > inode->i_size) {
                err = btrfs_cont_expand(inode, start_pos);
                if (err)
                        return err;
        }

        memset(pages, 0, num_pages * sizeof(struct page *));
again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = grab_cache_page(inode->i_mapping, index + i);
                if (!pages[i]) {
                        int c;

                        /* drop the pages we already grabbed and bail */
                        for (c = i - 1; c >= 0; c--) {
                                unlock_page(pages[c]);
                                page_cache_release(pages[c]);
                        }
                        return -ENOMEM;
                }
                wait_on_page_writeback(pages[i]);
        }
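        /*
         * if the range overlaps i_size there may be ordered extents in
         * flight against these pages.  Lock the range; if an ordered
         * extent overlaps, drop everything, wait for it to finish and
         * start over.  Otherwise clear any stale delalloc bits before
         * redirtying the pages.
         */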
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent_bits(&BTRFS_I(inode)->io_tree,
                                 start_pos, last_pos - 1, 0, &cached_state,
                                 GFP_NOFS);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             start_pos, last_pos - 1,
                                             &cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
                        btrfs_wait_ordered_range(inode, start_pos,
                                                 last_pos - start_pos);
                        goto again;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
                                  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
                                  GFP_NOFS);
                unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                     start_pos, last_pos - 1, &cached_state,
                                     GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                clear_page_dirty_for_io(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
}

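/*
 * the main write entry point.  O_DIRECT writes are handed to the
 * generic direct IO code first; anything left over (and plain buffered
 * writes) is copied into the page cache in batches of up to nrptrs
 * pages at a time.
 */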
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                                    const struct iovec *iov,
                                    unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page *pinned[2];
        struct page **pages = NULL;
        struct iov_iter i;
        loff_t *ppos = &iocb->ki_pos;
        loff_t start_pos;
        ssize_t num_written = 0;
        ssize_t err = 0;
        size_t count;
        size_t ocount;
        int ret = 0;
        int nrptrs;
        unsigned long first_index;
        unsigned long last_index;
        int will_write;
        int buffered = 0;
        int copied = 0;
        int dirty_pages = 0;

        will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
                      (file->f_flags & O_DIRECT));

        pinned[0] = NULL;
        pinned[1] = NULL;

        start_pos = pos;

        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        mutex_lock(&inode->i_mutex);

        err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
        if (err)
                goto out;
        count = ocount;

        current->backing_dev_info = inode->i_mapping->backing_dev_info;
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
                goto out;

        if (count == 0)
                goto out;

        err = file_remove_suid(file);
        if (err)
                goto out;

        file_update_time(file);
        BTRFS_I(inode)->sequence++;

        if (unlikely(file->f_flags & O_DIRECT)) {
                num_written = generic_file_direct_write(iocb, iov, &nr_segs,
                                                        pos, ppos, count,
                                                        ocount);
                /*
                 * the generic O_DIRECT will update in-memory i_size after the
                 * DIOs are done.  But our endio handlers that update the on
                 * disk i_size never update past the in memory i_size.  So we
                 * need one more update here to catch any additions to the
                 * file
                 */
                if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
                        btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
                        mark_inode_dirty(inode);
                }

                if (num_written < 0) {
                        ret = num_written;
                        num_written = 0;
                        goto out;
                } else if (num_written == count) {
                        /* pick up pos changes done by the generic code */
                        pos = *ppos;
                        goto out;
                }
                /*
                 * We are going to do buffered for the rest of the range, so we
                 * need to make sure to invalidate the buffered pages when we're
                 * done.
                 */
                buffered = 1;
                pos += num_written;
        }

        iov_iter_init(&i, iov, nr_segs, count, num_written);
        nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                goto out;
        }

        /* generic_write_checks can change our pos */
        start_pos = pos;

        first_index = pos >> PAGE_CACHE_SHIFT;
        last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;

        /*
         * there are lots of better ways to do this, but this code
         * makes sure the first and last page in the file range are
         * up to date and ready for cow
         */
        if ((pos & (PAGE_CACHE_SIZE - 1))) {
                pinned[0] = grab_cache_page(inode->i_mapping, first_index);
                if (!PageUptodate(pinned[0])) {
                        ret = btrfs_readpage(NULL, pinned[0]);
                        BUG_ON(ret);
                        wait_on_page_locked(pinned[0]);
                } else {
                        unlock_page(pinned[0]);
                }
        }
        if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
                pinned[1] = grab_cache_page(inode->i_mapping, last_index);
                if (!PageUptodate(pinned[1])) {
                        ret = btrfs_readpage(NULL, pinned[1]);
                        BUG_ON(ret);
                        wait_on_page_locked(pinned[1]);
                } else {
                        unlock_page(pinned[1]);
                }
        }

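        /*
         * the buffered write loop: each pass faults the user buffer in,
         * reserves delalloc space for a batch of pages, locks them with
         * prepare_pages and copies the data.  Space reserved for pages
         * left untouched by a short copy is released again.
         */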
        while (iov_iter_count(&i) > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
                size_t write_bytes = min(iov_iter_count(&i),
                                         nrptrs * (size_t)PAGE_CACHE_SIZE -
                                         offset);
                size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
                                        PAGE_CACHE_SHIFT;

                WARN_ON(num_pages > nrptrs);
                memset(pages, 0, sizeof(struct page *) * nrptrs);

                /*
                 * Fault pages before locking them in prepare_pages
                 * to avoid recursive lock
                 */
                if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) {
                        ret = -EFAULT;
                        goto out;
                }

                ret = btrfs_delalloc_reserve_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                if (ret)
                        goto out;

                ret = prepare_pages(root, file, pages, num_pages,
                                    pos, first_index, last_index,
                                    write_bytes);
                if (ret) {
                        btrfs_delalloc_release_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                        goto out;
                }

                copied = btrfs_copy_from_user(pos, num_pages,
                                           write_bytes, pages, &i);
                dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
                                        PAGE_CACHE_SHIFT;

                if (num_pages > dirty_pages) {
                        if (copied > 0)
                                atomic_inc(
                                        &BTRFS_I(inode)->outstanding_extents);
                        btrfs_delalloc_release_space(inode,
                                        (num_pages - dirty_pages) <<
                                        PAGE_CACHE_SHIFT);
                }

                if (copied > 0) {
                        dirty_and_release_pages(NULL, root, file, pages,
                                                dirty_pages, pos, copied);
                }

                btrfs_drop_pages(pages, num_pages);

                if (copied > 0) {
                        if (will_write) {
                                filemap_fdatawrite_range(inode->i_mapping, pos,
                                                         pos + copied - 1);
                        } else {
                                balance_dirty_pages_ratelimited_nr(
                                                        inode->i_mapping,
                                                        dirty_pages);
                                if (dirty_pages <
                                    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
                                        btrfs_btree_balance_dirty(root, 1);
                                btrfs_throttle(root);
                        }
                }

                pos += copied;
                num_written += copied;

                cond_resched();
        }
out:
        mutex_unlock(&inode->i_mutex);
        if (ret)
                err = ret;

        kfree(pages);
        if (pinned[0])
                page_cache_release(pinned[0]);
        if (pinned[1])
                page_cache_release(pinned[1]);
        *ppos = pos;

        /*
         * we want to make sure fsync finds this change
         * but we haven't joined a transaction running right now.
         *
         * Later on, someone is sure to update the inode and get the
         * real transid recorded.
         *
         * We set last_trans now to the fs_info generation + 1,
         * this will either be one more than the running transaction
         * or the generation used for the next transaction if there isn't
         * one running right now.
         */
        BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

        if (num_written > 0 && will_write) {
                struct btrfs_trans_handle *trans;

                err = btrfs_wait_ordered_range(inode, start_pos, num_written);
                if (err)
                        num_written = err;

                if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
                        trans = btrfs_start_transaction(root, 0);
                        if (IS_ERR(trans)) {
                                num_written = PTR_ERR(trans);
                                goto done;
                        }
                        mutex_lock(&inode->i_mutex);
                        ret = btrfs_log_dentry_safe(trans, root,
                                                    file->f_dentry);
                        mutex_unlock(&inode->i_mutex);
                        if (ret == 0) {
                                ret = btrfs_sync_log(trans, root);
                                if (ret == 0)
                                        btrfs_end_transaction(trans, root);
                                else
                                        btrfs_commit_transaction(trans, root);
                        } else if (ret != BTRFS_NO_LOG_SYNC) {
                                btrfs_commit_transaction(trans, root);
                        } else {
                                btrfs_end_transaction(trans, root);
                        }
                }
                if (file->f_flags & O_DIRECT && buffered) {
                        invalidate_mapping_pages(inode->i_mapping,
                              start_pos >> PAGE_CACHE_SHIFT,
                             (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
                }
        }
done:
        current->backing_dev_info = NULL;
        return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
        /*
         * ordered_data_close is set by setattr when we are about to truncate
         * a file from a non-zero size to a zero size.  This tries to
         * flush down new bytes that may have been written if the
         * application were using truncate to replace a file in place.
         */
        if (BTRFS_I(inode)->ordered_data_close) {
                BTRFS_I(inode)->ordered_data_close = 0;
                btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
                if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
                        filemap_flush(inode->i_mapping);
        }
        if (filp->private_data)
                btrfs_ioctl_trans_end(filp);
        return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, int datasync)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
        struct btrfs_trans_handle *trans;


        /* we wait first, since the writeback may change the inode */
        root->log_batch++;
        /* the VFS called filemap_fdatawrite for us */
        btrfs_wait_ordered_range(inode, 0, (u64)-1);
        root->log_batch++;

        /*
         * check the transaction that last modified this inode
         * and see if it's already been committed
         */
        if (!BTRFS_I(inode)->last_trans)
                goto out;

        /*
         * if the last transaction that changed this file was before
         * the current transaction, we can bail out now without any
         * syncing
         */
        mutex_lock(&root->fs_info->trans_mutex);
        if (BTRFS_I(inode)->last_trans <=
            root->fs_info->last_trans_committed) {
                BTRFS_I(inode)->last_trans = 0;
                mutex_unlock(&root->fs_info->trans_mutex);
                goto out;
        }
        mutex_unlock(&root->fs_info->trans_mutex);

        /*
         * ok we haven't committed the transaction yet, let's do a commit
         */
        if (file->private_data)
                btrfs_ioctl_trans_end(file);

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out;
        }

        ret = btrfs_log_dentry_safe(trans, root, dentry);
        if (ret < 0)
                goto out;

        /* we've logged all the items and now have a consistent
         * version of the file in the log.  It is possible that
         * someone will come in and modify the file, but that's
         * fine because the log is consistent on disk, and we
         * have references to all of the file's extents
         *
         * It is possible that someone will come in and log the
         * file again, but that will end up using the synchronization
         * inside btrfs_sync_log to keep things safe.
         */
        mutex_unlock(&dentry->d_inode->i_mutex);

        if (ret != BTRFS_NO_LOG_SYNC) {
                if (ret > 0) {
                        ret = btrfs_commit_transaction(trans, root);
                } else {
                        ret = btrfs_sync_log(trans, root);
                        if (ret == 0)
                                ret = btrfs_end_transaction(trans, root);
                        else
                                ret = btrfs_commit_transaction(trans, root);
                }
        } else {
                ret = btrfs_end_transaction(trans, root);
        }
        mutex_lock(&dentry->d_inode->i_mutex);
out:
        return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct address_space *mapping = filp->f_mapping;

        if (!mapping->a_ops->readpage)
                return -ENOEXEC;

        file_accessed(filp);
        vma->vm_ops = &btrfs_file_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;

        return 0;
}

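/*
 * preallocate extents for the range [offset, offset + len).  Beyond
 * the default mode, only FALLOC_FL_KEEP_SIZE is supported.  After
 * waiting out ordered IO and locking the range, every hole (and every
 * chunk past i_size that isn't already preallocated) gets a
 * preallocated extent.
 */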
static long btrfs_fallocate(struct file *file, int mode,
                            loff_t offset, loff_t len)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct extent_state *cached_state = NULL;
        u64 cur_offset;
        u64 last_byte;
        u64 alloc_start;
        u64 alloc_end;
        u64 alloc_hint = 0;
        u64 locked_end;
        u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
        struct extent_map *em;
        int ret;

        alloc_start = offset & ~mask;
        alloc_end = (offset + len + mask) & ~mask;

        /* We only support the FALLOC_FL_KEEP_SIZE mode */
        if (mode & ~FALLOC_FL_KEEP_SIZE)
                return -EOPNOTSUPP;

        /*
         * wait for ordered IO before we have any locks.  We'll loop again
         * below with the locks held.
         */
        btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

        mutex_lock(&inode->i_mutex);
        ret = inode_newsize_ok(inode, alloc_end);
        if (ret)
                goto out;

        if (alloc_start > inode->i_size) {
                ret = btrfs_cont_expand(inode, alloc_start);
                if (ret)
                        goto out;
        }

        ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
        if (ret)
                goto out;

        locked_end = alloc_end - 1;
        while (1) {
                struct btrfs_ordered_extent *ordered;

                /* the extent lock is ordered inside the running
                 * transaction
                 */
                lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
                                 locked_end, 0, &cached_state, GFP_NOFS);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            alloc_end - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > alloc_start &&
                    ordered->file_offset < alloc_end) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             alloc_start, locked_end,
                                             &cached_state, GFP_NOFS);
                        /*
                         * we can't wait on the range with the transaction
                         * running or with the extent lock held
                         */
                        btrfs_wait_ordered_range(inode, alloc_start,
                                                 alloc_end - alloc_start);
                } else {
                        if (ordered)
                                btrfs_put_ordered_extent(ordered);
                        break;
                }
        }

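        /*
         * walk the extent maps for the whole range: holes, and anything
         * past i_size that isn't already preallocated, get preallocated
         * extents; everything else is skipped over
         */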
        cur_offset = alloc_start;
        while (1) {
                em = btrfs_get_extent(inode, NULL, 0, cur_offset,
                                      alloc_end - cur_offset, 0);
                BUG_ON(IS_ERR(em) || !em);
                last_byte = min(extent_map_end(em), alloc_end);
                last_byte = (last_byte + mask) & ~mask;
                if (em->block_start == EXTENT_MAP_HOLE ||
                    (cur_offset >= inode->i_size &&
                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
                        ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
                                                        last_byte - cur_offset,
                                                        1 << inode->i_blkbits,
                                                        offset + len,
                                                        &alloc_hint);
                        if (ret < 0) {
                                free_extent_map(em);
                                break;
                        }
                }
                free_extent_map(em);

                cur_offset = last_byte;
                if (cur_offset >= alloc_end) {
                        ret = 0;
                        break;
                }
        }
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
                             &cached_state, GFP_NOFS);

        btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
out:
        mutex_unlock(&inode->i_mutex);
        return ret;
}

const struct file_operations btrfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .splice_read    = generic_file_splice_read,
        .aio_write      = btrfs_file_aio_write,
        .mmap           = btrfs_file_mmap,
        .open           = generic_file_open,
        .release        = btrfs_release_file,
        .fsync          = btrfs_sync_file,
        .fallocate      = btrfs_fallocate,
        .unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_ioctl,
#endif
};