/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];

		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
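
/*
 * Worked example of the copy loop above (illustrative numbers only,
 * assuming 4K pages): a 6000 byte write at pos 1000 copies 3096 bytes
 * into page 0 at offset 1000, then 2904 bytes into page 1 at offset 0.
 * If the user buffer faults partway through, the atomic copy comes up
 * short and we return what was copied so far (possibly 0), letting the
 * caller fault the buffer back in and retry.
 */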
/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
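
/*
 * Illustration of the alignment math above (assuming a 4K sectorsize):
 * a 100 byte write at pos 5000 yields start_pos = 4096 and
 * num_bytes = 4096, i.e. the delalloc range is rounded out to the
 * sector boundaries on both sides of the bytes actually written.
 */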
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);
		BUG_ON(!split || !split2);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
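
/*
 * Note on the two preallocated maps above: an extent that sticks out on
 * both sides of [start, end] must leave two survivors behind, one in
 * front of the range and one after it, which is why both 'split' and
 * 'split2' are allocated up front, before the tree lock is taken.
 */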
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == inode->i_ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > inode->i_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow) {
			btrfs_release_path(root, path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(root, path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret);
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			BUG_ON(ret);
			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(root, path);
			continue;
		}

		BUG_ON(1);
	}

	if (del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}

	btrfs_free_path(path);
	return ret;
}
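
/*
 * The four diagrams above are exhaustive: an extent intersecting
 * [start, end) either surrounds the range, hangs off its front, hangs
 * off its back, or is swallowed whole.  Only the swallowed case is
 * batched (via del_slot/del_nr), so runs of doomed items can be removed
 * with a single btrfs_del_items call.
 */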
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
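
/*
 * "Mergeable" above means the neighbouring item is a plain
 * (uncompressed, unencoded) regular extent backed by the same disk
 * extent at the matching logical offset, so the two items describe
 * contiguous bytes and can be collapsed without moving any data.
 */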
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	recow = 0;
	split = start;
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(root, path);
			goto again;
		}
		BUG_ON(ret < 0);

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   inode->i_ino, orig_offset);
		BUG_ON(ret);

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}
out:
	btrfs_free_path(path);
	return 0;
}
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos)
{
	int ret = 0;

	if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}
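
/*
 * For example, a write landing mid-page into a page that is not yet
 * uptodate must read the page first, or the existing bytes around the
 * write would be clobbered at writeback time.  Writes that start on a
 * page boundary skip the readpage path entirely.
 */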
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err)
			return err;
	}

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state,
				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				 GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}
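
/*
 * The goto-again dance above is the price of lock ordering: we cannot
 * wait on an ordered extent while holding the page locks and the extent
 * range lock, so on collision everything is dropped, the ordered range
 * is waited out, and page setup restarts from scratch.
 */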
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	unsigned long last_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0)
			dirty_pages = 0;
		else
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;

		/*
		 * If we had a short copy we need to release the excess delalloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * wrote out.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0)
				atomic_inc(
					&BTRFS_I(inode)->outstanding_extents);
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);
		btrfs_throttle(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}
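
/*
 * Sizing example for nrptrs above (illustrative, assuming 4K pages and
 * 8 byte pointers): the pages array is capped at one page's worth of
 * pointers, 4096 / 8 = 512 entries, so each pass through the loop
 * copies at most 2MB before delalloc accounting and dirty page
 * balancing get a chance to run.
 */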
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	/*
	 * the generic O_DIRECT will update in-memory i_size after the
	 * DIOs are done.  But our endio handlers that update the on
	 * disk i_size never update past the in memory i_size.  So we
	 * need one more update here to catch any additions to the
	 * file
	 */
	if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
		mark_inode_dirty(inode);
	}

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	file_update_time(file);
	BTRFS_I(inode)->sequence++;

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
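
/*
 * Note that generic_write_sync() above only does real work for
 * O_SYNC/O_DSYNC writes and IS_SYNC inodes; ordinary writes return as
 * soon as the pages are dirtied, relying on last_trans to make a later
 * fsync notice the change.
 */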
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	/* the VFS called filemap_fdatawrite for us */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}
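
/*
 * The payoff of the branches above: a tree log sync writes out just the
 * log tree blocks needed for this inode, while a full transaction
 * commit rewrites every dirty tree in the filesystem.  The commit paths
 * are kept only as the fallback for cases the log cannot handle.
 */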
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
	if (ret)
		goto out;

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);

	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
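
/*
 * Alignment example for the masking above (assuming 4K sectors):
 * fallocate(offset=1000, len=3000) gives alloc_start = 0 and
 * alloc_end = 4096, so space is always preallocated in whole sectors
 * even when the request is not aligned.
 */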
const struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read       = generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};