/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
/*
 * when auto defrag is enabled we queue up these defrag structs
 * to remember which inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* inode objectid */
	u64 ino;
	/* transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;
	/* root objectid */
	u64 root;
	/* last offset we were able to defrag */
	u64 last_offset;
	/* if we've wrapped around back to zero once already */
	int cycled;
};
/* pop a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
				     struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		if (defrag->ino < entry->ino)
			p = &parent->rb_left;
		else if (defrag->ino > entry->ino)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			goto exists;
		}
	}
	BTRFS_I(inode)->in_defrag = 1;
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return;

exists:
	kfree(defrag);
	return;
}
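/*
 * The walk above is the standard kernel rbtree insertion idiom; a minimal
 * generic sketch (hypothetical node type keyed by a u64, for illustration
 * only; duplicate keys are handled as in the function above) looks like:
 *
 *	struct rb_node **p = &root->rb_node, *parent = NULL;
 *	while (*p) {
 *		parent = *p;
 *		entry = rb_entry(parent, struct my_node, rb_node);
 *		p = key < entry->key ? &parent->rb_left : &parent->rb_right;
 *	}
 *	rb_link_node(&new->rb_node, parent, p);
 *	rb_insert_color(&new->rb_node, root);
 */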
/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;

	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	if (BTRFS_I(inode)->in_defrag)
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!BTRFS_I(inode)->in_defrag)
		__btrfs_add_inode_defrag(inode, defrag);
	else
		kfree(defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}
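/*
 * Note that in_defrag is tested once without the lock as a cheap early
 * bailout and then re-tested under defrag_inodes_lock before inserting;
 * two racing callers may both allocate a record, but only one insert
 * wins and the loser's allocation is freed.
 */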
/*
 * must be called with the defrag_inodes lock held
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
					     u64 ino,
					     struct rb_node **next)
{
	struct inode_defrag *entry = NULL;
	struct rb_node *p;
	struct rb_node *parent = NULL;

	p = info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		if (ino < entry->ino)
			p = parent->rb_left;
		else if (ino > entry->ino)
			p = parent->rb_right;
		else
			return entry;
	}
	if (next) {
		while (parent && ino > entry->ino) {
			parent = rb_next(parent);
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		}
		*next = parent;
	}
	return NULL;
}
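/*
 * Worked example: with defrag records for inodes {50, 200} in the tree,
 * a lookup for ino 100 returns NULL and, when next is non-NULL, sets
 * *next to the node for ino 200 -- the first record at or after the
 * requested inode number.
 */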
/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct rb_node *n;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	u64 first_ino = 0;
	int num_defrag;
	int defrag_batch = 1024;

	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;

	atomic_inc(&fs_info->defrag_running);
	spin_lock(&fs_info->defrag_inodes_lock);
	while (1) {
		n = NULL;

		/* find an inode to defrag */
		defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n);
		if (!defrag) {
			if (n)
				defrag = rb_entry(n, struct inode_defrag,
						  rb_node);
			else if (first_ino) {
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		/* remove it from the rbtree */
		first_ino = defrag->ino + 1;
		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

		if (btrfs_fs_closing(fs_info))
			goto next_free;

		spin_unlock(&fs_info->defrag_inodes_lock);

		/* get the inode */
		key.objectid = defrag->root;
		btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
		key.offset = (u64)-1;
		inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(inode_root))
			goto next;

		key.objectid = defrag->ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;

		inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
		if (IS_ERR(inode))
			goto next;

		/* do a chunk of defrag */
		BTRFS_I(inode)->in_defrag = 0;
		range.start = defrag->last_offset;
		num_defrag = btrfs_defrag_file(inode, NULL, &range,
					       defrag->transid, defrag_batch);
		/*
		 * if we filled the whole defrag batch, there
		 * must be more work to do.  Queue this defrag
		 * again
		 */
		if (num_defrag == defrag_batch) {
			defrag->last_offset = range.start;
			__btrfs_add_inode_defrag(inode, defrag);
			/*
			 * we don't want to kfree defrag, we added it back to
			 * the rbtree
			 */
			defrag = NULL;
		} else if (defrag->last_offset && !defrag->cycled) {
			/*
			 * we didn't fill our defrag batch, but
			 * we didn't start at zero.  Make sure we loop
			 * around to the start of the file.
			 */
			defrag->last_offset = 0;
			defrag->cycled = 1;
			__btrfs_add_inode_defrag(inode, defrag);
			defrag = NULL;
		}

		iput(inode);
next:
		spin_lock(&fs_info->defrag_inodes_lock);
next_free:
		kfree(defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);

	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
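/*
 * The requeue above bounds the time spent on any one file: each pass
 * defrags at most defrag_batch (1024) pages, then the record goes back
 * into the rbtree so other inodes get a turn before this one resumes
 * from last_offset.
 */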
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
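/*
 * Offset bookkeeping, worked through (assuming PAGE_CACHE_SIZE == 4096):
 * for pos = 6144 the initial offset is 6144 & 4095 = 2048, so the first
 * iteration copies at most 2048 bytes into page 0.  A full copy then
 * moves to page 1 with offset 0, while a short copy only advances the
 * offset within the current page and retries from there.
 */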
/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
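/*
 * Rounding, worked through (assuming sectorsize == 4096): pos = 6000 and
 * write_bytes = 3000 give start_pos = 4096 and num_bytes = 8192, so
 * delalloc is set on the whole-sector range [4096, 12287] even though
 * the write itself covers bytes [6000, 8999].
 */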
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		BUG_ON(!split || !split2);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
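/*
 * Split example: dropping [4096, 8191] from a cached mapping that covers
 * [0, 12287] removes the original extent_map and re-inserts two pieces,
 * [0, 4095] from the first split and [8192, 12287] from the second; the
 * tail's block_start is shifted by diff unless the extent is compressed,
 * in which case the original block range is kept.
 */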
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, -1);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow) {
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret);
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			BUG_ON(ret);

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}

	btrfs_free_path(path);
	return ret;
}
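/*
 * Typical use: callers that are about to write or clone into
 * [start, end) drop the old file extent items with this function first
 * and then insert the replacement items, so the tree never describes
 * the same file range twice.
 */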
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
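/*
 * Example: a neighbouring BTRFS_FILE_EXTENT_REG item is mergeable only
 * if it points into the same disk extent (same bytenr) at the offset
 * its key position implies (key.offset - orig_offset); compression,
 * encryption or other encodings disqualify it because the byte ranges
 * are then not trivially joinable.
 */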
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		BUG_ON(ret < 0);

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset);
		BUG_ON(ret);

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset);
		BUG_ON(ret);
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset);
		BUG_ON(ret);
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}
out:
	btrfs_free_path(path);
	return 0;
}
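/*
 * Example: marking [4096, 8191] written inside a prealloc extent that
 * covers [0, 12287] ends up with three items: prealloc [0, 4095],
 * regular [4096, 8191] and prealloc [8192, 12287]; each split bumps the
 * ref count on the shared disk extent, and later merges drop it again.
 */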
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state,
				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				  GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess delalloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);
		btrfs_throttle(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}
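/*
 * Sizing, worked through (assuming 4K pages and 8-byte pointers): nrptrs
 * is capped at PAGE_CACHE_SIZE / sizeof(struct page *) == 512 entries,
 * clamped by the caller's dirty-throttling headroom and raised to a
 * floor of 8, so a 1MB write proceeds in chunks of at most nrptrs pages
 * per trip around the loop.
 */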
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	/*
	 * the generic O_DIRECT will update in-memory i_size after the
	 * DIOs are done.  But our endio handlers that update the on
	 * disk i_size never update past the in memory i_size.  So we
	 * need one more update here to catch any additions to the
	 * file
	 */
	if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
		mark_inode_dirty(inode);
	}

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	err = btrfs_update_time(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	BTRFS_I(inode)->sequence++;

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	smp_mb();
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR_OR_NULL(em));
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = (last_byte + mask) & ~mask;

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {

			/*
			 * Make sure we have enough space before we do the
			 * allocation.
			 */
			ret = btrfs_check_data_free_space(inode, last_byte -
							  cur_offset);
			if (ret) {
				free_extent_map(em);
				break;
			}

			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);

			/* Let go of our reservation. */
			btrfs_free_reserved_data_space(inode, last_byte -
						       cur_offset);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
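/*
 * Alignment, worked through (assuming sectorsize == 4096, mask == 4095):
 * fallocate(offset = 1000, len = 5000) yields alloc_start = 0 and
 * alloc_end = 8192, so preallocation covers whole sectors even though
 * the requested byte range is only [1000, 5999].
 */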
static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 orig_start = *offset;
	u64 len = i_size_read(inode);
	u64 last_end = 0;
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state, GFP_NOFS);

	/*
	 * Delalloc is such a pain.  If we have a hole and we have pending
	 * delalloc for a portion of the hole we will get back a hole that
	 * exists for the entire range since it hasn't been actually written
	 * yet.  So to take care of this case we need to look for an extent
	 * just before the position we want in case there is outstanding
	 * delalloc going on here.
	 */
	if (origin == SEEK_HOLE && start != 0) {
		if (start <= root->sectorsize)
			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
						     root->sectorsize, 0);
		else
			em = btrfs_get_extent_fiemap(inode, NULL, 0,
						     start - root->sectorsize,
						     root->sectorsize, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		last_end = em->start + em->len;
		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);
		free_extent_map(em);
	}

	while (1) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
				if (last_end <= orig_start) {
					free_extent_map(em);
					ret = -ENXIO;
					break;
				}
			}

			if (origin == SEEK_HOLE) {
				*offset = start;
				free_extent_map(em);
				break;
			}
		} else {
			if (origin == SEEK_DATA) {
				if (em->block_start == EXTENT_MAP_DELALLOC) {
					if (start >= inode->i_size) {
						free_extent_map(em);
						ret = -ENXIO;
						break;
					}
				}

				*offset = start;
				free_extent_map(em);
				break;
			}
		}

		start = em->start + em->len;
		last_end = em->start + em->len;

		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);

		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
			free_extent_map(em);
			ret = -ENXIO;
			break;
		}
		free_extent_map(em);
		cond_resched();
	}
	if (!ret)
		*offset = min(*offset, inode->i_size);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, origin);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, origin);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
		offset = -EINVAL;
		goto out;
	}
	if (offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};