// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include "transaction.h"
#include "accessors.h"
#include "delalloc-space.h"
#include "file-item.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
	struct rb_node rb_node;
	u64 ino;

	/*
	 * Transid where the defrag was added, we search for extents newer than
	 * this.
	 */
	u64 transid;
	u64 root;

	/*
	 * The extent size threshold for autodefrag.
	 *
	 * This value is different for compressed/non-compressed extents, thus
	 * it needs to be passed in from the higher layer
	 * (i.e. inode_should_defrag()).
	 */
	u32 extent_thresh;
};
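
/*
 * Queued inodes live in fs_info->defrag_inodes, an rbtree ordered first by
 * root objectid and then by inode number (see __compare_inode_defrag()),
 * which is also the order the autodefrag worker walks them in.
 */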
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

/*
 * Insert a record for an inode into the defrag tree. The lock must be held
 * already.
 *
 * If you're inserting a record for an older transid than an existing record,
 * the transid already in the tree is lowered.
 *
 * If an existing record is found, the defrag item you pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct inode_defrag *entry;
	struct rb_node *parent = NULL;

	p = &fs_info->defrag_inodes.rb_node;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
			p = &parent->rb_right;
			/*
			 * If we're reinserting an entry for an old defrag run,
			 * make sure to lower the transid of our existing
			 * record.
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			entry->extent_thresh = min(defrag->extent_thresh,
						   entry->extent_thresh);
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);

static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}

/*
 * Insert a defrag record for this inode if auto defrag is enabled.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode, u32 extent_thresh)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode_defrag *defrag;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = btrfs_get_root_last_trans(root);

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = btrfs_root_id(root);
	defrag->extent_thresh = extent_thresh;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and then evict the inode from
		 * memory, a later re-read of the inode creates a new in-memory
		 * inode without the IN_DEFRAG flag. In that case we may find
		 * an existing defrag record already in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	spin_unlock(&fs_info->defrag_inodes_lock);
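
/*
 * The record queued above is consumed later by the autodefrag worker:
 * btrfs_run_defrag_inodes() below picks entries off the tree with
 * btrfs_pick_defrag_inode() and feeds them to __btrfs_run_defrag_inode().
 */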

/*
 * Pick the defragable inode that we want; if it doesn't exist, we will get the
 * next one.
 */
static struct inode_defrag *btrfs_pick_defrag_inode(
			struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *parent = NULL;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
			p = parent->rb_right;

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct inode_defrag, rb_node);

	rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
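
/*
 * The entry returned by btrfs_pick_defrag_inode() has already been removed
 * from the tree; once __btrfs_run_defrag_inode() is done with it, it is freed
 * back to btrfs_inode_defrag_cachep.
 */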

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
}
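
/*
 * Maximum number of sectors to defrag in one btrfs_defrag_file() call from
 * the autodefrag worker (passed as the @max_to_defrag argument), so a single
 * inode does not hold up the worker for too long.
 */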
#define BTRFS_DEFRAG_BATCH	1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct btrfs_ioctl_defrag_range_args range;

	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
	if (!__need_auto_defrag(fs_info))

	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);

	inode = btrfs_iget(defrag->ino, inode_root);
	btrfs_put_root(inode_root);
		ret = PTR_ERR(inode);

	if (cur >= i_size_read(inode)) {

	/* Do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.extent_thresh = defrag->extent_thresh;

	sb_start_write(fs_info->sb);
	ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);

	cur = max(cur + fs_info->sectorsize, range.start);

	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

/*
 * Run through the list of inodes in the FS that need defragging.
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);

		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))

		if (!__need_auto_defrag(fs_info))

		/* Find an inode to defrag. */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid, first_ino);
			if (root_objectid || first_ino) {

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	atomic_dec(&fs_info->defrag_running);

	/*
	 * During unmount, we use the transaction_wait queue to wait for the
	 * defragger to stop.
	 */
	wake_up(&fs_info->transaction_wait);

/*
 * Check if two block addresses are close, used by defrag.
 */
static bool close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < SZ_32K)
		return true;
	if (blocknr > other && blocknr - (other + blocksize) < SZ_32K)
		return true;
	return false;
}
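
/*
 * Example: with a 16K nodesize, a block at 0 and another at 40K are separated
 * by 40K - (0 + 16K) = 24K, which is under 32K, so close_blocks() returns
 * true.
 */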

/*
 * Go through all the leaves pointed to by a node and reallocate them so that
 * disk order is close to key order.
 */
static int btrfs_realloc_node(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *parent,
			      int start_slot, u64 *last_ret,
			      struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	const u32 blocksize = fs_info->nodesize;
	const int end_slot = btrfs_header_nritems(parent) - 1;
	u64 search_start = *last_ret;
	bool progress_passed = false;

	/*
	 * COWing must happen through a running transaction, which always
	 * matches the current fs generation (it's a transaction with a state
	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
	 * into error state to prevent the commit of any transaction.
	 */
	if (unlikely(trans->transaction != fs_info->running_transaction ||
		     trans->transid != fs_info->generation)) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
			   parent->start, btrfs_root_id(root), trans->transid,
			   fs_info->running_transaction->transid,
			   fs_info->generation);

	if (btrfs_header_nritems(parent) <= 1)
		return 0;

	for (int i = start_slot; i <= end_slot; i++) {
		struct extent_buffer *cur;
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && btrfs_comp_keys(&disk_key, progress) < 0)

		progress_passed = true;
		blocknr = btrfs_node_blockptr(parent, i);
			last_block = blocknr;

			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
			last_block = blocknr;

		cur = btrfs_read_node_slot(parent, i);

		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		ret = btrfs_force_cow_block(trans, root, cur, parent, i,
					    (end_slot - i) * blocksize),
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);

		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);

/*
 * Defrag all the leaves in a given btree.
 * Read all the leaves and try to get the key order to
 * better reflect disk order.
 */
static int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	int next_key_ret = 0;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))

	path = btrfs_alloc_path();

	level = btrfs_header_level(root->node);

	if (root->defrag_progress.objectid == 0) {
		struct extent_buffer *root_node;

		root_node = btrfs_lock_root_node(root);
		nritems = btrfs_header_nritems(root_node);
		root->defrag_max.objectid = 0;
		/* From above we know this is not a leaf. */
		btrfs_node_key_to_cpu(root_node, &root->defrag_max,
				      nritems - 1);
		btrfs_tree_unlock(root_node);
		free_extent_buffer(root_node);
		memset(&key, 0, sizeof(key));
	} else {
		memcpy(&key, &root->defrag_progress, sizeof(key));
	}

	path->keep_locks = 1;

	ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);

	btrfs_release_path(path);
	/*
	 * We don't need a lock on a leaf. btrfs_realloc_node() will lock all
	 * leaves from path->nodes[1], so set lowest_level to 1 to avoid later
	 * a deadlock (attempting to write lock an already write locked leaf).
	 */
	path->lowest_level = 1;
	wret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	if (!path->nodes[1]) {

	/*
	 * The node at level 1 must always be locked when our path has
	 * keep_locks set and lowest_level is 1, regardless of the value of
	 * path->slots[1].
	 */
	ASSERT(path->locks[1] != 0);
	ret = btrfs_realloc_node(trans, root,
				 &root->defrag_progress);
	WARN_ON(ret == -EAGAIN);

	/*
	 * Now that we reallocated the node we can find the next key. Note that
	 * btrfs_find_next_key() can release our path and do another search
	 * without COWing, this is because even with path->keep_locks = 1,
	 * btrfs_search_slot() / ctree.c:unlock_up() does not keep a lock on a
	 * node when path->slots[node_level - 1] does not point to the last
	 * item or a slot beyond the last item (ctree.c:unlock_up()). Therefore
	 * we search for the next key after reallocating our node.
	 */
	path->slots[1] = btrfs_header_nritems(path->nodes[1]);
	next_key_ret = btrfs_find_next_key(root, path, &key, 1,
					   BTRFS_OLDEST_GENERATION);
	if (next_key_ret == 0) {
		memcpy(&root->defrag_progress, &key, sizeof(key));

	btrfs_free_path(path);
	if (ret == -EAGAIN) {
		if (root->defrag_max.objectid > root->defrag_progress.objectid)
		if (root->defrag_max.type > root->defrag_progress.type)
		if (root->defrag_max.offset > root->defrag_progress.offset)

		memset(&root->defrag_progress, 0,
		       sizeof(root->defrag_progress));
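
/*
 * btrfs_defrag_leaves() above returns -EAGAIN while there is still progress
 * to make; btrfs_defrag_root() below restarts it with a fresh transaction on
 * each pass and stops once anything other than -EAGAIN is returned.
 */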

/*
 * Defrag a given btree. Every leaf in the btree is read and defragmented.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))

	struct btrfs_trans_handle *trans;

	trans = btrfs_start_transaction(root, 0);
		ret = PTR_ERR(trans);

	ret = btrfs_defrag_leaves(trans, root);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);

	if (btrfs_fs_closing(fs_info) || ret != -EAGAIN)

	if (btrfs_defrag_cancelled(fs_info)) {
		btrfs_debug(fs_info, "defrag_root cancelled");

	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
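
/*
 * Note: btrfs_defrag_root() reorganizes the tree (metadata) itself, not file
 * data extents; file data is handled by btrfs_defrag_file() further below.
 * It is typically driven from the defrag ioctl when the target is a
 * subvolume root.
 */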

/*
 * Defrag specific helper to get an extent map.
 *
 * Differences between this and btrfs_get_extent() are:
 *
 * - No extent_map will be added to inode->extent_tree
 *   To reduce memory usage in the long run.
 *
 * - Extra optimization to skip file extents older than @newer_than
 *   By using btrfs_search_forward() we can skip entire file ranges that
 *   have extents created in past transactions, because btrfs_search_forward()
 *   will not visit leaves and nodes with a generation smaller than the given
 *   minimal generation threshold (@newer_than).
 *
 * Return a valid em if we find a file extent matching the requirement.
 * Return NULL if we cannot find a file extent matching the requirement.
 * Return ERR_PTR() for error.
 */
static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
					    u64 start, u64 newer_than)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path path = { 0 };
	struct extent_map *em;
	struct btrfs_key key;
	u64 ino = btrfs_ino(inode);

	em = alloc_extent_map();

	key.type = BTRFS_EXTENT_DATA_KEY;

	ret = btrfs_search_forward(root, &key, &path, newer_than);

	/* Can't find anything newer */

	ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);

	if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
		/*
		 * If btrfs_search_slot() makes the path point beyond nritems,
		 * we should not have an empty leaf, as this inode must at
		 * least have its INODE_ITEM.
		 */
		ASSERT(btrfs_header_nritems(path.nodes[0]));
		path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1;
	}
	btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
	/* Perfect match, no need to go one slot back. */
	if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY &&

	/* We didn't find a perfect match, need to go one slot back. */
	if (path.slots[0] > 0) {
		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)

	/* Iterate through the path to find a file extent covering @start. */
		if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))

		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);

		/*
		 * We may go one slot back to an INODE_REF/XATTR item, then
		 * need to go forward until we reach an EXTENT_DATA.
		 * But we should still have the correct ino as key.objectid.
		 */
		if (WARN_ON(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY)

		/* It's beyond our target range, definitely no extent found. */
		if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY)

		/*
		 *	|	|<- File extent ->|
		 *	\- start
		 *
		 * This means there is a hole between start and key.offset.
		 */
		if (key.offset > start) {
			em->disk_bytenr = EXTENT_MAP_HOLE;
			em->disk_num_bytes = 0;
			em->len = key.offset - start;

		fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
				    struct btrfs_file_extent_item);
		extent_end = btrfs_file_extent_end(&path);

		/*
		 * |<- file extent ->|	|
		 *		      \- start
		 *
		 * We haven't reached start, search the next slot.
		 */
		if (extent_end <= start)

		/* Now this extent covers @start, convert it to an em. */
		btrfs_extent_item_to_extent_map(inode, &path, fi, em);

		ret = btrfs_next_item(root, &path);

	btrfs_release_path(&path);

	btrfs_release_path(&path);

	btrfs_release_path(&path);

static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
					       u64 newer_than, bool locked)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;

	/*
	 * Hopefully we have this extent in the tree already, try without the
	 * full extent lock.
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, sectorsize);
	read_unlock(&em_tree->lock);

	/*
	 * We can get a merged extent, in that case, we need to re-search the
	 * tree to get the original em for defrag.
	 *
	 * If @newer_than is 0 or em::generation < newer_than, we can trust
	 * this em, as either we don't care about the generation, or the
	 * merged extent map will be rejected anyway.
	 */
	if (em && (em->flags & EXTENT_FLAG_MERGED) &&
	    newer_than && em->generation >= newer_than) {

		struct extent_state *cached = NULL;
		u64 end = start + sectorsize - 1;

		/* Get the big lock and read metadata off disk. */
		lock_extent(io_tree, start, end, &cached);
		em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
		unlock_extent(io_tree, start, end, &cached);

static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info,
				   const struct extent_map *em)
{
	if (extent_map_is_compressed(em))
		return BTRFS_MAX_COMPRESSED;
	return fs_info->max_extent_size;
}
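
/*
 * Note: compressed extents are capped at BTRFS_MAX_COMPRESSED (128K, see the
 * "max capacity" comment in defrag_collect_targets()), so a compressed extent
 * already at that size cannot be made any larger by defragging.
 */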

static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
				     u32 extent_thresh, u64 newer_than, bool locked)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct extent_map *next;

	/* This is the last extent */
	if (em->start + em->len >= i_size_read(inode))

	/*
	 * Here we need to pass @newer_than when checking the next extent, or
	 * we will hit a case where we mark the current extent for defrag but
	 * the next one will not be a target.
	 * This would just cause extra IO without really reducing the number
	 * of fragments.
	 */
	next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
	/* No more em or hole */
	if (!next || next->disk_bytenr >= EXTENT_MAP_LAST_BYTE)
	if (next->flags & EXTENT_FLAG_PREALLOC)
	/*
	 * If the next extent is at its max capacity, defragging the current
	 * extent makes no sense, as the total number of extents won't change.
	 */
	if (next->len >= get_extent_max_capacity(fs_info, em))
	/* Skip older extent */
	if (next->generation < newer_than)
	/* Also check extent size */
	if (next->len >= extent_thresh)

	free_extent_map(next);

/*
 * Prepare one page to be defragged.
 *
 * This will ensure:
 *
 * - Returned page is locked and has been set up properly.
 * - No ordered extent exists in the page.
 * - The page is uptodate.
 *
 * NOTE: Caller should also wait for page writeback after the cluster is
 * prepared, here we don't do writeback wait for each page.
 */
static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	u64 page_start = (u64)index << PAGE_SHIFT;
	u64 page_end = page_start + PAGE_SIZE - 1;
	struct extent_state *cached_state = NULL;

	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);

	/*
	 * Since we can defragment files opened read-only, we can encounter
	 * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS). We
	 * can't do I/O using huge pages yet, so return an error for now.
	 * Filesystem transparent huge pages are typically only used for
	 * executables that explicitly enable them, so this isn't very
	 * common.
	 */
	if (folio_test_large(folio)) {
		return ERR_PTR(-ETXTBSY);
	}

	ret = set_folio_extent_mapped(folio);

	/* Wait for any existing ordered extent in the range */
	while (1) {
		struct btrfs_ordered_extent *ordered;

		lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
		unlock_extent(&inode->io_tree, page_start, page_end,
			      &cached_state);

		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		/*
		 * We unlocked the folio above, so we need to check if it was
		 * released or not.
		 */
		if (folio->mapping != mapping || !folio->private) {

	/*
	 * Now the page range has no ordered extent any more. Read the page to
	 * make it uptodate.
	 */
	if (!folio_test_uptodate(folio)) {
		btrfs_read_folio(NULL, folio);

		if (folio->mapping != mapping || !folio->private) {

		if (!folio_test_uptodate(folio)) {
			return ERR_PTR(-EIO);

struct defrag_target_range {
	struct list_head list;
	u64 start;
	u64 len;
};

/*
 * Collect all valid target extents.
 *
 * @start:	   file offset to lookup
 * @len:	   length to lookup
 * @extent_thresh: file extent size threshold, any extent size >= this value
 *		   will be ignored
 * @newer_than:    only defrag extents newer than this value
 * @do_compress:   whether the defrag is doing compression
 *		   if true, @extent_thresh will be ignored and all regular
 *		   file extents meeting @newer_than will be targets.
 * @locked:	   if the extent lock is already held for the range
 * @target_list:   list of target file extents
 */
static int defrag_collect_targets(struct btrfs_inode *inode,
				  u64 start, u64 len, u32 extent_thresh,
				  u64 newer_than, bool do_compress,
				  bool locked, struct list_head *target_list,
				  u64 *last_scanned_ret)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	bool last_is_target = false;

	while (cur < start + len) {
		struct extent_map *em;
		struct defrag_target_range *new;
		bool next_mergeable = true;

		last_is_target = false;
		em = defrag_lookup_extent(&inode->vfs_inode, cur, newer_than, locked);

		/*
		 * If the file extent is an inline one, we may still want to
		 * defrag it (fall through) if it will result in a regular
		 * extent. This is for users who want to convert inline extents
		 * to regular ones through the max_inline= mount option.
		 */
		if (em->disk_bytenr == EXTENT_MAP_INLINE &&
		    em->len <= inode->root->fs_info->max_inline)

		/* Skip holes and preallocated extents. */
		if (em->disk_bytenr == EXTENT_MAP_HOLE ||
		    (em->flags & EXTENT_FLAG_PREALLOC))

		/* Skip older extent */
		if (em->generation < newer_than)

		/* This em is under writeback, no need to defrag */
		if (em->generation == (u64)-1)

		/*
		 * Our start offset might be in the middle of an existing extent
		 * map, so take that into account.
		 */
		range_len = em->len - (cur - em->start);
		/*
		 * If this range of the extent map is already flagged for
		 * delalloc, skip it, because:
		 *
		 * 1) We could deadlock later, when trying to reserve space for
		 *    delalloc, because in case we can't immediately reserve space
		 *    the flusher can start delalloc and wait for the respective
		 *    ordered extents to complete. The deadlock would happen
		 *    because we do the space reservation while holding the range
		 *    locked, and starting writeback, or finishing an ordered
		 *    extent, requires locking the range;
		 *
		 * 2) If there's delalloc there, it means there are dirty pages for
		 *    which writeback has not started yet (we clean the delalloc
		 *    flag when starting writeback and after creating an ordered
		 *    extent). If we mark pages in an adjacent range for defrag,
		 *    then we will have a larger contiguous range for delalloc,
		 *    very likely resulting in a larger extent after writeback is
		 *    triggered (except in a case of free space fragmentation).
		 */
		if (test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
		/*
		 * For the do_compress case, we want to compress all valid file
		 * extents, thus no @extent_thresh or mergeable check.
		 */

		/* Skip too large extent */
		if (em->len >= extent_thresh)

		/*
		 * Skip extents already at their max capacity, this is mostly
		 * for compressed extents, whose max capacity is only 128K.
		 */
		if (em->len >= get_extent_max_capacity(fs_info, em))

		/*
		 * Normally there are no more extents after an inline one, thus
		 * @next_mergeable will normally be false and the extent would
		 * not be defragged. So if an inline extent passed all the above
		 * checks, just add it for defrag, so it can be converted to a
		 * regular extent.
		 */
		if (em->disk_bytenr == EXTENT_MAP_INLINE)

		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
						extent_thresh, newer_than, locked);
		if (!next_mergeable) {
			struct defrag_target_range *last;

			/* Empty target list, no way to merge with last entry */
			if (list_empty(target_list))
			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			/* Not mergeable with last entry */
			if (last->start + last->len != cur)

			/* Mergeable, fall through to add it to @target_list. */

		last_is_target = true;
		range_len = min(extent_map_end(em), start + len) - cur;
		/*
		 * This one is a good target, check if it can be merged into
		 * the last range of the target list.
		 */
		if (!list_empty(target_list)) {
			struct defrag_target_range *last;

			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			ASSERT(last->start + last->len <= cur);
			if (last->start + last->len == cur) {
				/* Mergeable, enlarge the last entry */
				last->len += range_len;

			/* Fall through to allocate a new entry */

		/* Allocate a new defrag_target_range */
		new = kmalloc(sizeof(*new), GFP_NOFS);
			free_extent_map(em);

		new->len = range_len;
		list_add_tail(&new->list, target_list);

		cur = extent_map_end(em);
		free_extent_map(em);

		struct defrag_target_range *entry;
		struct defrag_target_range *tmp;

		list_for_each_entry_safe(entry, tmp, target_list, list) {
			list_del_init(&entry->list);

	if (!ret && last_scanned_ret) {
		/*
		 * If the last extent is not a target, the caller can skip to
		 * the end of that extent.
		 * Otherwise, we can only go to the end of the specified range.
		 */
		if (!last_is_target)
			*last_scanned_ret = max(cur, *last_scanned_ret);
		else
			*last_scanned_ret = max(start + len, *last_scanned_ret);
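
/*
 * Defrag works on clusters of at most 256K at a time: each cluster is
 * collected, its pages prepared and locked, and its target ranges marked for
 * delalloc in one go (see defrag_one_cluster() and defrag_one_range() below).
 */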
#define CLUSTER_SIZE	(SZ_256K)
static_assert(PAGE_ALIGNED(CLUSTER_SIZE));

/*
 * Defrag one contiguous target range.
 *
 * @inode:	target inode
 * @target:	target range to defrag
 * @pages:	locked pages covering the defrag range
 * @nr_pages:	number of locked pages
 *
 * Caller should ensure:
 *
 * - Pages are prepared
 *   Pages should be locked, no ordered extent in the pages range,
 *   no writeback.
 *
 * - Extent bits are locked
 */
static int defrag_one_locked_target(struct btrfs_inode *inode,
				    struct defrag_target_range *target,
				    struct folio **folios, int nr_pages,
				    struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_changeset *data_reserved = NULL;
	const u64 start = target->start;
	const u64 len = target->len;
	unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
	unsigned long start_index = start >> PAGE_SHIFT;
	unsigned long first_index = folios[0]->index;

	ASSERT(last_index - first_index + 1 <= nr_pages);

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
	clear_extent_bit(&inode->io_tree, start, start + len - 1,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, cached_state);
	set_extent_bit(&inode->io_tree, start, start + len - 1,
		       EXTENT_DELALLOC | EXTENT_DEFRAG, cached_state);

	/* Update the page status */
	for (i = start_index - first_index; i <= last_index - first_index; i++) {
		folio_clear_checked(folios[i]);
		btrfs_folio_clamp_set_dirty(fs_info, folios[i], start, len);
	}
	btrfs_delalloc_release_extents(inode, len);
	extent_changeset_free(data_reserved);
	return ret;
}

static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
			    u32 extent_thresh, u64 newer_than, bool do_compress,
			    u64 *last_scanned_ret)
{
	struct extent_state *cached_state = NULL;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	struct folio **folios;
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	u64 last_index = (start + len - 1) >> PAGE_SHIFT;
	u64 start_index = start >> PAGE_SHIFT;
	unsigned int nr_pages = last_index - start_index + 1;

	ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));

	folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);

	/* Prepare all pages */
	for (i = 0; i < nr_pages; i++) {
		folios[i] = defrag_prepare_one_folio(inode, start_index + i);
		if (IS_ERR(folios[i])) {
			ret = PTR_ERR(folios[i]);

	for (i = 0; i < nr_pages; i++)
		folio_wait_writeback(folios[i]);

	/* Lock the pages range */
	lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
		    (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
		    &cached_state);
	/*
	 * Now we have a consistent view of the extent map, re-check which
	 * range really needs to be defragged.
	 *
	 * And this time we have the extent locked already, so pass
	 * @locked = true so that we won't relock the extent range and cause
	 * a deadlock.
	 */
	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, true,
				     &target_list, last_scanned_ret);

	list_for_each_entry(entry, &target_list, list) {
		ret = defrag_one_locked_target(inode, entry, folios, nr_pages,
					       &cached_state);
	}

	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
	}

	unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
		      (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
		      &cached_state);
	for (i = 0; i < nr_pages; i++) {
		folio_unlock(folios[i]);
		folio_put(folios[i]);
	}
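
/*
 * Note the two-pass target collection: defrag_one_cluster() first calls
 * defrag_collect_targets() without the extent range locked to build a rough
 * target list, and defrag_one_range() then re-collects with the range locked
 * (@locked = true) so the final decision is made on a stable extent map view.
 */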

static int defrag_one_cluster(struct btrfs_inode *inode,
			      struct file_ra_state *ra,
			      u64 start, u32 len, u32 extent_thresh,
			      u64 newer_than, bool do_compress,
			      unsigned long *sectors_defragged,
			      unsigned long max_sectors,
			      u64 *last_scanned_ret)
{
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);

	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, false,
				     &target_list, NULL);

	list_for_each_entry(entry, &target_list, list) {
		u32 range_len = entry->len;

		/* Reached or beyond the limit */
		if (max_sectors && *sectors_defragged >= max_sectors) {

			range_len = min_t(u32, range_len,
				(max_sectors - *sectors_defragged) * sectorsize);

		/*
		 * If defrag_one_range() has updated last_scanned_ret,
		 * our range may already be invalid (e.g. hole punched).
		 * Skip if our range is before last_scanned_ret, as there is
		 * no need to defrag the range anymore.
		 */
		if (entry->start + range_len <= *last_scanned_ret)

		page_cache_sync_readahead(inode->vfs_inode.i_mapping,
				ra, NULL, entry->start >> PAGE_SHIFT,
				((entry->start + range_len - 1) >> PAGE_SHIFT) -
				(entry->start >> PAGE_SHIFT) + 1);
		/*
		 * Here we may not defrag any range if holes are punched before
		 * we locked the pages.
		 * But that's fine, it only affects the @sectors_defragged
		 * accounting.
		 */
		ret = defrag_one_range(inode, entry->start, range_len,
				       extent_thresh, newer_than, do_compress,

		*sectors_defragged += range_len >>
				      inode->root->fs_info->sectorsize_bits;

	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);

	*last_scanned_ret = max(*last_scanned_ret, start + len);

/*
 * Entry point to file defragmentation.
 *
 * @inode:	   inode to be defragged
 * @ra:		   readahead state (can be NULL)
 * @range:	   defrag options including range and flags
 * @newer_than:	   minimum transid to defrag
 * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
 *		   will be defragged.
 *
 * Return <0 for error.
 * Return >=0 for the number of sectors defragged, and range->start will be
 * updated to indicate the file offset where the next defrag should start.
 * (Mostly for autodefrag, which sets @max_to_defrag, so we may exit early
 * without defragging the whole range.)
 */
int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	unsigned long sectors_defragged = 0;
	u64 isize = i_size_read(inode);
	bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS);
	bool ra_allocated = false;
	int compress_type = BTRFS_COMPRESS_ZLIB;
	u32 extent_thresh = range->extent_thresh;
	pgoff_t start_index;

	if (range->start >= isize)
		return -EINVAL;

	if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
		return -EINVAL;
	if (range->compress_type)
		compress_type = range->compress_type;

	if (extent_thresh == 0)
		extent_thresh = SZ_256K;

	if (range->start + range->len > range->start) {
		/* Got a specific range */
		last_byte = min(isize, range->start + range->len);
		/* Defrag until file end */

	/* Align the range */
	cur = round_down(range->start, fs_info->sectorsize);
	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;

	/*
	 * If we were not given a ra, allocate a readahead context. As
	 * readahead is just an optimization, defrag will work without it so
	 * we don't error out.
	 */
		ra_allocated = true;
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
			file_ra_state_init(ra, inode->i_mapping);

	/*
	 * Make writeback start from the beginning of the range, so that the
	 * defrag range can be written sequentially.
	 */
	start_index = cur >> PAGE_SHIFT;
	if (start_index < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = start_index;

	while (cur < last_byte) {
		const unsigned long prev_sectors_defragged = sectors_defragged;
		u64 last_scanned = cur;

		if (btrfs_defrag_cancelled(fs_info)) {

		/* We want the cluster end at page boundary when possible */
		cluster_end = (((cur >> PAGE_SHIFT) +
			       (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
		cluster_end = min(cluster_end, last_byte);

		btrfs_inode_lock(BTRFS_I(inode), 0);
		if (IS_SWAPFILE(inode)) {
			btrfs_inode_unlock(BTRFS_I(inode), 0);
		if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
			btrfs_inode_unlock(BTRFS_I(inode), 0);
		BTRFS_I(inode)->defrag_compress = compress_type;
		ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
					 cluster_end + 1 - cur, extent_thresh,
					 newer_than, do_compress, &sectors_defragged,
					 max_to_defrag, &last_scanned);

		if (sectors_defragged > prev_sectors_defragged)
			balance_dirty_pages_ratelimited(inode->i_mapping);

		btrfs_inode_unlock(BTRFS_I(inode), 0);

		cur = max(cluster_end + 1, last_scanned);

	/*
	 * Update range.start for autodefrag, this will indicate where to start
	 * in the next run.
	 */

	if (sectors_defragged) {
		/*
		 * We have defragged some sectors, for the compression case
		 * they need to be written back immediately.
		 */
		if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
			filemap_flush(inode->i_mapping);
			if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
				     &BTRFS_I(inode)->runtime_flags))
				filemap_flush(inode->i_mapping);
		}
		if (range->compress_type == BTRFS_COMPRESS_LZO)
			btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
		else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
			btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
		ret = sectors_defragged;

	btrfs_inode_lock(BTRFS_I(inode), 0);
	BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
	btrfs_inode_unlock(BTRFS_I(inode), 0);
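
/*
 * Illustrative only (not taken from the ioctl code): a caller such as the
 * defrag ioctl is expected to fill a btrfs_ioctl_defrag_range_args and call
 * btrfs_defrag_file() roughly like below, with the field values here being
 * hypothetical examples:
 *
 *	struct btrfs_ioctl_defrag_range_args range = { 0 };
 *
 *	range.start = 0;
 *	range.len = (u64)-1;
 *	range.extent_thresh = SZ_256K;
 *	ret = btrfs_defrag_file(inode, &file->f_ra, &range,
 *				BTRFS_OLDEST_GENERATION, 0);
 *
 * Autodefrag instead passes the queued record's transid as @newer_than and
 * BTRFS_DEFRAG_BATCH as @max_to_defrag (see __btrfs_run_defrag_inode()).
 */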

void __cold btrfs_auto_defrag_exit(void)
{
	kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int __init btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0, 0, NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}