// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or the node it collided with in the
 * tree otherwise.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree, and if it can't be found return the
 * closest offset we have to this offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
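
/* Return 1 if [file_offset, file_offset + len) overlaps @entry, 0 otherwise. */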
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Find the first ordered struct that covers this offset, otherwise
 * the first one less than this offset.
 */
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
						  u64 file_offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (inode->ordered_tree_last) {
		entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return inode->ordered_tree_last;
	}
	ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		inode->ordered_tree_last = ret;
	return ret;
}
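
/*
 * Allocate and initialize an ordered extent for the given file range.
 *
 * This also settles the qgroup reservation for the range: for NOCOW and
 * PREALLOC writes the reserved data space is freed right away, otherwise it
 * is released and the reserved byte count is stashed in ->qgroup_rsv so it
 * can be freed when the ordered extent completes.  The caller still has to
 * insert the entry into the tree with insert_ordered_extent().
 */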
static struct btrfs_ordered_extent *alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
			u64 offset, unsigned long flags, int compress_type)
{
	struct btrfs_ordered_extent *entry;
	int ret;
	u64 qgroup_rsv = 0;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = qgroup_rsv;
	entry->flags = flags;
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	INIT_LIST_HEAD(&entry->bioc_list);
	init_completion(&entry->completion);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return entry;
}
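
/*
 * Insert a previously allocated ordered extent into the inode's ordered tree
 * and into the root's list of ordered extents, taking one extra reference
 * owned by the tree.
 */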
static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;

	trace_btrfs_ordered_extent_add(inode, entry);

	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
				 fs_info->delalloc_batch);

	/* One ref for the tree. */
	refcount_inc(&entry->refs);

	spin_lock_irq(&inode->ordered_tree_lock);
	node = tree_insert(&inode->ordered_tree, entry->file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				entry->file_offset);
	spin_unlock_irq(&inode->ordered_tree_lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted, and
 * the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			u64 disk_num_bytes, u64 offset, unsigned long flags,
			int compress_type)
{
	struct btrfs_ordered_extent *entry;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

	entry = alloc_ordered_extent(inode, file_offset, num_bytes, ram_bytes,
				     disk_bytenr, disk_num_bytes, offset, flags,
				     compress_type);
	if (!IS_ERR(entry))
		insert_ordered_extent(entry);
	return entry;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	spin_lock_irq(&inode->ordered_tree_lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&inode->ordered_tree_lock);
}
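
/*
 * Mark an ordered extent as failed, recording -EIO in the inode's mapping
 * the first time the error bit is set.
 */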
void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered)
{
	if (!test_and_set_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
		mapping_set_error(ordered->inode->i_mapping, -EIO);
}
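
/* Work function that runs the completion of an ordered extent. */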
static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}
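
/*
 * Account for @len finished bytes of the ordered extent and return true if
 * the whole ordered extent has now completed and its completion should be
 * queued.  Must be called with the inode's ordered_tree_lock held.
 */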
static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				      struct page *page, u64 file_offset,
				      u64 len, bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->ordered_tree_lock);

	if (page) {
		ASSERT(page->mapping);
		ASSERT(page_offset(page) <= file_offset);
		ASSERT(file_offset + len <= page_offset(page) + PAGE_SIZE);

		/*
		 * Ordered (Private2) bit indicates whether we still have
		 * pending io unfinished for the ordered extent.
		 *
		 * If there's no such bit, we need to skip to next range.
		 */
		if (!btrfs_folio_test_ordered(fs_info, page_folio(page),
					      file_offset, len))
			return false;
		btrfs_folio_clear_ordered(fs_info, page_folio(page), file_offset, len);
	}

	/* Now we're fine to update the accounting. */
	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
		btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
			   btrfs_root_id(inode->root), btrfs_ino(inode),
			   ordered->file_offset, ordered->num_bytes,
			   len, ordered->bytes_left);
		ordered->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	if (ordered->bytes_left)
		return false;

	/*
	 * All the IO of the ordered extent is finished, we need to queue
	 * the finish_func to be executed.
	 */
	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
	cond_wake_up(&ordered->wait);
	refcount_inc(&ordered->refs);
	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
	return true;
}
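
/* Queue the completion of an ordered extent on the appropriate workqueue. */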
static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
	btrfs_queue_work(wq, &ordered->work);
}
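
/*
 * Finish IO for a range of an ordered extent and queue its completion work
 * once the whole extent is done.  Returns true if the completion was queued.
 */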
bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				 struct page *page, u64 file_offset, u64 len,
				 bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	unsigned long flags;
	bool ret;

	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);

	/*
	 * If this is a COW write it means we created new extent maps for the
	 * range and they point to unwritten locations if we got an error either
	 * before submitting a bio or during IO.
	 *
	 * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we
	 * are queuing its completion below. During completion, at
	 * btrfs_finish_one_ordered(), we will drop the extent maps for the
	 * unwritten locations.
	 *
	 * However because completion runs in a work queue we can end up having
	 * a fast fsync running before that. In the case of direct IO, once we
	 * unlock the inode the fsync might start, and we queue the completion
	 * before unlocking the inode. In the case of buffered IO when writeback
	 * finishes (end_bbio_data_write()) we queue the completion, so if the
	 * writeback was triggered by a fast fsync, the fsync might start
	 * logging before ordered extent completion runs in the work queue.
	 *
	 * The fast fsync will log file extent items based on the extent maps it
	 * finds, so if by the time it collects extent maps the ordered extent
	 * completion didn't happen yet, it will log file extent items that
	 * point to unwritten extents, resulting in a corruption if a crash
	 * happens and the log tree is replayed. Note that a fast fsync does not
	 * wait for completion of ordered extents in order to reduce latency.
	 *
	 * Set a flag in the inode so that the next fast fsync will wait for
	 * ordered extents to complete before starting to log.
	 */
	if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
		set_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);

	if (ret)
		btrfs_queue_ordered_fn(ordered);
	return ret;
}

/*
 * Mark the IO of all ordered extents inside the specified range as finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 In these cases the callers ensure the endio function won't be
 *		 executed twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	trace_btrfs_writepage_end_io_hook(inode, file_offset,
					  file_offset + num_bytes - 1,
					  uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = ordered_tree_search(inode, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
			btrfs_queue_ordered_fn(entry);
			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents, so the caller
 * must ensure it doesn't.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return finished;
}

/*
 * Drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree.  No references are dropped,
 * but waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_alloc_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release,
						test_bit(BTRFS_ORDERED_IOERR,
							 &entry->flags));
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	spin_lock_irq(&btrfs_inode->ordered_tree_lock);
	node = &entry->rb_node;
	rb_erase(node, &btrfs_inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (btrfs_inode->ordered_tree_last == node)
		btrfs_inode->ordered_tree_last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&btrfs_inode->ordered_tree_lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}
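
/*
 * Flush-work helper: start and wait for one ordered extent, then signal its
 * completion so btrfs_wait_ordered_extents() can reap it.
 */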
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}
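
/*
 * Wait for ordered extents across all roots, limited to at most @nr extents
 * and to the given disk byte range (see btrfs_wait_ordered_extents()).
 */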
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);
	u64 done;

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * Pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to @file_offset.  Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node) {
		node = ordered_tree_search(inode, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&inode->ordered_tree_lock);
	for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
}

/*
 * Look up and return any ordered extent before 'file_offset'.  NULL is
 * returned if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference from btrfs_lookup_ordered_extent() is that this function
 * ensures the first ordered extent in the range gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = inode->ordered_tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always returns with the given range locked, ensuring that after it's called
 * no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if the range was locked and no ordered extents were found in
 * it, otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}

/* Split out a new ordered extent for the first @len bytes of @ordered. */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
			struct btrfs_ordered_extent *ordered, u64 len)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags;
	struct btrfs_ordered_sum *sum, *tmpsum;
	struct btrfs_ordered_extent *new;
	struct rb_node *node;
	u64 offset = 0;

	trace_btrfs_ordered_extent_split(inode, ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/* We cannot split partially completed ordered extents. */
	if (ordered->bytes_left) {
		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
			return ERR_PTR(-EINVAL);
	}
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return ERR_PTR(-EINVAL);

	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
				   len, 0, flags, ordered->compress_type);
	if (IS_ERR(new))
		return new;

	/* One ref for the tree. */
	refcount_inc(&new->refs);

	spin_lock_irq(&root->ordered_extent_lock);
	spin_lock(&inode->ordered_tree_lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (inode->ordered_tree_last == node)
		inode->ordered_tree_last = NULL;

	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;
	ordered->ram_bytes -= len;

	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
		ASSERT(ordered->bytes_left == 0);
		new->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
		if (ordered->truncated_len > len) {
			ordered->truncated_len -= len;
		} else {
			new->truncated_len = ordered->truncated_len;
			ordered->truncated_len = 0;
		}
	}

	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
		if (offset == len)
			break;
		list_move_tail(&sum->list, &new->list);
		offset += sum->len;
	}

	/* Re-insert the node */
	node = tree_insert(&inode->ordered_tree, ordered->file_offset,
			   &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			ordered->file_offset);

	node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			new->file_offset);
	spin_unlock(&inode->ordered_tree_lock);

	list_add_tail(&new->root_extent_list, &root->ordered_extents);
	root->nr_ordered_extents++;
	spin_unlock_irq(&root->ordered_extent_lock);
	return new;
}
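
/* Slab cache for ordered extents, set up at module init and torn down at exit. */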
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = KMEM_CACHE(btrfs_ordered_extent, 0);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}