// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

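/*
 * Worked example for the macros above (illustrative numbers, not checked at
 * build time): with a 16KiB nodesize and 4 byte crc32c checksums,
 * BTRFS_LEAF_DATA_SIZE() is the nodesize minus the leaf header (~101 bytes).
 * Subtracting room for two item headers (sizeof(struct btrfs_item) == 25)
 * and dividing by the csum size gives roughly 4057 checksums after the
 * "- 1" slack, then clamped to PAGE_SIZE. At one checksum per 4KiB sector a
 * single csum item can thus describe on the order of 16MiB of data.
 */
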
/*
 * Set inode's size according to filesystem options
 *
 * @inode:      inode we want to update the disk_i_size for
 * @new_i_size: i_size we want to set to, 0 if we use i_size
 *
 * With NO_HOLES set this simply sets the disk_i_size to whatever i_size_read()
 * returns as it is perfectly fine with a file that has holes without hole file
 * extent items.
 *
 * However without NO_HOLES we need to only return the area that is contiguous
 * from the 0 offset of the file. Otherwise we could end up adjusting i_size up
 * to an extent that has a gap in between.
 *
 * Finally new_i_size should only be set in the case of truncate where we're not
 * ready to use i_size_read() as the limiter yet.
 */
void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start, end, i_size;
	int ret;

	i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
	if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
		inode->disk_i_size = i_size;
		return;
	}

	spin_lock(&inode->lock);
	ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start,
					 &end, EXTENT_DIRTY);
	if (!ret && start == 0)
		i_size = min(i_size, end + 1);
	else
		i_size = 0;
	inode->disk_i_size = i_size;
	spin_unlock(&inode->lock);
}

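/*
 * Example of the non-NO_HOLES case (illustrative): if a file has dirty file
 * extents at [0, 4K) and [8K, 12K) but nothing at [4K, 8K), the contiguous
 * EXTENT_DIRTY range starting at offset 0 ends at 4K, so disk_i_size is
 * capped at 4K even though i_size may already be 12K. Once the hole is
 * filled, disk_i_size can catch up on a later update.
 */
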
/*
 * Mark range within a file as having a new extent inserted
 *
 * @inode: inode being modified
 * @start: start file offset of the file extent we've inserted
 * @len:   logical length of the file extent item
 *
 * Call when we are inserting a new file extent where there was none before.
 * There is no need to call this when replacing an existing file extent;
 * however, if unsure, it is fine to call this multiple times.
 *
 * The start and len must match the file extent item, and thus must be
 * sectorsize aligned.
 */
int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
				      u64 len)
{
	if (len == 0)
		return 0;

	ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize));

	if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
		return 0;
	return set_extent_bits(&inode->file_extent_tree, start, start + len - 1,
			       EXTENT_DIRTY);
}

/*
 * Marks an inode range as not having a backing extent
 *
 * @inode: inode being modified
 * @start: start file offset of the file extent we've dropped
 * @len:   logical length of the file extent item
 *
 * Called when we drop a file extent, for example when we truncate. Doesn't
 * need to be called for cases where we're replacing a file extent, like when
 * we've COWed a file extent.
 *
 * The start and len must match the file extent item, and thus must be
 * sectorsize aligned.
 */
int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
					u64 len)
{
	if (len == 0)
		return 0;

	ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) ||
	       len == (u64)-1);

	if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
		return 0;
	return clear_extent_bit(&inode->file_extent_tree, start,
				start + len - 1, EXTENT_DIRTY, 0, 0, NULL);
}

static inline u32 max_ordered_sum_bytes(struct btrfs_fs_info *fs_info,
					u16 csum_size)
{
	u32 ncsums = (PAGE_SIZE - sizeof(struct btrfs_ordered_sum)) / csum_size;

	return ncsums * fs_info->sectorsize;
}

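/*
 * Illustrative arithmetic (values depend on the build): with 4KiB pages,
 * 4 byte crc32c checksums and a 4KiB sectorsize, one page minus the
 * btrfs_ordered_sum header holds on the order of a thousand checksums, so a
 * single ordered sum can describe roughly 4MiB of data.
 */
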
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

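/*
 * Usage note (an observation about in-tree callers, not enforced here): this
 * is used to insert file extent items that represent holes, i.e. with a zero
 * disk_offset and disk_num_bytes, which is why later kernels renamed it to
 * btrfs_insert_hole_extent().
 */
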
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	const u32 csum_size = fs_info->csum_size;
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sectorsize_bits;
		csums_in_item = btrfs_item_size(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

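/*
 * Worked example (illustrative, 4KiB sectorsize, 4 byte checksums): looking
 * up bytenr 1M when the preceding item's key offset is 1M - 64K gives
 * csum_offset = 64K >> 12 = 16. If that item currently holds exactly 16
 * checksums it ends right at bytenr and we return -EFBIG so the caller may
 * extend it; if it holds fewer, the lookup misses and we return -ENOENT.
 */
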
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	return btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
}

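/*
 * Note on @mod: mod < 0 prepares the search for a deletion (ins_len = -1)
 * and any non-zero mod makes the search COW the path; mod == 0 is a plain
 * read-only lookup.
 */
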
/*
 * Find checksums for logical bytenr range [disk_bytenr, disk_bytenr + len) and
 * store the result to @dst.
 *
 * Return >0 for the number of sectors we found.
 * Return 0 if the range [disk_bytenr, disk_bytenr + sectorsize) has no csum.
 * The caller may want to try the next sector until one range is hit.
 * Return <0 for fatal error.
 */
static int search_csum_tree(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 disk_bytenr,
			    u64 len, u8 *dst)
{
	struct btrfs_root *csum_root;
	struct btrfs_csum_item *item = NULL;
	struct btrfs_key key;
	const u32 sectorsize = fs_info->sectorsize;
	const u32 csum_size = fs_info->csum_size;
	u32 itemsize;
	int ret;
	u64 csum_start;
	u64 csum_len;

	ASSERT(IS_ALIGNED(disk_bytenr, sectorsize) &&
	       IS_ALIGNED(len, sectorsize));

	/* Check if the current csum item covers disk_bytenr */
	if (path->nodes[0]) {
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);

		csum_start = key.offset;
		csum_len = (itemsize / csum_size) * sectorsize;

		if (in_range(disk_bytenr, csum_start, csum_len))
			goto found;
	}

	/* Current item doesn't contain the desired range, search again */
	btrfs_release_path(path);
	csum_root = btrfs_csum_root(fs_info, disk_bytenr);
	item = btrfs_lookup_csum(NULL, csum_root, path, disk_bytenr, 0);
	if (IS_ERR(item)) {
		ret = PTR_ERR(item);
		goto out;
	}
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);

	csum_start = key.offset;
	csum_len = (itemsize / csum_size) * sectorsize;
	ASSERT(in_range(disk_bytenr, csum_start, csum_len));

found:
	ret = (min(csum_start + csum_len, disk_bytenr + len) -
	       disk_bytenr) >> fs_info->sectorsize_bits;
	read_extent_buffer(path->nodes[0], dst, (unsigned long)item,
			   ret * csum_size);
out:
	if (ret == -ENOENT || ret == -EFBIG)
		ret = 0;
	return ret;
}

/*
 * Locate the file_offset of @cur_disk_bytenr of a @bio.
 *
 * A btrfs bio represents the read range
 * [bi_sector << 9, bi_sector << 9 + bi_size).
 * Knowing this, we can iterate through each bvec to locate the page belonging
 * to @cur_disk_bytenr and get the file offset.
 *
 * @inode is used to determine if the bvec page really belongs to @inode.
 *
 * Return 0 if we can't find the file offset
 * Return >0 if we find the file offset and store it in @file_offset_ret
 */
static int search_file_offset_in_bio(struct bio *bio, struct inode *inode,
				     u64 disk_bytenr, u64 *file_offset_ret)
{
	struct bvec_iter iter;
	struct bio_vec bvec;
	u64 cur = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	int ret = 0;

	bio_for_each_segment(bvec, bio, iter) {
		struct page *page = bvec.bv_page;

		if (cur > disk_bytenr)
			break;
		if (cur + bvec.bv_len <= disk_bytenr) {
			cur += bvec.bv_len;
			continue;
		}
		ASSERT(in_range(disk_bytenr, cur, bvec.bv_len));
		if (page->mapping && page->mapping->host &&
		    page->mapping->host == inode) {
			ret = 1;
			*file_offset_ret = page_offset(page) + bvec.bv_offset +
					   disk_bytenr - cur;
			break;
		}
	}
	return ret;
}

/*
 * Lookup the checksum for the read bio in csum tree.
 *
 * @inode: inode that the bio is for.
 * @bio:   bio to look up.
 * @dst:   Buffer of size nblocks * btrfs_super_csum_size() used to return
 *         checksum (nblocks = bio->bi_iter.bi_size / fs_info->sectorsize). If
 *         NULL, the checksum buffer is allocated and returned in
 *         btrfs_bio(bio)->csum instead.
 *
 * Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise.
 */
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	const u32 sectorsize = fs_info->sectorsize;
	const u32 csum_size = fs_info->csum_size;
	u32 orig_len = bio->bi_iter.bi_size;
	u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	u64 cur_disk_bytenr;
	u8 *csum;
	const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
	int count = 0;

	if ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
	    test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
		return BLK_STS_OK;

	/*
	 * This function is only called for read bio.
	 *
	 * This means two things:
	 * - All our csums should only be in csum tree
	 *   No ordered extents csums, as ordered extents are only for write
	 *   path.
	 * - No need to bother any other info from bvec
	 *   Since we're looking up csums, the only important info is the
	 *   disk_bytenr and the length, which can be extracted from bi_iter
	 *   directly.
	 */
	ASSERT(bio_op(bio) == REQ_OP_READ);
	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	if (!dst) {
		struct btrfs_bio *bbio = btrfs_bio(bio);

		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS);
			if (!bbio->csum) {
				btrfs_free_path(path);
				return BLK_STS_RESOURCE;
			}
		} else {
			bbio->csum = bbio->csum_inline;
		}
		csum = bbio->csum;
	} else {
		csum = dst;
	}

	/*
	 * If requested number of sectors is larger than one leaf can contain,
	 * kick the readahead for csum tree.
	 */
	if (nblocks > fs_info->csums_per_leaf)
		path->reada = READA_FORWARD;

	/*
	 * The free space stuff is only read when it hasn't been
	 * updated in the current transaction. So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	for (cur_disk_bytenr = orig_disk_bytenr;
	     cur_disk_bytenr < orig_disk_bytenr + orig_len;
	     cur_disk_bytenr += (count * sectorsize)) {
		u64 search_len = orig_disk_bytenr + orig_len - cur_disk_bytenr;
		unsigned int sector_offset;
		u8 *csum_dst;

		/*
		 * Although both cur_disk_bytenr and orig_disk_bytenr are u64,
		 * we're calculating the offset to the bio start.
		 *
		 * Bio size is limited to UINT_MAX, thus unsigned int is large
		 * enough to contain the raw result, not to mention the right
		 * shifted result.
		 */
		ASSERT(cur_disk_bytenr - orig_disk_bytenr < UINT_MAX);
		sector_offset = (cur_disk_bytenr - orig_disk_bytenr) >>
				fs_info->sectorsize_bits;
		csum_dst = csum + sector_offset * csum_size;

		count = search_csum_tree(fs_info, path, cur_disk_bytenr,
					 search_len, csum_dst);
		if (count <= 0) {
			/*
			 * Either we hit a critical error or we didn't find
			 * the csum.
			 * Either way, we put zero into the csums dst, and skip
			 * to the next sector.
			 */
			memset(csum_dst, 0, csum_size);
			count = 1;

			/*
			 * For data reloc inode, we need to mark the range
			 * NODATASUM so that balance won't report false csum
			 * error.
			 */
			if (BTRFS_I(inode)->root->root_key.objectid ==
			    BTRFS_DATA_RELOC_TREE_OBJECTID) {
				u64 file_offset;
				int ret;

				ret = search_file_offset_in_bio(bio, inode,
						cur_disk_bytenr, &file_offset);
				if (ret)
					set_extent_bits(io_tree, file_offset,
						file_offset + sectorsize - 1,
						EXTENT_NODATASUM);
			} else {
				btrfs_warn_rl(fs_info,
			"csum hole found for disk bytenr range [%llu, %llu)",
				cur_disk_bytenr, cur_disk_bytenr + sectorsize);
			}
		}
	}

	btrfs_free_path(path);
	return BLK_STS_OK;
}

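/*
 * Loop behavior example for the above (illustrative): for a 64KiB read bio
 * starting at disk bytenr 1M, the first search_csum_tree() call may return
 * e.g. 8 sectors worth of checksums; the loop then advances cur_disk_bytenr
 * by 8 * sectorsize and searches again, until the whole [1M, 1M + 64K)
 * range is covered or holes are zero-filled one sector at a time.
 */
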
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	const u32 csum_size = fs_info->csum_size;

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >> fs_info->sectorsize_bits;
			if (offset * csum_size <
			    btrfs_item_size(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     max_ordered_sum_bytes(fs_info, csum_size));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >> fs_info->sectorsize_bits;
			offset *= csum_size;
			size >>= fs_info->sectorsize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}

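/*
 * Usage sketch (hypothetical caller, for illustration only): to collect the
 * checksums of an extent's disk range one would do roughly
 *
 *	LIST_HEAD(list);
 *	ret = btrfs_lookup_csums_range(csum_root, disk_bytenr,
 *				       disk_bytenr + num_bytes - 1, &list, 0);
 *
 * and then walk (and eventually free) the returned btrfs_ordered_sum
 * entries. Relocation and the log tree code do lookups of this kind.
 */
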
/*
 * btrfs_csum_one_bio - Calculates checksums of the data contained inside a bio
 *
 * @inode:	 Owner of the data inside the bio
 * @bio:	 Contains the data to be checksummed
 * @file_start:  offset in file this bio begins to describe
 * @contig:	 Boolean. If true/1 means all bio vecs in this bio are
 *		 contiguous and they begin at @file_start in the file. False/0
 *		 means this bio can contain potentially discontiguous bio vecs
 *		 so the logical offset of each should be calculated separately.
 */
blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
				u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;
	int nr_sectors;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	int i;
	u64 offset;
	unsigned nofs_flag;

	nofs_flag = memalloc_nofs_save();
	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
			GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = bio->bi_iter.bi_sector << 9;
	index = 0;

	shash->tfm = fs_info->csum_shash;

	bio_for_each_segment(bvec, bio, iter) {
		if (!contig)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			/*
			 * The bio range is not covered by any ordered extent,
			 * which must be a code logic error.
			 */
			if (unlikely(!ordered)) {
				WARN(1, KERN_WARNING
			"no ordered extent for root %llu ino %llu offset %llu\n",
				     inode->root->root_key.objectid,
				     btrfs_ino(inode), offset);
				kvfree(sums);
				return BLK_STS_IOERR;
			}
		}

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->num_bytes ||
			    offset < ordered->file_offset) {
				unsigned long bytes_left;

				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				nofs_flag = memalloc_nofs_save();
				sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
						bytes_left), GFP_KERNEL);
				memalloc_nofs_restore(nofs_flag);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = (bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;
			}

			data = bvec_kmap_local(&bvec);
			crypto_shash_digest(shash,
					    data + (i * fs_info->sectorsize),
					    fs_info->sectorsize,
					    sums->sums + index);
			kunmap_local(data);
			index += fs_info->csum_size;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

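/*
 * Boundary handling example for the above (illustrative): if a contiguous
 * bio covers file range [0, 256K) but that range was split into two ordered
 * extents at 128K, the inner loop notices at offset 128K that the current
 * ordered extent no longer covers the offset, closes the first
 * btrfs_ordered_sum with 128K worth of checksums and starts a new one
 * against the second ordered extent.
 */
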
/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, bytenr + len)
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	const u32 csum_size = fs_info->csum_size;
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sectorsize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
	csum_end <<= blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum right half]
		 *
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}

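/*
 * Worked example (illustrative, 4KiB sectors, 4 byte csums): an item at key
 * offset 0 covering [0, 64K) holds 16 checksums (64 bytes). Removing
 * [48K, 64K) is a tail truncate keeping (48K >> 12) * 4 = 48 bytes.
 * Removing [0, 16K) instead keeps the last 12 checksums (48 bytes) and bumps
 * the item's key offset to 16K.
 */
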
/*
 * Deletes the csum items from the csum tree for a given range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret = 0;
	const u32 csum_size = fs_info->csum_size;
	u32 blocksize_bits = fs_info->sectorsize_bits;

	ASSERT(root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID ||
	       root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			int del_nr = 1;

			/*
			 * Check how many csum items preceding this one in this
			 * leaf correspond to our range and then delete them all
			 * at once.
			 */
			if (key.offset > bytenr && path->slots[0] > 0) {
				int slot = path->slots[0] - 1;

				while (slot >= 0) {
					struct btrfs_key pk;

					btrfs_item_key_to_cpu(leaf, &pk, slot);
					if (pk.offset < bytenr ||
					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
					    pk.objectid !=
					    BTRFS_EXTENT_CSUM_OBJECTID)
						break;
					path->slots[0] = slot;
					del_nr++;
					key.offset = pk.offset;
					slot--;
				}
			}
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
				break;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum                ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header. So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes. Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
			ret = 0;

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	btrfs_free_path(path);
	return ret;
}

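/*
 * Summary of the overlap cases handled above: items fully inside
 * [bytenr, bytenr + len) are deleted outright (in batches per leaf); an item
 * that strictly contains the range is split in place and then trimmed on the
 * following loop iterations; a partial overlap at either edge is handled by
 * truncate_one_csum().
 */
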
static int find_next_csum_offset(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 *next_offset)
{
	const u32 nritems = btrfs_header_nritems(path->nodes[0]);
	struct btrfs_key found_key;
	int slot = path->slots[0] + 1;
	int ret = 0;

	if (nritems == 0 || slot >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0) {
			return ret;
		} else if (ret > 0) {
			*next_offset = (u64)-1;
			return 0;
		}
		slot = path->slots[0];
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);

	if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    found_key.type != BTRFS_EXTENT_CSUM_KEY)
		*next_offset = (u64)-1;
	else
		*next_offset = found_key.offset;

	return 0;
}

int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	const u32 csum_size = fs_info->csum_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto out;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		/* We didn't find a csum item, insert one. */
		ret = find_next_csum_offset(root, path, &next_offset);
		if (ret < 0)
			goto out;
		found_next = 1;
		goto insert;
	}

	/*
	 * At this point, we know the tree has a checksum item that ends at an
	 * offset matching the start of the checksum range we want to insert.
	 * We try to extend that item as much as possible and then add as many
	 * checksums to it as they fit.
	 *
	 * First check if the leaf has enough free space for at least one
	 * checksum. If it has, go directly to the item extension code,
	 * otherwise release the path and do a search for insertion before the
	 * extension.
	 */
	if (btrfs_leaf_free_space(leaf) >= csum_size) {
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		csum_offset = (bytenr - found_key.offset) >>
			fs_info->sectorsize_bits;
		goto extend_csum;
	}

	btrfs_release_path(path);
	path->search_for_extension = 1;
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	path->search_for_extension = 0;
	if (ret < 0)
		goto out;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >> fs_info->sectorsize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

extend_csum:
	if (csum_offset == btrfs_item_size(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sectorsize_bits;
		WARN_ON(tmp < 1);
		extend_nr = max_t(int, 1, tmp);

		/*
		 * A log tree can already have checksum items with a subset of
		 * the checksums we are trying to log. This can happen after
		 * doing a sequence of partial writes into prealloc extents and
		 * fsyncs in between, with a full fsync logging a larger subrange
		 * of an extent for which a previous fast fsync logged a smaller
		 * subrange. And this happens in particular due to merging file
		 * extent items when we complete an ordered extent for a range
		 * covered by a prealloc extent - this is done at
		 * btrfs_mark_extent_written().
		 *
		 * So if we try to extend the previous checksum item, which has
		 * a range that ends at the start of the range we want to insert,
		 * make sure we don't extend beyond the start offset of the next
		 * checksum item. If we are at the last item in the leaf, then
		 * forget the optimization of extending and add a new checksum
		 * item - it is not worth the complexity of releasing the path,
		 * getting the first key for the next leaf, repeating the btree
		 * search, etc, because log trees are temporary anyway and it
		 * would only save a few bytes of leaf space.
		 */
		if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
			if (path->slots[0] + 1 >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = find_next_csum_offset(root, path, &next_offset);
				if (ret < 0)
					goto out;
				found_next = 1;
				goto insert;
			}

			ret = find_next_csum_offset(root, path, &next_offset);
			if (ret < 0)
				goto out;

			tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
			if (tmp <= INT_MAX)
				extend_nr = min_t(int, extend_nr, tmp);
		}

		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size(leaf, path->slots[0]);
		diff = min_t(u32, btrfs_leaf_free_space(leaf), diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sectorsize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
			       fs_info->sectorsize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	if (ret < 0)
		goto out;
	if (WARN_ON(ret != 0))
		goto out;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >> fs_info->sectorsize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	index += ins_size;
	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;
}

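/*
 * Extension arithmetic example for the code above (illustrative, 4 byte
 * csums): if the existing item already holds csum_offset = 100 checksums and
 * 64K of checksummed data is still pending (extend_nr = 16 sectors), the
 * target size is (100 + 16) * 4 = 464 bytes. That is clamped by
 * MAX_CSUM_ITEMS and by the free space left in the leaf, rounded down to a
 * multiple of csum_size, and btrfs_extend_item() then grows the item in
 * place before the checksums are copied at the "csum" label.
 */
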
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;
	extent_end = btrfs_file_extent_end(path);
	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}

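/*
 * Mapping example (illustrative): a regular, uncompressed extent item at
 * file offset 1M with disk_bytenr 10M, extent offset 4K and num_bytes 60K
 * produces an extent_map with start = 1M, len = 60K, orig_start = 1M - 4K
 * and block_start = 10M + 4K. A compressed extent instead keeps block_start
 * at the start of the on-disk extent (10M) and sets block_len to the full
 * compressed size.
 */
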
/*
 * Returns the end offset (non inclusive) of the file extent item the given path
 * points to. If it points to an inline extent, the returned offset is rounded
 * up to the sector size.
 */
u64 btrfs_file_extent_end(const struct btrfs_path *path)
{
	const struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 end;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
		end = btrfs_file_extent_ram_bytes(leaf, fi);
		end = ALIGN(key.offset + end, leaf->fs_info->sectorsize);
	} else {
		end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	}

	return end;
}

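/*
 * Example (illustrative): an inline extent at key offset 0 with
 * ram_bytes = 500 on a 4KiB sectorsize filesystem reports an end offset of
 * ALIGN(0 + 500, 4096) = 4096, while a regular extent at offset 4K with
 * num_bytes = 8K simply ends at 12K.
 */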