// SPDX-License-Identifier: GPL-2.0+
/*
 * BTRFS filesystem implementation for U-Boot
 */

#include <linux/kernel.h>
#include <linux/bug.h>
#include <malloc.h>
#include <memalign.h>
#include "btrfs.h"
#include "ctree.h"
#include "extent-io.h"
#include "disk-io.h"

u64 btrfs_read_extent_inline(struct btrfs_path *path,
			     struct btrfs_file_extent_item *extent, u64 offset,
			     u64 size, char *out)
{
	u32 clen, dlen, orig_size = size, res;
	const char *cbuf;
	char *dbuf;
	const int data_off = offsetof(struct btrfs_file_extent_item,
				      disk_bytenr);

	clen = btrfs_path_item_size(path) - data_off;
	cbuf = (const char *)extent + data_off;
	dlen = extent->ram_bytes;

	if (offset > dlen)
		return -1ULL;

	if (size > dlen - offset)
		size = dlen - offset;

	if (extent->compression == BTRFS_COMPRESS_NONE) {
		memcpy(out, cbuf + offset, size);
		return size;
	}

	if (dlen > orig_size) {
		dbuf = malloc(dlen);
		if (!dbuf)
			return -1ULL;
	} else {
		dbuf = out;
	}

	res = btrfs_decompress(extent->compression, cbuf, clen, dbuf, dlen);
	if (res == -1 || res != dlen)
		goto err;

	if (dlen > orig_size) {
		memcpy(out, dbuf + offset, size);
		free(dbuf);
	} else if (offset) {
		memmove(out, dbuf + offset, size);
	}

	return size;

err:
	if (dlen > orig_size)
		free(dbuf);

	return -1ULL;
}

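/*
 * Usage sketch (illustrative only, not called from this file): once a
 * tree lookup has positioned @path at an inline EXTENT_DATA item, the
 * payload can be copied out as below.  btrfs_path_item_ptr() and the
 * local names are assumptions of this sketch, not APIs verified here.
 *
 *	struct btrfs_file_extent_item *fi;
 *	char buf[1024];
 *	u64 rd;
 *
 *	fi = btrfs_path_item_ptr(&path, struct btrfs_file_extent_item);
 *	rd = btrfs_read_extent_inline(&path, fi, 0, sizeof(buf), buf);
 *	if (rd == -1ULL)
 *		return -EIO;	(malformed item or decompression failure)
 */
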
u64 btrfs_read_extent_reg(struct btrfs_path *path,
			  struct btrfs_file_extent_item *extent, u64 offset,
			  u64 size, char *out)
{
	u64 physical, clen, dlen, orig_size = size;
	u32 res;
	char *cbuf, *dbuf;

	clen = extent->disk_num_bytes;
	dlen = extent->num_bytes;

	if (offset > dlen)
		return -1ULL;

	if (size > dlen - offset)
		size = dlen - offset;

	/* sparse extent: there is no data on disk, just zero the output */
	if (extent->disk_bytenr == 0) {
		memset(out, 0, size);
		return size;
	}

	physical = btrfs_map_logical_to_physical(extent->disk_bytenr);
	if (physical == -1ULL)
		return -1ULL;

	if (extent->compression == BTRFS_COMPRESS_NONE) {
		physical += extent->offset + offset;
		if (!btrfs_devread(physical, size, out))
			return -1ULL;

		return size;
	}

	cbuf = malloc_cache_aligned(dlen > size ? clen + dlen : clen);
	if (!cbuf)
		return -1ULL;

	if (dlen > orig_size)
		dbuf = cbuf + clen;
	else
		dbuf = out;

	if (!btrfs_devread(physical, clen, cbuf))
		goto err;

	res = btrfs_decompress(extent->compression, cbuf, clen, dbuf, dlen);
	if (res == -1 || res != dlen)
		goto err;

	if (dlen > orig_size)
		memcpy(out, dbuf + offset, size);
	else
		memmove(out, dbuf + offset, size);

	free(cbuf);
	return size;

err:
	free(cbuf);
	return -1ULL;
}

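/*
 * Note on the compressed path above: when the decompressed extent is
 * larger than the caller asked for, a single cache-aligned allocation of
 * clen + dlen bytes holds both buffers back to back:
 *
 *	cbuf                    dbuf = cbuf + clen
 *	| compressed (clen)   | decompressed (dlen)   |
 *
 * and only @size bytes starting at dbuf + offset are copied to @out.
 * Otherwise the data is decompressed straight into @out and shifted
 * into place with memmove().
 */
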
void extent_io_tree_init(struct extent_io_tree *tree)
{
	cache_tree_init(&tree->state);
	cache_tree_init(&tree->cache);
	tree->cache_size = 0;
}

static struct extent_state *alloc_extent_state(void)
{
	struct extent_state *state;

	state = malloc(sizeof(*state));
	if (!state)
		return NULL;
	state->cache_node.objectid = 0;
	state->refs = 1;
	state->state = 0;
	state->xprivate = 0;
	return state;
}

static void btrfs_free_extent_state(struct extent_state *state)
{
	state->refs--;
	BUG_ON(state->refs < 0);
	if (state->refs == 0)
		free(state);
}

static void free_extent_state_func(struct cache_extent *cache)
{
	struct extent_state *es;

	es = container_of(cache, struct extent_state, cache_node);
	btrfs_free_extent_state(es);
}

static void free_extent_buffer_final(struct extent_buffer *eb);

void extent_io_tree_cleanup(struct extent_io_tree *tree)
{
	cache_tree_free_extents(&tree->state, free_extent_state_func);
}

static inline void update_extent_state(struct extent_state *state)
{
	state->cache_node.start = state->start;
	state->cache_node.size = state->end + 1 - state->start;
}

/*
 * Utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct cache_extent *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = prev_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			update_extent_state(state);
			remove_cache_extent(&tree->state, &other->cache_node);
			btrfs_free_extent_state(other);
		}
	}
	other_node = next_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			update_extent_state(other);
			remove_cache_extent(&tree->state, &state->cache_node);
			btrfs_free_extent_state(state);
		}
	}
	return 0;
}

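/*
 * Example of the merge rule above: if [0, 4095] and [4096, 8191] both
 * carry exactly EXTENT_DIRTY, the two records touch
 * (other->end == state->start - 1) and have identical state bits, so
 * they collapse into a single [0, 8191] record.
 */
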
/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	int ret;

	BUG_ON(end < start);
	state->state |= bits;
	state->start = start;
	state->end = end;
	update_extent_state(state);
	ret = insert_cache_extent(&tree->state, &state->cache_node);
	BUG_ON(ret);
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created first half.  'split' indicates an
 * offset inside 'orig' where it should be split: 'prealloc' keeps
 * [orig->start, split - 1] and 'orig' shrinks to [split, orig->end].
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	int ret;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	update_extent_state(prealloc);
	orig->start = split;
	update_extent_state(orig);
	ret = insert_cache_extent(&tree->state, &prealloc->cache_node);
	BUG_ON(ret);
	return 0;
}

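/*
 * Example: splitting [0, 8191] at split = 4096 leaves 'prealloc' as
 * [0, 4095] and shrinks 'orig' to [4096, 8191]; both keep the original
 * state bits and are re-indexed through update_extent_state().
 */
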
/*
 * Utility to clear some bits in a single extent state struct; removes the
 * record when no bits remain, otherwise tries to re-merge neighbours.
 * Returns the requested bits that were actually set before clearing.
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits)
{
	int ret = state->state & bits;

	state->state &= ~bits;
	if (state->state == 0) {
		remove_cache_extent(&tree->state, &state->cache_node);
		btrfs_free_extent_state(state);
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/**
 * extent_buffer_bitmap_set - set an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to set
 */
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_set) {
		*p++ |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}

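/*
 * Worked example (BITS_PER_BYTE == 8): setting pos = 5, len = 6 touches
 * two bytes.  The loop's first pass uses BITMAP_FIRST_BYTE_MASK(5) = 0xe0
 * to set bits 5-7 and leaves len = 3; the tail then applies
 * BITMAP_LAST_BYTE_MASK(11) = 0x07 to the next byte, setting bits 8-10.
 * extent_buffer_bitmap_clear() below walks the same masks with &= ~mask.
 */
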
/**
 * extent_buffer_bitmap_clear - clear an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to clear
 */
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_clear) {
		*p++ &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = ~0;
	}
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		*p &= ~mask_to_clear;
	}
}

/*
 * clear some bits on a range in the tree.
 */
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	u64 last_end;
	int err;
	int set = 0;

again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;
	state = container_of(node, struct extent_state, cache_node);
	if (state->start > end)
		goto out;
	last_end = state->end;

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, bits);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set |= clear_state_bit(tree, prealloc, bits);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	goto search_again;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return set;

search_again:
	if (start > end)
		goto out;
	goto again;
}

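/*
 * Example: after set_extent_bits(tree, 0, 16383, EXTENT_DIRTY), calling
 * clear_extent_bits(tree, 4096, 8191, EXTENT_DIRTY) splits the single
 * record into [0, 4095] and [8192, 16383], both still dirty, and returns
 * non-zero because a cleared range actually had the bit set.
 */
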
/*
 * set some bits on a range in the tree.
 */
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		goto out;
	}

	state = container_of(node, struct extent_state, cache_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		state->state |= bits;
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		goto search_again;
	}
	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state |  or  | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;

		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 * | ---------- state ---------- |
	 *
	 * We need to split the extent, and set the bit
	 * on the first half.
	 */
	err = split_state(tree, state, prealloc, end + 1);
	BUG_ON(err == -EEXIST);

	/* after the split, prealloc covers [start, end], so it gets the bits */
	prealloc->state |= bits;
	merge_state(tree, prealloc);
	prealloc = NULL;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return err;

search_again:
	if (start > end)
		goto out;
	goto again;
}

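/*
 * Example: with only [0, 4095] dirty, set_extent_bits(tree, 0, 12287,
 * EXTENT_DIRTY) reuses the existing record, inserts a new one covering
 * [4096, 12287], and merge_state() then folds the two neighbours back
 * into a single [0, 12287] record.
 */
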
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return set_extent_bits(tree, start, end, EXTENT_DIRTY);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bits(tree, start, end, EXTENT_DIRTY);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 1;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;

	while (1) {
		state = container_of(node, struct extent_state, cache_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = next_cache_extent(node);
		if (!node)
			break;
	}
out:
	return ret;
}

int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct cache_extent *node;
	int bitset = 0;

	node = search_cache_extent(&tree->state, start);
	while (node && start <= end) {
		state = container_of(node, struct extent_state, cache_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->start > end)
			break;
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = next_cache_extent(node);
		if (!node) {
			if (filled && bitset && state && state->end < end)
				bitset = 0;
			break;
		}
	}
	return bitset;
}

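/*
 * Example of the 'filled' flag: with [0, 4095] and [8192, 12287] dirty,
 * test_range_bit(tree, 0, 12287, EXTENT_DIRTY, 1) returns 0 because of
 * the hole at [4096, 8191], while the same call with filled == 0 returns
 * 1 as soon as any overlapping record has the bit set.
 */
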
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->xprivate = private;
out:
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->xprivate;
out:
	return ret;
}

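/*
 * Usage sketch (illustrative): xprivate stashes one cookie per state
 * record, keyed by the exact start offset; EXTENT_LOCKED here stands in
 * for whatever bit the caller actually tracks.
 *
 *	set_extent_bits(tree, bytenr, bytenr + len - 1, EXTENT_LOCKED);
 *	set_state_private(tree, bytenr, cookie);
 *	...
 *	if (!get_state_private(tree, bytenr, &cookie))
 *		(cookie is valid here)
 *
 * Both helpers return -ENOENT unless a state record starts exactly at
 * @start.
 */
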
static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *info,
						   u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;

	eb = calloc(1, sizeof(struct extent_buffer));
	if (!eb)
		return NULL;
	eb->data = malloc_cache_aligned(blocksize);
	if (!eb->data) {
		free(eb);
		return NULL;
	}

	eb->start = bytenr;
	eb->len = blocksize;
	eb->refs = 1;
	eb->fs_info = info;
	eb->cache_node.start = bytenr;
	eb->cache_node.size = blocksize;

	memset_extent_buffer(eb, 0, 0, blocksize);

	return eb;
}

struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
	struct extent_buffer *new;

	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
	if (!new)
		return NULL;

	copy_extent_buffer(new, src, 0, 0, src->len);
	new->flags |= EXTENT_BUFFER_DUMMY;

	return new;
}

static void free_extent_buffer_final(struct extent_buffer *eb)
{
	BUG_ON(eb->refs);
	if (!(eb->flags & EXTENT_BUFFER_DUMMY)) {
		struct extent_io_tree *tree = &eb->fs_info->extent_cache;

		remove_cache_extent(&tree->cache, &eb->cache_node);
		BUG_ON(tree->cache_size < eb->len);
		tree->cache_size -= eb->len;
	}
	free(eb->data);
	free(eb);
}

static void free_extent_buffer_internal(struct extent_buffer *eb, bool free_now)
{
	if (!eb || IS_ERR(eb))
		return;

	eb->refs--;
	BUG_ON(eb->refs < 0);
	if (eb->refs == 0) {
		if (eb->flags & EXTENT_DIRTY) {
			error(
			"dirty eb leak (aborted trans): start %llu len %u",
			      eb->start, eb->len);
		}
		if (eb->flags & EXTENT_BUFFER_DUMMY || free_now)
			free_extent_buffer_final(eb);
	}
}

void free_extent_buffer(struct extent_buffer *eb)
{
	free_extent_buffer_internal(eb, 1);
}

struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	}
	return eb;
}

struct extent_buffer *find_first_extent_buffer(struct extent_io_tree *tree,
					       u64 start)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = search_cache_extent(&tree->cache, start);
	if (cache) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	}
	return eb;
}

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;
	struct extent_io_tree *tree = &fs_info->extent_cache;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	} else {
		int ret;

		if (cache) {
			eb = container_of(cache, struct extent_buffer,
					  cache_node);
			free_extent_buffer(eb);
		}
		eb = __alloc_extent_buffer(fs_info, bytenr, blocksize);
		if (!eb)
			return NULL;
		ret = insert_cache_extent(&tree->cache, &eb->cache_node);
		if (ret) {
			free_extent_buffer(eb);
			return NULL;
		}
		tree->cache_size += blocksize;
	}
	return eb;
}

/*
 * Allocate a dummy extent buffer which won't be inserted into the extent
 * buffer cache.
 *
 * This mostly allows super block read/write using the existing eb
 * infrastructure without polluting the eb cache.
 *
 * This is especially important to avoid injecting eb->start == SZ_64K, as
 * a fuzzed image could have an invalid tree bytenr that covers the super
 * block range, and cause a ref count underflow.
 */
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 bytenr, u32 blocksize)
{
	struct extent_buffer *ret;

	ret = __alloc_extent_buffer(fs_info, bytenr, blocksize);
	if (!ret)
		return NULL;

	ret->flags |= EXTENT_BUFFER_DUMMY;

	return ret;
}

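/*
 * Usage sketch (illustrative): the super block lives at a fixed device
 * offset, so a caller would wrap it in a dummy eb instead of the cache:
 *
 *	eb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET,
 *				       BTRFS_SUPER_INFO_SIZE);
 *	if (!eb)
 *		return -ENOMEM;
 *	(read or write the super block through eb, then)
 *	free_extent_buffer(eb);
 */
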
int read_extent_from_disk(struct blk_desc *desc, struct disk_partition *part,
			  u64 physical, struct extent_buffer *eb,
			  unsigned long offset, unsigned long len)
{
	int ret;

	ret = __btrfs_devread(desc, part, eb->data + offset, len, physical);
	if (ret < 0)
		goto out;
	if (ret != len) {
		ret = -EIO;
		goto out;
	}
	ret = 0;
out:
	return ret;
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len)
{
	return memcmp(eb->data + start, ptrv, len);
}

void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start, unsigned long len)
{
	memcpy(dst, eb->data + start, len);
}

void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len)
{
	memcpy(eb->data + start, src, len);
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	memcpy(dst->data + dst_offset, src->data + src_offset, len);
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	memmove(dst->data + dst_offset, dst->data + src_offset, len);
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	memset(eb->data + start, c, len);
}

int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	return le_test_bit(nr, (u8 *)eb->data + start);
}

int set_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = &eb->fs_info->extent_cache;

	if (!(eb->flags & EXTENT_DIRTY)) {
		eb->flags |= EXTENT_DIRTY;
		set_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		extent_buffer_get(eb);
	}
	return 0;
}

int clear_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = &eb->fs_info->extent_cache;

	if (eb->flags & EXTENT_DIRTY) {
		eb->flags &= ~EXTENT_DIRTY;
		clear_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		free_extent_buffer(eb);
	}
	return 0;
}