// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
#include "transaction.h"
#include "dev-replace.h"
#include "space-info.h"

/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES   4096

/* Invalid allocation pointer value for missing devices */
#define WP_MISSING_DEV ((u64)-1)
/* Pseudo write pointer value for conventional zone */
#define WP_CONVENTIONAL ((u64)-2)

/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES 2

static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct blk_zone *zones = data;

	memcpy(&zones[idx], zone, sizeof(*zone));

	return 0;
}

static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{
	bool empty[BTRFS_NR_SB_LOG_ZONES];
	bool full[BTRFS_NR_SB_LOG_ZONES];
	sector_t sector;

	ASSERT(zones[0].type != BLK_ZONE_TYPE_CONVENTIONAL &&
	       zones[1].type != BLK_ZONE_TYPE_CONVENTIONAL);

	empty[0] = (zones[0].cond == BLK_ZONE_COND_EMPTY);
	empty[1] = (zones[1].cond == BLK_ZONE_COND_EMPTY);
	full[0] = (zones[0].cond == BLK_ZONE_COND_FULL);
	full[1] = (zones[1].cond == BLK_ZONE_COND_FULL);
	/*
	 * Possible states of log buffer zones
	 *
	 *           Empty[0]  In use[0]  Full[0]
	 * Empty[1]         *          x        0
	 * In use[1]        0          x        0
	 * Full[1]          1          1        C
	 *
	 * Log position:
	 *   *: Special case, no superblock is written
	 *   0: Use write pointer of zones[0]
	 *   1: Use write pointer of zones[1]
	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
	 *      one determined by generation
	 *   x: Invalid state
	 */
	if (empty[0] && empty[1]) {
		/* Special case to distinguish no superblock to read */
		*wp_ret = zones[0].start << SECTOR_SHIFT;
		return -ENOENT;
	} else if (full[0] && full[1]) {
		/* Compare two super blocks */
		struct address_space *mapping = bdev->bd_inode->i_mapping;
		struct page *page[BTRFS_NR_SB_LOG_ZONES];
		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
		int i;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
			u64 bytenr;

			bytenr = ((zones[i].start + zones[i].len)
				   << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;

			page[i] = read_cache_page_gfp(mapping,
					bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
			}
			super[i] = page_address(page[i]);
		}

		if (super[0]->generation > super[1]->generation)
			sector = zones[1].start;
		else
			sector = zones[0].start;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
			btrfs_release_disk_super(super[i]);
	} else if (!full[0] && (empty[1] || full[1])) {
		sector = zones[0].wp;
	} else if (full[0]) {
		sector = zones[1].wp;
	} else {
		return -EUCLEAN;
	}

	*wp_ret = sector << SECTOR_SHIFT;

	return 0;
}

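/*
 * Informative sketch (not upstream code): a caller typically consumes the
 * tri-state result of sb_write_pointer() like this:
 *
 *	u64 wp;
 *	int ret = sb_write_pointer(bdev, zones, &wp);
 *
 *	if (ret == -ENOENT)
 *		;	// both zones empty: no superblock written yet
 *	else if (ret < 0)
 *		;	// -EUCLEAN: invalid combination of zone conditions
 *	else
 *		;	// wp is the byte offset of the next superblock slot
 */
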
/*
 * The following zones are reserved as the circular buffer on ZONED btrfs.
 *  - The primary superblock: zones 0 and 1
 *  - The first copy: zones 16 and 17
 *  - The second copy: zones 1024 or zone at 256GB which is minimum, and
 *    the following one
 */
static inline u32 sb_zone_number(int shift, int mirror)
{
	ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);

	switch (mirror) {
	case 0: return 0;
	case 1: return 16;
	case 2: return min_t(u64, btrfs_sb_offset(mirror) >> shift, 1024);
	}

	return 0;
}

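/*
 * Worked example (informative): with 256MiB zones (shift == 28) the three
 * superblock copies land in zones 0, 16 and 1024 (256GiB >> 28 == 1024).
 * With 1GiB zones (shift == 30) the third copy moves down to zone 256, and
 * with 64MiB zones (shift == 26) the min_t() cap keeps it at zone 1024.
 */
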
/*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
 * device into static sized chunks and fakes a conventional zone on each of
 * them.
 */
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
				struct blk_zone *zones, unsigned int nr_zones)
{
	const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
	sector_t bdev_size = bdev_nr_sectors(device->bdev);
	unsigned int i;

	pos >>= SECTOR_SHIFT;
	for (i = 0; i < nr_zones; i++) {
		zones[i].start = i * zone_sectors + pos;
		zones[i].len = zone_sectors;
		zones[i].capacity = zone_sectors;
		zones[i].wp = zones[i].start + zone_sectors;
		zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
		zones[i].cond = BLK_ZONE_COND_NOT_WP;

		if (zones[i].wp >= bdev_size) {
			i++;
			break;
		}
	}

	return i;
}

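/*
 * Informative example: a 1GiB regular device with an emulated zone size of
 * 256MiB reports four conventional zones. Each fake zone has its write
 * pointer pinned at the zone end and condition BLK_ZONE_COND_NOT_WP, so it
 * is never treated as an empty sequential zone by the bitmaps built below.
 */
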
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	int ret;

	if (!*nr_zones)
		return 0;

	if (!bdev_is_zoned(device->bdev)) {
		ret = emulate_report_zones(device, pos, zones, *nr_zones);
		*nr_zones = ret;
		return 0;
	}

	ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
				  copy_zone_info_cb, zones);
	if (ret < 0) {
		btrfs_err_in_rcu(device->fs_info,
				 "zoned: failed to read zone %llu on %s (devid %llu)",
				 pos, rcu_str_deref(device->name),
				 device->devid);
		return ret;
	}
	*nr_zones = ret;
	if (!ret)
		return -EIO;

	return 0;
}

/* The emulated zone size is determined from the size of the device extent */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_dev_extent *dext;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

	leaf = path->nodes[0];
	dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
	ret = 0;

out:
	btrfs_free_path(path);

	return ret;
}

int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* We can skip reading of zone info for missing devices */
		if (!device->bdev)
			continue;

		ret = btrfs_get_dev_zone_info(device);
		if (ret)
			break;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

int btrfs_get_dev_zone_info(struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_zoned_device_info *zone_info = NULL;
	struct block_device *bdev = device->bdev;
	struct request_queue *queue = bdev_get_queue(bdev);
	sector_t nr_sectors;
	sector_t sector = 0;
	struct blk_zone *zones = NULL;
	unsigned int i, nreported = 0, nr_zones;
	sector_t zone_sectors;
	char *model, *emulated;
	int ret;

	/*
	 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
	 * be set yet.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	if (device->zone_info)
		return 0;

	zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return -ENOMEM;

	if (!bdev_is_zoned(bdev)) {
		if (!fs_info->zone_size) {
			ret = calculate_emulated_zone_size(fs_info);
			if (ret)
				goto out;
		}

		ASSERT(fs_info->zone_size);
		zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
	} else {
		zone_sectors = bdev_zone_sectors(bdev);
	}

	nr_sectors = bdev_nr_sectors(bdev);
	/* Check if it's power of 2 (see is_power_of_2) */
	ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
	zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
	zone_info->max_zone_append_size =
		(u64)queue_max_zone_append_sectors(queue) << SECTOR_SHIFT;
	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
	if (!IS_ALIGNED(nr_sectors, zone_sectors))
		zone_info->nr_zones++;

	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		ret = -ENOMEM;
		goto out;
	}

	/* Get zones type */
	while (sector < nr_sectors) {
		nr_zones = BTRFS_REPORT_NR_ZONES;
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
					  &nr_zones);
		if (ret)
			goto out;

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
				__set_bit(nreported, zone_info->seq_zones);
			if (zones[i].cond == BLK_ZONE_COND_EMPTY)
				__set_bit(nreported, zone_info->empty_zones);
			nreported++;
		}
		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
	}

	if (nreported != zone_info->nr_zones) {
		btrfs_err_in_rcu(device->fs_info,
				 "inconsistent number of zones on %s (%u/%u)",
				 rcu_str_deref(device->name), nreported,
				 zone_info->nr_zones);
		ret = -EIO;
		goto out;
	}

	/* Validate superblock log */
	nr_zones = BTRFS_NR_SB_LOG_ZONES;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		u32 sb_zone;
		u64 sb_wp;
		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;

		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
		if (sb_zone + 1 >= zone_info->nr_zones)
			continue;

		sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
					  &zone_info->sb_zones[sb_pos],
					  &nr_zones);
		if (ret)
			goto out;

		if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
			btrfs_err_in_rcu(device->fs_info,
	"zoned: failed to read super block log zone info at devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}

		/*
		 * If zones[0] is conventional, always use the beginning of the
		 * zone to record superblock. No need to validate in that case.
		 */
		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
		    BLK_ZONE_TYPE_CONVENTIONAL)
			continue;

		ret = sb_write_pointer(device->bdev,
				       &zone_info->sb_zones[sb_pos], &sb_wp);
		if (ret != -ENOENT && ret) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: super block log zone corrupted devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}
	}

	kfree(zones);

	device->zone_info = zone_info;

	switch (bdev_zoned_model(bdev)) {
	case BLK_ZONED_HM:
		model = "host-managed zoned";
		emulated = "";
		break;
	case BLK_ZONED_HA:
		model = "host-aware zoned";
		emulated = "";
		break;
	case BLK_ZONED_NONE:
		model = "regular";
		emulated = "emulated ";
		break;
	default:
		/* Just in case */
		btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
				 bdev_zoned_model(bdev),
				 rcu_str_deref(device->name));
		ret = -EOPNOTSUPP;
		goto out_free_zone_info;
	}

	btrfs_info_in_rcu(fs_info,
		"%s block device %s, %u %szones of %llu bytes",
		model, rcu_str_deref(device->name), zone_info->nr_zones,
		emulated, zone_info->zone_size);

	return 0;

out:
	kfree(zones);
out_free_zone_info:
	bitmap_free(zone_info->empty_zones);
	bitmap_free(zone_info->seq_zones);
	kfree(zone_info);
	device->zone_info = NULL;

	return ret;
}

void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return;

	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	kfree(zone_info);
	device->zone_info = NULL;
}

int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
		       struct blk_zone *zone)
{
	unsigned int nr_zones = 1;
	int ret;

	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
	if (ret != 0 || !nr_zones)
		return ret ? ret : -EIO;

	return 0;
}

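/*
 * Informative sketch (hypothetical caller, not upstream code): reading the
 * zone that backs a known physical byte offset on a device:
 *
 *	struct blk_zone zone;
 *	int ret = btrfs_get_dev_zone(device, physical, &zone);
 *
 *	if (!ret)
 *		offset = (zone.wp - zone.start) << SECTOR_SHIFT;
 *
 * This is how btrfs_load_block_group_zone_info() below derives the
 * per-stripe allocation offset for a partially used sequential zone.
 */
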
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 zoned_devices = 0;
	u64 nr_devices = 0;
	u64 zone_size = 0;
	u64 max_zone_append_size = 0;
	const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
	int ret = 0;

	/* Count zoned devices */
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		enum blk_zoned_model model;

		if (!device->bdev)
			continue;

		model = bdev_zoned_model(device->bdev);
		/*
		 * A Host-Managed zoned device must be used as a zoned device.
		 * A Host-Aware zoned device and a non-zoned device can be
		 * treated as a zoned device, if the ZONED flag is enabled in
		 * the superblock.
		 */
		if (model == BLK_ZONED_HM ||
		    (model == BLK_ZONED_HA && incompat_zoned) ||
		    (model == BLK_ZONED_NONE && incompat_zoned)) {
			struct btrfs_zoned_device_info *zone_info =
				device->zone_info;

			zoned_devices++;
			if (!zone_size) {
				zone_size = zone_info->zone_size;
			} else if (zone_info->zone_size != zone_size) {
				btrfs_err(fs_info,
		"zoned: unequal block device zone sizes: have %llu found %llu",
					  device->zone_info->zone_size,
					  zone_size);
				ret = -EINVAL;
				goto out;
			}
			if (!max_zone_append_size ||
			    (zone_info->max_zone_append_size &&
			     zone_info->max_zone_append_size < max_zone_append_size))
				max_zone_append_size =
					zone_info->max_zone_append_size;
		}
		nr_devices++;
	}

	if (!zoned_devices && !incompat_zoned)
		goto out;

	if (!zoned_devices && incompat_zoned) {
		/* No zoned block device found on ZONED filesystem */
		btrfs_err(fs_info,
			  "zoned: no zoned devices found on a zoned filesystem");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices && !incompat_zoned) {
		btrfs_err(fs_info,
			  "zoned: mode not enabled but zoned device found");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices != nr_devices) {
		btrfs_err(fs_info,
			  "zoned: cannot mix zoned and regular devices");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
	 * __btrfs_alloc_chunk(). Since we want stripe_len == zone_size,
	 * check the alignment here.
	 */
	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
		btrfs_err(fs_info,
			  "zoned: zone size %llu not aligned to stripe %u",
			  zone_size, BTRFS_STRIPE_LEN);
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		btrfs_err(fs_info, "zoned: mixed block groups not supported");
		ret = -EINVAL;
		goto out;
	}

	fs_info->zone_size = zone_size;
	fs_info->max_zone_append_size = max_zone_append_size;
	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;

	/*
	 * Check mount options here, because we might change fs_info->zoned
	 * from fs_info->zone_size.
	 */
	ret = btrfs_check_mountopts_zoned(fs_info);
	if (ret)
		goto out;

	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
out:
	return ret;
}

int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
{
	if (!btrfs_is_zoned(info))
		return 0;

	/*
	 * Space cache writing is not COWed. Disable that to avoid write errors
	 * in sequential zones.
	 */
	if (btrfs_test_opt(info, SPACE_CACHE)) {
		btrfs_err(info, "zoned: space cache v1 is not supported");
		return -EINVAL;
	}

	if (btrfs_test_opt(info, NODATACOW)) {
		btrfs_err(info, "zoned: NODATACOW not supported");
		return -EINVAL;
	}

	return 0;
}

static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{
	u64 wp;
	int ret;

	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
		return 0;
	}

	ret = sb_write_pointer(bdev, zones, &wp);
	if (ret != -ENOENT && ret < 0)
		return ret;

	if (rw == WRITE) {
		struct blk_zone *reset = NULL;

		if (wp == zones[0].start << SECTOR_SHIFT)
			reset = &zones[0];
		else if (wp == zones[1].start << SECTOR_SHIFT)
			reset = &zones[1];

		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
			ASSERT(reset->cond == BLK_ZONE_COND_FULL);

			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len,
					       GFP_NOFS);
			if (ret)
				return ret;

			reset->cond = BLK_ZONE_COND_EMPTY;
			reset->wp = reset->start;
		}
	} else if (ret != -ENOENT) {
		/* For READ, we want the previous one */
		if (wp == zones[0].start << SECTOR_SHIFT)
			wp = (zones[1].start + zones[1].len) << SECTOR_SHIFT;
		wp -= BTRFS_SUPER_INFO_SIZE;
	}

	*bytenr_ret = wp;

	return 0;
}

int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret)
{
	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
	sector_t zone_sectors;
	u32 sb_zone;
	int ret;
	u8 zone_sectors_shift;
	sector_t nr_sectors;
	u32 nr_zones;

	if (!bdev_is_zoned(bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	ASSERT(rw == READ || rw == WRITE);

	zone_sectors = bdev_zone_sectors(bdev);
	if (!is_power_of_2(zone_sectors))
		return -EINVAL;
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
				  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
				  zones);
	if (ret < 0)
		return ret;
	if (ret != BTRFS_NR_SB_LOG_ZONES)
		return -EIO;

	return sb_log_location(bdev, zones, rw, bytenr_ret);
}

int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zone_num;

	/*
	 * For a zoned filesystem on a non-zoned block device, use the same
	 * super block locations as a regular filesystem. Doing so, the super
	 * block can always be retrieved and the zoned flag of the volume
	 * detected from the super block information.
	 */
	if (!bdev_is_zoned(device->bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return -ENOENT;

	return sb_log_location(device->bdev,
			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
			       rw, bytenr_ret);
}

static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
				  int mirror)
{
	u32 zone_num;

	if (!zinfo)
		return false;

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return false;

	if (!test_bit(zone_num, zinfo->seq_zones))
		return false;

	return true;
}

void btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	struct blk_zone *zone;

	if (!is_sb_log_zone(zinfo, mirror))
		return;

	zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
	if (zone->cond != BLK_ZONE_COND_FULL) {
		if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		zone->wp += (BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT);

		if (zone->wp == zone->start + zone->len)
			zone->cond = BLK_ZONE_COND_FULL;

		return;
	}

	zone++;
	ASSERT(zone->cond != BLK_ZONE_COND_FULL);
	if (zone->cond == BLK_ZONE_COND_EMPTY)
		zone->cond = BLK_ZONE_COND_IMP_OPEN;

	zone->wp += (BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT);

	if (zone->wp == zone->start + zone->len)
		zone->cond = BLK_ZONE_COND_FULL;
}

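/*
 * Informative note: BTRFS_SUPER_INFO_SIZE is 4KiB, so each superblock write
 * advances the cached write pointer by 8 sectors (4096 >> SECTOR_SHIFT).
 * A 256MiB zone therefore holds 65536 superblock generations before the log
 * rolls over into the second zone of the pair.
 */
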
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
	sector_t zone_sectors;
	sector_t nr_sectors;
	u8 zone_sectors_shift;
	u32 sb_zone;
	u32 nr_zones;

	zone_sectors = bdev_zone_sectors(bdev);
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
				sb_zone << zone_sectors_shift,
				zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
}

/**
 * btrfs_find_allocatable_zones - find allocatable zones within a given region
 *
 * @device:	the device to allocate a region on
 * @hole_start: the position of the hole to allocate the region
 * @num_bytes:	size of wanted region
 * @hole_end:	the end of the hole
 * @return:	position of allocatable zones
 *
 * Allocatable region should not contain any superblock locations.
 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	u64 nzones = num_bytes >> shift;
	u64 pos = hole_start;
	u64 begin, end;
	bool have_sb;
	int i;

	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));

	while (pos < hole_end) {
		begin = pos >> shift;
		end = begin + nzones;

		if (end > zinfo->nr_zones)
			return hole_end;

		/* Check if zones in the region are all empty */
		if (btrfs_dev_is_sequential(device, pos) &&
		    find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
			pos += zinfo->zone_size;
			continue;
		}

		have_sb = false;
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			u32 sb_zone;
			u64 sb_pos;

			sb_zone = sb_zone_number(shift, i);
			if (!(end <= sb_zone ||
			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
				have_sb = true;
				pos = ((u64)sb_zone + BTRFS_NR_SB_LOG_ZONES) << shift;
				break;
			}

			/* We also need to exclude regular superblock positions */
			sb_pos = btrfs_sb_offset(i);
			if (!(pos + num_bytes <= sb_pos ||
			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
				have_sb = true;
				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
					    zinfo->zone_size);
				break;
			}
		}
		if (!have_sb)
			break;
	}

	return pos;
}

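/*
 * Worked example (informative): with 256MiB zones, a hole starting at zone 14
 * and a 1GiB (4 zone) request gives begin == 14, end == 18, which overlaps
 * the first-copy superblock log zones 16-17; the search then restarts at
 * zone 18, i.e. pos = (16 + BTRFS_NR_SB_LOG_ZONES) << shift.
 */
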
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes)
{
	int ret;

	*bytes = 0;
	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
			       GFP_NOFS);
	if (ret)
		return ret;

	*bytes = length;
	while (length) {
		btrfs_dev_set_zone_empty(device, physical);
		physical += device->zone_info->zone_size;
		length -= device->zone_info->zone_size;
	}

	return 0;
}

int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	unsigned long begin = start >> shift;
	unsigned long end = (start + size) >> shift;
	u64 pos;
	int ret;

	ASSERT(IS_ALIGNED(start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(size, zinfo->zone_size));

	if (end > zinfo->nr_zones)
		return -ERANGE;

	/*
	 * All the zones are conventional. Note the bitops API takes the
	 * bitmap size before the start offset.
	 */
	if (find_next_bit(zinfo->seq_zones, end, begin) == end)
		return 0;

	/* All the zones are sequential and empty */
	if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
	    find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
		return 0;

	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
		u64 reset_bytes;

		if (!btrfs_dev_is_sequential(device, pos) ||
		    btrfs_dev_is_empty_zone(device, pos))
			continue;

		/* Free regions should be empty */
		btrfs_warn_in_rcu(device->fs_info,
		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
				  rcu_str_deref(device->name), device->devid,
				  pos >> shift);
		WARN_ON_ONCE(1);

		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
					      &reset_bytes);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points to the
 * end of the highest addressed extent in the block group as the allocation
 * offset.
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
				   u64 *offset_ret)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	u64 length;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = cache->start + cache->length;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* We should not find the exact match */
	if (!ret)
		ret = -EUCLEAN;
	if (ret < 0)
		goto out;

	ret = btrfs_previous_extent_item(root, path, cache->start);
	if (ret) {
		if (ret == 1) {
			ret = 0;
			*offset_ret = 0;
		}
		goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
		length = found_key.offset;
	else
		length = fs_info->nodesize;

	if (!(found_key.objectid >= cache->start &&
	      found_key.objectid + length <= cache->start + cache->length)) {
		ret = -EUCLEAN;
		goto out;
	}
	*offset_ret = found_key.objectid + length - cache->start;
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

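/*
 * Informative example: in a conventional-zone block group spanning
 * [1GiB, 2GiB), if the highest extent item found is a 16KiB metadata extent
 * (length == nodesize) at logical 1.5GiB, the returned offset is
 * 1.5GiB + 16KiB - 1GiB, so allocation resumes right after that extent.
 */
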
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 logical = cache->start;
	u64 length = cache->length;
	u64 physical = 0;
	int ret;
	int i;
	unsigned int nofs_flag;
	u64 *alloc_offsets = NULL;
	u64 last_alloc = 0;
	u32 num_sequential = 0, num_conventional = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	/* Sanity check */
	if (!IS_ALIGNED(length, fs_info->zone_size)) {
		btrfs_err(fs_info,
		"zoned: block group %llu len %llu unaligned to zone size %llu",
			  logical, length, fs_info->zone_size);
		return -EIO;
	}

	/* Get the chunk mapping */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em)
		return -EINVAL;

	map = em->map_lookup;

	alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
	if (!alloc_offsets) {
		free_extent_map(em);
		return -ENOMEM;
	}

	for (i = 0; i < map->num_stripes; i++) {
		bool is_sequential;
		struct blk_zone zone;
		struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
		int dev_replace_is_ongoing = 0;

		device = map->stripes[i].dev;
		physical = map->stripes[i].physical;

		if (device->bdev == NULL) {
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		}

		is_sequential = btrfs_dev_is_sequential(device, physical);
		if (is_sequential)
			num_sequential++;
		else
			num_conventional++;

		if (!is_sequential) {
			alloc_offsets[i] = WP_CONVENTIONAL;
			continue;
		}

		/*
		 * This zone will be used for allocation, so mark this zone
		 * non-empty.
		 */
		btrfs_dev_clear_zone_empty(device, physical);

		down_read(&dev_replace->rwsem);
		dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
		if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
			btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical);
		up_read(&dev_replace->rwsem);

		/*
		 * The group is mapped to a sequential zone. Get the zone write
		 * pointer to determine the allocation offset within the zone.
		 */
		WARN_ON(!IS_ALIGNED(physical, fs_info->zone_size));
		nofs_flag = memalloc_nofs_save();
		ret = btrfs_get_dev_zone(device, physical, &zone);
		memalloc_nofs_restore(nofs_flag);
		if (ret == -EIO || ret == -EOPNOTSUPP) {
			ret = 0;
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		} else if (ret) {
			goto out;
		}

		switch (zone.cond) {
		case BLK_ZONE_COND_OFFLINE:
		case BLK_ZONE_COND_READONLY:
			btrfs_err(fs_info,
		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
				  physical >> device->zone_info->zone_size_shift,
				  rcu_str_deref(device->name), device->devid);
			alloc_offsets[i] = WP_MISSING_DEV;
			break;
		case BLK_ZONE_COND_EMPTY:
			alloc_offsets[i] = 0;
			break;
		case BLK_ZONE_COND_FULL:
			alloc_offsets[i] = fs_info->zone_size;
			break;
		default:
			/* Partially used zone */
			alloc_offsets[i] =
					((zone.wp - zone.start) << SECTOR_SHIFT);
			break;
		}
	}

	if (num_sequential > 0)
		cache->seq_zone = true;

	if (num_conventional > 0) {
		/*
		 * Avoid calling calculate_alloc_pointer() for a new BG. It
		 * is of no use for a new BG, whose allocation offset must
		 * always be 0.
		 *
		 * Also, we have a lock chain of extent buffer lock ->
		 * chunk mutex. For a new BG, this function is called from
		 * btrfs_make_block_group() which is already taking the
		 * chunk mutex. Thus, we cannot call
		 * calculate_alloc_pointer() which takes extent buffer
		 * locks to avoid deadlock.
		 */
		if (new) {
			cache->alloc_offset = 0;
			goto out;
		}
		ret = calculate_alloc_pointer(cache, &last_alloc);
		if (ret || map->num_stripes == num_conventional) {
			if (!ret)
				cache->alloc_offset = last_alloc;
			else
				btrfs_err(fs_info,
			"zoned: failed to determine allocation offset of bg %llu",
					  cache->start);
			goto out;
		}
	}

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case 0: /* single */
		cache->alloc_offset = alloc_offsets[0];
		break;
	case BTRFS_BLOCK_GROUP_DUP:
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID0:
	case BTRFS_BLOCK_GROUP_RAID10:
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
		/* non-single profiles are not supported yet */
	default:
		btrfs_err(fs_info, "zoned: profile %s not yet supported",
			  btrfs_bg_type_to_raid_name(map->type));
		ret = -EINVAL;
		goto out;
	}

out:
	/* An extent is allocated after the write pointer */
	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
		btrfs_err(fs_info,
			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
			  logical, last_alloc, cache->alloc_offset);
		ret = -EIO;
	}

	if (!ret)
		cache->meta_write_pointer = cache->alloc_offset + cache->start;

	kfree(alloc_offsets);
	free_extent_map(em);

	return ret;
}

void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{
	u64 unusable, free;

	if (!btrfs_is_zoned(cache->fs_info))
		return;

	WARN_ON(cache->bytes_super != 0);
	unusable = cache->alloc_offset - cache->used;
	free = cache->length - cache->alloc_offset;

	/* We only need ->free_space in ALLOC_SEQ block groups */
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->free_space_ctl->free_space = free;
	cache->zone_unusable = unusable;

	/* Should not have any excluded extents. Just in case, though */
	btrfs_free_excluded_extents(cache);
}

void btrfs_redirty_list_add(struct btrfs_transaction *trans,
			    struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;

	if (!btrfs_is_zoned(fs_info) ||
	    btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) ||
	    !list_empty(&eb->release_list))
		return;

	set_extent_buffer_dirty(eb);
	set_extent_bits_nowait(&trans->dirty_pages, eb->start,
			       eb->start + eb->len - 1, EXTENT_DIRTY);
	memzero_extent_buffer(eb, 0, eb->len);
	set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);

	spin_lock(&trans->releasing_ebs_lock);
	list_add_tail(&eb->release_list, &trans->releasing_ebs);
	spin_unlock(&trans->releasing_ebs_lock);
	atomic_inc(&eb->refs);
}

void btrfs_free_redirty_list(struct btrfs_transaction *trans)
{
	spin_lock(&trans->releasing_ebs_lock);
	while (!list_empty(&trans->releasing_ebs)) {
		struct extent_buffer *eb;

		eb = list_first_entry(&trans->releasing_ebs,
				      struct extent_buffer, release_list);
		list_del_init(&eb->release_list);
		free_extent_buffer(eb);
	}
	spin_unlock(&trans->releasing_ebs_lock);
}

bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_block_group *cache;
	bool ret;

	if (!btrfs_is_zoned(fs_info))
		return false;

	if (!fs_info->max_zone_append_size)
		return false;

	if (!is_data_inode(&inode->vfs_inode))
		return false;

	cache = btrfs_lookup_block_group(fs_info, em->block_start);
	ASSERT(cache);
	if (!cache)
		return false;

	ret = cache->seq_zone;
	btrfs_put_block_group(cache);

	return ret;
}

void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
				 struct bio *bio)
{
	struct btrfs_ordered_extent *ordered;
	const u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;

	if (bio_op(bio) != REQ_OP_ZONE_APPEND)
		return;

	ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), file_offset);
	if (WARN_ON(!ordered))
		return;

	ordered->physical = physical;
	ordered->disk = bio->bi_bdev->bd_disk;
	ordered->partno = bio->bi_bdev->bd_partno;

	btrfs_put_ordered_extent(ordered);
}

void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_ordered_sum *sum;
	struct block_device *bdev;
	u64 orig_logical = ordered->disk_bytenr;
	u64 *logical = NULL;
	int nr, stripe_len;

	/* Zoned devices should not have partitions. So, we can assume it is 0 */
	ASSERT(ordered->partno == 0);
	bdev = bdgrab(ordered->disk->part0);
	if (WARN_ON(!bdev))
		return;

	if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, bdev,
				     ordered->physical, &logical, &nr,
				     &stripe_len)))
		goto out;

	WARN_ON(nr != 1);

	if (orig_logical == *logical)
		goto out;

	ordered->disk_bytenr = *logical;

	em_tree = &inode->extent_tree;
	write_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, ordered->file_offset,
				   ordered->num_bytes);
	em->block_start = *logical;
	free_extent_map(em);
	write_unlock(&em_tree->lock);

	list_for_each_entry(sum, &ordered->list, list) {
		if (*logical < orig_logical)
			sum->bytenr -= orig_logical - *logical;
		else
			sum->bytenr += *logical - orig_logical;
	}

out:
	kfree(logical);
	bdput(bdev);
}

bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb,
				    struct btrfs_block_group **cache_ret)
{
	struct btrfs_block_group *cache;
	bool ret = true;

	if (!btrfs_is_zoned(fs_info))
		return true;

	cache = *cache_ret;

	if (cache && (eb->start < cache->start ||
		      cache->start + cache->length <= eb->start)) {
		btrfs_put_block_group(cache);
		cache = NULL;
		*cache_ret = NULL;
	}

	if (!cache)
		cache = btrfs_lookup_block_group(fs_info, eb->start);

	if (cache) {
		if (cache->meta_write_pointer != eb->start) {
			btrfs_put_block_group(cache);
			cache = NULL;
			ret = false;
		} else {
			cache->meta_write_pointer = eb->start + eb->len;
		}

		*cache_ret = cache;
	}

	return ret;
}

void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
				     struct extent_buffer *eb)
{
	if (!btrfs_is_zoned(eb->fs_info) || !cache)
		return;

	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
	cache->meta_write_pointer = eb->start;
}

int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
{
	if (!btrfs_dev_is_sequential(device, physical))
		return -EOPNOTSUPP;

	return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
				    length >> SECTOR_SHIFT, GFP_NOFS, 0);
}

static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
			  struct blk_zone *zone)
{
	struct btrfs_bio *bbio = NULL;
	u64 mapped_length = PAGE_SIZE;
	unsigned int nofs_flag;
	int nmirrors;
	int i, ret;

	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			       &mapped_length, &bbio);
	if (ret || !bbio || mapped_length < PAGE_SIZE) {
		btrfs_put_bbio(bbio);
		return -EIO;
	}

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		return -EINVAL;

	nofs_flag = memalloc_nofs_save();
	nmirrors = (int)bbio->num_stripes;
	for (i = 0; i < nmirrors; i++) {
		u64 physical = bbio->stripes[i].physical;
		struct btrfs_device *dev = bbio->stripes[i].dev;

		/* Missing device */
		if (!dev->bdev)
			continue;

		ret = btrfs_get_dev_zone(dev, physical, zone);
		/* Failing device */
		if (ret == -EIO || ret == -EOPNOTSUPP)
			continue;
		break;
	}
	memalloc_nofs_restore(nofs_flag);

	return ret;
}

/*
 * Synchronize write pointer in a zone at @physical_start on @tgt_dev, by
 * filling zeros between @physical_pos to a write pointer of dev-replace
 * source device.
 */
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				  u64 physical_start, u64 physical_pos)
{
	struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
	struct blk_zone zone;
	u64 length;
	u64 wp;
	int ret;

	if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
		return 0;

	ret = read_zone_info(fs_info, logical, &zone);
	if (ret)
		return ret;

	wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);

	if (physical_pos == wp)
		return 0;

	if (physical_pos > wp)
		return -EUCLEAN;

	length = wp - physical_pos;
	return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
}

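/*
 * Informative example: if the source zone reports a write pointer 8MiB into
 * the zone (zone.wp - zone.start == 16384 sectors) while the replace target
 * has only been rebuilt up to physical_start + 5MiB, the trailing 3MiB is
 * zero-filled so both write pointers end up at the same relative offset.
 */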