fs/btrfs/zoned.c (linux.git, blame view at "btrfs: factor out block mapping for RAID10")

// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/atomic.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
#include "transaction.h"
#include "dev-replace.h"
#include "space-info.h"
#include "super.h"
#include "fs.h"
#include "accessors.h"
#include "bio.h"

/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES	4096
/* Invalid allocation pointer value for missing devices */
#define WP_MISSING_DEV ((u64)-1)
/* Pseudo write pointer value for conventional zone */
#define WP_CONVENTIONAL ((u64)-2)

/*
 * Location of the first zone of superblock logging zone pairs.
 *
 * - primary superblock: 0B (zone 0)
 * - first copy: 512G (zone starting at that offset)
 * - second copy: 4T (zone starting at that offset)
 */
#define BTRFS_SB_LOG_PRIMARY_OFFSET	(0ULL)
#define BTRFS_SB_LOG_FIRST_OFFSET	(512ULL * SZ_1G)
#define BTRFS_SB_LOG_SECOND_OFFSET	(4096ULL * SZ_1G)

#define BTRFS_SB_LOG_FIRST_SHIFT	const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
#define BTRFS_SB_LOG_SECOND_SHIFT	const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)

/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES 2

/*
 * Minimum of active zones we need:
 *
 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
 * - 1 zone for tree-log dedicated block group
 * - 1 zone for relocation
 */
#define BTRFS_MIN_ACTIVE_ZONES		(BTRFS_SUPER_MIRROR_MAX + 5)

/*
 * Minimum / maximum supported zone size. Currently, SMR disks have a zone
 * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
 * We do not expect the zone size to become larger than 8GiB or smaller than
 * 4MiB in the near future.
 */
#define BTRFS_MAX_ZONE_SIZE		SZ_8G
#define BTRFS_MIN_ZONE_SIZE		SZ_4M

#define SUPER_INFO_SECTORS	((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)

static void wait_eb_writebacks(struct btrfs_block_group *block_group);
static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);

static inline bool sb_zone_is_full(const struct blk_zone *zone)
{
	return (zone->cond == BLK_ZONE_COND_FULL) ||
		(zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
}

static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct blk_zone *zones = data;

	memcpy(&zones[idx], zone, sizeof(*zone));

	return 0;
}

static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{
	bool empty[BTRFS_NR_SB_LOG_ZONES];
	bool full[BTRFS_NR_SB_LOG_ZONES];
	sector_t sector;
	int i;

	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
		empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
		full[i] = sb_zone_is_full(&zones[i]);
	}

	/*
	 * Possible states of log buffer zones
	 *
	 *           Empty[0]  In use[0]  Full[0]
	 * Empty[1]         *          0        1
	 * In use[1]        x          x        1
	 * Full[1]          0          0        C
	 *
	 * Log position:
	 *   *: Special case, no superblock is written
	 *   0: Use write pointer of zones[0]
	 *   1: Use write pointer of zones[1]
	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
	 *      one determined by generation
	 *   x: Invalid state
	 */

	if (empty[0] && empty[1]) {
		/* Special case to distinguish no superblock to read */
		*wp_ret = zones[0].start << SECTOR_SHIFT;
		return -ENOENT;
	} else if (full[0] && full[1]) {
		/* Compare two super blocks */
		struct address_space *mapping = bdev->bd_inode->i_mapping;
		struct page *page[BTRFS_NR_SB_LOG_ZONES];
		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
		int i;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
			u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
			u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
				     BTRFS_SUPER_INFO_SIZE;

			page[i] = read_cache_page_gfp(mapping,
					bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
			}
			super[i] = page_address(page[i]);
		}

		if (btrfs_super_generation(super[0]) >
		    btrfs_super_generation(super[1]))
			sector = zones[1].start;
		else
			sector = zones[0].start;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
			btrfs_release_disk_super(super[i]);
	} else if (!full[0] && (empty[1] || full[1])) {
		sector = zones[0].wp;
	} else if (full[0]) {
		sector = zones[1].wp;
	} else {
		return -EUCLEAN;
	}
	*wp_ret = sector << SECTOR_SHIFT;
	return 0;
}

/*
 * Get the first zone number of the superblock mirror
 */
static inline u32 sb_zone_number(int shift, int mirror)
{
	u64 zone = U64_MAX;

	ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
	switch (mirror) {
	case 0: zone = 0; break;
	case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
	case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
	}

	ASSERT(zone <= U32_MAX);

	return (u32)zone;
}
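
/*
 * Example: with 256MiB zones the shift argument is 28, and the offset
 * macros give BTRFS_SB_LOG_FIRST_SHIFT == 39 and
 * BTRFS_SB_LOG_SECOND_SHIFT == 42. Mirror 1 then maps to zone
 * 1 << (39 - 28) == 2048 (2048 * 256MiB == 512GiB) and mirror 2 to zone
 * 1 << (42 - 28) == 16384 (16384 * 256MiB == 4TiB).
 */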

static inline sector_t zone_start_sector(u32 zone_number,
					 struct block_device *bdev)
{
	return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
}

static inline u64 zone_start_physical(u32 zone_number,
				      struct btrfs_zoned_device_info *zone_info)
{
	return (u64)zone_number << zone_info->zone_size_shift;
}

/*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
 * device into static sized chunks and fakes a conventional zone on each of
 * them.
 */
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
				struct blk_zone *zones, unsigned int nr_zones)
{
	const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
	sector_t bdev_size = bdev_nr_sectors(device->bdev);
	unsigned int i;

	pos >>= SECTOR_SHIFT;
	for (i = 0; i < nr_zones; i++) {
		zones[i].start = i * zone_sectors + pos;
		zones[i].len = zone_sectors;
		zones[i].capacity = zone_sectors;
		zones[i].wp = zones[i].start + zone_sectors;
		zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
		zones[i].cond = BLK_ZONE_COND_NOT_WP;

		if (zones[i].wp >= bdev_size) {
			i++;
			break;
		}
	}

	return i;
}
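
/*
 * Each emulated zone reports full capacity and a write pointer at the zone
 * end, with BLK_ZONE_COND_NOT_WP marking it as conventional. For example, a
 * 10GiB regular device with a 256MiB emulated zone size reports 40 such
 * zones.
 */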

static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	int ret;

	if (!*nr_zones)
		return 0;

	if (!bdev_is_zoned(device->bdev)) {
		ret = emulate_report_zones(device, pos, zones, *nr_zones);
		*nr_zones = ret;
		return 0;
	}

	/* Check cache */
	if (zinfo->zone_cache) {
		unsigned int i;
		u32 zno;

		ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
		zno = pos >> zinfo->zone_size_shift;
		/*
		 * We cannot report zones beyond the zone end. So, it is OK to
		 * cap *nr_zones at the number of zones left on the device.
		 */
		*nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);

		for (i = 0; i < *nr_zones; i++) {
			struct blk_zone *zone_info;

			zone_info = &zinfo->zone_cache[zno + i];
			if (!zone_info->len)
				break;
		}

		if (i == *nr_zones) {
			/* Cache hit on all the zones */
			memcpy(zones, zinfo->zone_cache + zno,
			       sizeof(*zinfo->zone_cache) * *nr_zones);
			return 0;
		}
	}

	ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
				  copy_zone_info_cb, zones);
	if (ret < 0) {
		btrfs_err_in_rcu(device->fs_info,
				 "zoned: failed to read zone %llu on %s (devid %llu)",
				 pos, rcu_str_deref(device->name),
				 device->devid);
		return ret;
	}
	*nr_zones = ret;
	if (!ret)
		return -EIO;

	/* Populate cache */
	if (zinfo->zone_cache) {
		u32 zno = pos >> zinfo->zone_size_shift;

		memcpy(zinfo->zone_cache + zno, zones,
		       sizeof(*zinfo->zone_cache) * *nr_zones);
	}

	return 0;
}
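
/*
 * The cache lookup above is all-or-nothing: a cached zone with len == 0
 * marks a miss, in which case the whole requested range is re-read from the
 * device and the fresh report is copied back into the cache for later
 * lookups.
 */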

/* The emulated zone size is determined from the size of device extent */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_dev_extent *dext;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

	leaf = path->nodes[0];
	dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
	ret = 0;

out:
	btrfs_free_path(path);

	return ret;
}

int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* We can skip reading of zone info for missing devices */
		if (!device->bdev)
			continue;

		ret = btrfs_get_dev_zone_info(device, true);
		if (ret)
			break;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_zoned_device_info *zone_info = NULL;
	struct block_device *bdev = device->bdev;
	unsigned int max_active_zones;
	unsigned int nactive;
	sector_t nr_sectors;
	sector_t sector = 0;
	struct blk_zone *zones = NULL;
	unsigned int i, nreported = 0, nr_zones;
	sector_t zone_sectors;
	char *model, *emulated;
	int ret;

	/*
	 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
	 * yet be set.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	if (device->zone_info)
		return 0;

	zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return -ENOMEM;

	device->zone_info = zone_info;

	if (!bdev_is_zoned(bdev)) {
		if (!fs_info->zone_size) {
			ret = calculate_emulated_zone_size(fs_info);
			if (ret)
				goto out;
		}

		ASSERT(fs_info->zone_size);
		zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
	} else {
		zone_sectors = bdev_zone_sectors(bdev);
	}

	ASSERT(is_power_of_two_u64(zone_sectors));
	zone_info->zone_size = zone_sectors << SECTOR_SHIFT;

	/* We reject devices with a zone size larger than 8GiB */
	if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
		btrfs_err_in_rcu(fs_info,
		"zoned: %s: zone size %llu larger than supported maximum %llu",
				 rcu_str_deref(device->name),
				 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
		ret = -EINVAL;
		goto out;
	} else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
		btrfs_err_in_rcu(fs_info,
		"zoned: %s: zone size %llu smaller than supported minimum %u",
				 rcu_str_deref(device->name),
				 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	nr_sectors = bdev_nr_sectors(bdev);
	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
	if (!IS_ALIGNED(nr_sectors, zone_sectors))
		zone_info->nr_zones++;

	max_active_zones = bdev_max_active_zones(bdev);
	if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
		btrfs_err_in_rcu(fs_info,
"zoned: %s: max active zones %u is too small, need at least %u active zones",
				 rcu_str_deref(device->name), max_active_zones,
				 BTRFS_MIN_ACTIVE_ZONES);
		ret = -EINVAL;
		goto out;
	}
	zone_info->max_active_zones = max_active_zones;

	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->active_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Enable zone cache only for a zoned device. On a non-zoned device, we
	 * fill the zone info with emulated CONVENTIONAL zones, so no need to
	 * use the cache.
	 */
	if (populate_cache && bdev_is_zoned(device->bdev)) {
		zone_info->zone_cache = vcalloc(zone_info->nr_zones,
						sizeof(struct blk_zone));
		if (!zone_info->zone_cache) {
			btrfs_err_in_rcu(device->fs_info,
				"zoned: failed to allocate zone cache for %s",
				rcu_str_deref(device->name));
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Get zones type */
	nactive = 0;
	while (sector < nr_sectors) {
		nr_zones = BTRFS_REPORT_NR_ZONES;
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
					  &nr_zones);
		if (ret)
			goto out;

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
				__set_bit(nreported, zone_info->seq_zones);
			switch (zones[i].cond) {
			case BLK_ZONE_COND_EMPTY:
				__set_bit(nreported, zone_info->empty_zones);
				break;
			case BLK_ZONE_COND_IMP_OPEN:
			case BLK_ZONE_COND_EXP_OPEN:
			case BLK_ZONE_COND_CLOSED:
				__set_bit(nreported, zone_info->active_zones);
				nactive++;
				break;
			}
			nreported++;
		}
		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
	}

	if (nreported != zone_info->nr_zones) {
		btrfs_err_in_rcu(device->fs_info,
				 "inconsistent number of zones on %s (%u/%u)",
				 rcu_str_deref(device->name), nreported,
				 zone_info->nr_zones);
		ret = -EIO;
		goto out;
	}

	if (max_active_zones) {
		if (nactive > max_active_zones) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: %u active zones on %s exceeds max_active_zones %u",
					 nactive, rcu_str_deref(device->name),
					 max_active_zones);
			ret = -EIO;
			goto out;
		}
		atomic_set(&zone_info->active_zones_left,
			   max_active_zones - nactive);
		set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
	}

	/* Validate superblock log */
	nr_zones = BTRFS_NR_SB_LOG_ZONES;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		u32 sb_zone;
		u64 sb_wp;
		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;

		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
		if (sb_zone + 1 >= zone_info->nr_zones)
			continue;

		ret = btrfs_get_dev_zones(device,
					  zone_start_physical(sb_zone, zone_info),
					  &zone_info->sb_zones[sb_pos],
					  &nr_zones);
		if (ret)
			goto out;

		if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
			btrfs_err_in_rcu(device->fs_info,
	"zoned: failed to read super block log zone info at devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}

		/*
		 * If zones[0] is conventional, always use the beginning of the
		 * zone to record superblock. No need to validate in that case.
		 */
		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
		    BLK_ZONE_TYPE_CONVENTIONAL)
			continue;

		ret = sb_write_pointer(device->bdev,
				       &zone_info->sb_zones[sb_pos], &sb_wp);
		if (ret != -ENOENT && ret) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: super block log zone corrupted devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}
	}

	kvfree(zones);

	switch (bdev_zoned_model(bdev)) {
	case BLK_ZONED_HM:
		model = "host-managed zoned";
		emulated = "";
		break;
	case BLK_ZONED_HA:
		model = "host-aware zoned";
		emulated = "";
		break;
	case BLK_ZONED_NONE:
		model = "regular";
		emulated = "emulated ";
		break;
	default:
		/* Just in case */
		btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
				 bdev_zoned_model(bdev),
				 rcu_str_deref(device->name));
		ret = -EOPNOTSUPP;
		goto out_free_zone_info;
	}

	btrfs_info_in_rcu(fs_info,
		"%s block device %s, %u %szones of %llu bytes",
		model, rcu_str_deref(device->name), zone_info->nr_zones,
		emulated, zone_info->zone_size);

	return 0;

out:
	kvfree(zones);
out_free_zone_info:
	btrfs_destroy_dev_zone_info(device);

	return ret;
}

void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return;

	bitmap_free(zone_info->active_zones);
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	vfree(zone_info->zone_cache);
	kfree(zone_info);
	device->zone_info = NULL;
}

struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
{
	struct btrfs_zoned_device_info *zone_info;

	zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return NULL;

	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones)
		goto out;

	bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
		    zone_info->nr_zones);

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones)
		goto out;

	bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
		    zone_info->nr_zones);

	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->active_zones)
		goto out;

	bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
		    zone_info->nr_zones);
	zone_info->zone_cache = NULL;

	return zone_info;

out:
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	bitmap_free(zone_info->active_zones);
	kfree(zone_info);
	return NULL;
}
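
/*
 * The clone (used on the dev-replace path) gets its own copies of the
 * bitmaps, but zone_cache is deliberately set to NULL above: kmemdup()
 * would otherwise leave the clone sharing the original's cache buffer.
 */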

int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
		       struct blk_zone *zone)
{
	unsigned int nr_zones = 1;
	int ret;

	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
	if (ret != 0 || !nr_zones)
		return ret ? ret : -EIO;

	return 0;
}

static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;

	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
		if (device->bdev &&
		    bdev_zoned_model(device->bdev) == BLK_ZONED_HM) {
			btrfs_err(fs_info,
				"zoned: mode not enabled but zoned device found: %pg",
				device->bdev);
			return -EINVAL;
		}
	}

	return 0;
}

int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
	struct queue_limits *lim = &fs_info->limits;
	struct btrfs_device *device;
	u64 zone_size = 0;
	int ret;

	/*
	 * Host-Managed devices can't be used without the ZONED flag. With the
	 * ZONED flag, all devices can be used, using zone emulation if
	 * required.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return btrfs_check_for_zoned_device(fs_info);

	blk_set_stacking_limits(lim);

	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
		struct btrfs_zoned_device_info *zone_info = device->zone_info;

		if (!device->bdev)
			continue;

		if (!zone_size) {
			zone_size = zone_info->zone_size;
		} else if (zone_info->zone_size != zone_size) {
			btrfs_err(fs_info,
		"zoned: unequal block device zone sizes: have %llu found %llu",
				  zone_info->zone_size, zone_size);
			return -EINVAL;
		}

		/*
		 * With zone emulation, we can have a non-zoned device in zoned
		 * mode. In this case, we don't have a valid max zone append
		 * size.
		 */
		if (bdev_is_zoned(device->bdev)) {
			blk_stack_limits(lim,
					 &bdev_get_queue(device->bdev)->limits,
					 0);
		}
	}

	/*
	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
	 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
	 * check the alignment here.
	 */
	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
		btrfs_err(fs_info,
			  "zoned: zone size %llu not aligned to stripe %u",
			  zone_size, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		btrfs_err(fs_info, "zoned: mixed block groups not supported");
		return -EINVAL;
	}

	fs_info->zone_size = zone_size;
	/*
	 * Also limit max_zone_append_size by max_segments * PAGE_SIZE.
	 * Technically, we can have multiple pages per segment. But, since
	 * we add the pages one by one to a bio, and cannot increase the
	 * metadata reservation even if it increases the number of extents, it
	 * is safe to stick with the limit.
	 */
	fs_info->max_zone_append_size = ALIGN_DOWN(
		min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT,
		     (u64)lim->max_sectors << SECTOR_SHIFT,
		     (u64)lim->max_segments << PAGE_SHIFT),
		fs_info->sectorsize);
	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
	if (fs_info->max_zone_append_size < fs_info->max_extent_size)
		fs_info->max_extent_size = fs_info->max_zone_append_size;

	/*
	 * Check mount options here, because we might change fs_info->zoned
	 * from fs_info->zone_size.
	 */
	ret = btrfs_check_mountopts_zoned(fs_info, &fs_info->mount_opt);
	if (ret)
		return ret;

	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
	return 0;
}

int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info, unsigned long *mount_opt)
{
	if (!btrfs_is_zoned(info))
		return 0;

	/*
	 * Space cache writing is not COWed. Disable that to avoid write errors
	 * in sequential zones.
	 */
	if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
		btrfs_err(info, "zoned: space cache v1 is not supported");
		return -EINVAL;
	}

	if (btrfs_raw_test_opt(*mount_opt, NODATACOW)) {
		btrfs_err(info, "zoned: NODATACOW not supported");
		return -EINVAL;
	}

	if (btrfs_raw_test_opt(*mount_opt, DISCARD_ASYNC)) {
		btrfs_info(info,
			   "zoned: async discard ignored and disabled for zoned mode");
		btrfs_clear_opt(*mount_opt, DISCARD_ASYNC);
	}

	return 0;
}

static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{
	u64 wp;
	int ret;

	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
		return 0;
	}

	ret = sb_write_pointer(bdev, zones, &wp);
	if (ret != -ENOENT && ret < 0)
		return ret;

	if (rw == WRITE) {
		struct blk_zone *reset = NULL;

		if (wp == zones[0].start << SECTOR_SHIFT)
			reset = &zones[0];
		else if (wp == zones[1].start << SECTOR_SHIFT)
			reset = &zones[1];

		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
			ASSERT(sb_zone_is_full(reset));

			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len,
					       GFP_NOFS);
			if (ret)
				return ret;

			reset->cond = BLK_ZONE_COND_EMPTY;
			reset->wp = reset->start;
		}
	} else if (ret != -ENOENT) {
		/*
		 * For READ, we want the previous superblock. If the write
		 * pointer is at the head of a zone, move it back to the end
		 * of the other zone first.
		 */
		u64 zone_end = 0;

		if (wp == zones[0].start << SECTOR_SHIFT)
			zone_end = zones[1].start + zones[1].capacity;
		else if (wp == zones[1].start << SECTOR_SHIFT)
			zone_end = zones[0].start + zones[0].capacity;
		if (zone_end)
			wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
					BTRFS_SUPER_INFO_SIZE);

		wp -= BTRFS_SUPER_INFO_SIZE;
	}

	*bytenr_ret = wp;
	return 0;
}
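
/*
 * For WRITE, when sb_write_pointer() points at the start of one zone while
 * both zones are full, that zone holds the older superblock generation, so
 * it is reset above before being reused. For READ, the returned location is
 * one BTRFS_SUPER_INFO_SIZE before the write pointer, i.e. the most
 * recently written superblock copy.
 */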

int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret)
{
	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
	sector_t zone_sectors;
	u32 sb_zone;
	int ret;
	u8 zone_sectors_shift;
	sector_t nr_sectors;
	u32 nr_zones;

	if (!bdev_is_zoned(bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	ASSERT(rw == READ || rw == WRITE);

	zone_sectors = bdev_zone_sectors(bdev);
	if (!is_power_of_2(zone_sectors))
		return -EINVAL;
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
				  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
				  zones);
	if (ret < 0)
		return ret;
	if (ret != BTRFS_NR_SB_LOG_ZONES)
		return -EIO;

	return sb_log_location(bdev, zones, rw, bytenr_ret);
}

int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zone_num;

	/*
	 * For a zoned filesystem on a non-zoned block device, use the same
	 * super block locations as regular filesystem. Doing so, the super
	 * block can always be retrieved and the zoned flag of the volume
	 * detected from the super block information.
	 */
	if (!bdev_is_zoned(device->bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return -ENOENT;

	return sb_log_location(device->bdev,
			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
			       rw, bytenr_ret);
}

static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
				  int mirror)
{
	u32 zone_num;

	if (!zinfo)
		return false;

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return false;

	if (!test_bit(zone_num, zinfo->seq_zones))
		return false;

	return true;
}

int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	struct blk_zone *zone;
	int i;

	if (!is_sb_log_zone(zinfo, mirror))
		return 0;

	zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		/* Advance the next zone */
		if (zone->cond == BLK_ZONE_COND_FULL) {
			zone++;
			continue;
		}

		if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		zone->wp += SUPER_INFO_SECTORS;

		if (sb_zone_is_full(zone)) {
			/*
			 * No room left to write new superblock. Since
			 * superblock is written with REQ_SYNC, it is safe to
			 * finish the zone now.
			 *
			 * If the write pointer is exactly at the capacity,
			 * explicit ZONE_FINISH is not necessary.
			 */
			if (zone->wp != zone->start + zone->capacity) {
				int ret;

				ret = blkdev_zone_mgmt(device->bdev,
						REQ_OP_ZONE_FINISH, zone->start,
						zone->len, GFP_NOFS);
				if (ret)
					return ret;
			}

			zone->wp = zone->start + zone->len;
			zone->cond = BLK_ZONE_COND_FULL;
		}
		return 0;
	}

	/* All the zones are FULL. Should not reach here. */
	ASSERT(0);
	return -EIO;
}
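
/*
 * This keeps the cached in-memory zone state (write pointer and condition)
 * in sync as superblock copies are written. The loop picks the first
 * non-FULL zone of the pair, so the two log zones fill up in order before
 * sb_write_pointer() hits its "both full" generation-compare case.
 */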

int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
	sector_t zone_sectors;
	sector_t nr_sectors;
	u8 zone_sectors_shift;
	u32 sb_zone;
	u32 nr_zones;

	zone_sectors = bdev_zone_sectors(bdev);
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
				zone_start_sector(sb_zone, bdev),
				zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
}

/*
 * Find allocatable zones within a given region.
 *
 * @device:	the device to allocate a region on
 * @hole_start:	the position of the hole to allocate the region
 * @num_bytes:	size of wanted region
 * @hole_end:	the end of the hole
 * @return:	position of allocatable zones
 *
 * Allocatable region should not contain any superblock locations.
 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	u64 nzones = num_bytes >> shift;
	u64 pos = hole_start;
	u64 begin, end;
	bool have_sb;
	int i;

	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));

	while (pos < hole_end) {
		begin = pos >> shift;
		end = begin + nzones;

		if (end > zinfo->nr_zones)
			return hole_end;

		/* Check if zones in the region are all empty */
		if (btrfs_dev_is_sequential(device, pos) &&
		    !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) {
			pos += zinfo->zone_size;
			continue;
		}

		have_sb = false;
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			u32 sb_zone;
			u64 sb_pos;

			sb_zone = sb_zone_number(shift, i);
			if (!(end <= sb_zone ||
			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
				have_sb = true;
				pos = zone_start_physical(
					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
				break;
			}

			/* We also need to exclude regular superblock positions */
			sb_pos = btrfs_sb_offset(i);
			if (!(pos + num_bytes <= sb_pos ||
			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
				have_sb = true;
				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
					    zinfo->zone_size);
				break;
			}
		}
		if (!have_sb)
			break;
	}

	return pos;
}
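
/*
 * Example: with 256MiB zones, a candidate region overlapping zones
 * 2048-2049 (the superblock log pair of mirror 1 at 512GiB) is pushed to
 * the start of zone 2050 and the scan continues from there.
 */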

static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return true;

	if (!test_bit(zno, zone_info->active_zones)) {
		/* Active zone left? */
		if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
			return false;
		if (test_and_set_bit(zno, zone_info->active_zones)) {
			/* Someone already set the bit */
			atomic_inc(&zone_info->active_zones_left);
		}
	}

	return true;
}
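
/*
 * The accounting above is lock-free: a slot is reserved with
 * atomic_dec_if_positive() before setting the bit, and if a concurrent
 * caller already activated the same zone, test_and_set_bit() detects it and
 * the reserved slot is returned, so active_zones_left never double-counts.
 */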

static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return;

	if (test_and_clear_bit(zno, zone_info->active_zones))
		atomic_inc(&zone_info->active_zones_left);
}

int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes)
{
	int ret;

	*bytes = 0;
	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
			       GFP_NOFS);
	if (ret)
		return ret;

	*bytes = length;
	while (length) {
		btrfs_dev_set_zone_empty(device, physical);
		btrfs_dev_clear_active_zone(device, physical);
		physical += device->zone_info->zone_size;
		length -= device->zone_info->zone_size;
	}

	return 0;
}

int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	unsigned long begin = start >> shift;
	unsigned long nbits = size >> shift;
	u64 pos;
	int ret;

	ASSERT(IS_ALIGNED(start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(size, zinfo->zone_size));

	if (begin + nbits > zinfo->nr_zones)
		return -ERANGE;

	/* All the zones are conventional */
	if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits))
		return 0;

	/* All the zones are sequential and empty */
	if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) &&
	    bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits))
		return 0;

	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
		u64 reset_bytes;

		if (!btrfs_dev_is_sequential(device, pos) ||
		    btrfs_dev_is_empty_zone(device, pos))
			continue;

		/* Free regions should be empty */
		btrfs_warn_in_rcu(
			device->fs_info,
		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
			rcu_str_deref(device->name), device->devid, pos >> shift);
		WARN_ON_ONCE(1);

		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
					      &reset_bytes);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points to the
 * end of the highest addressed extent in the block group as the allocation
 * offset.
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
				   u64 *offset_ret, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	u64 length;

	/*
	 * Avoid tree lookups for a new block group, there's no use for it.
	 * It must always be 0.
	 *
	 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
	 * For a new block group, this function is called from
	 * btrfs_make_block_group() which is already taking the chunk mutex.
	 * Thus, we cannot call calculate_alloc_pointer() which takes extent
	 * buffer locks to avoid deadlock.
	 */
	if (new) {
		*offset_ret = 0;
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = cache->start + cache->length;
	key.type = 0;
	key.offset = 0;

	root = btrfs_extent_root(fs_info, key.objectid);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* We should not find the exact match */
	if (!ret)
		ret = -EUCLEAN;
	if (ret < 0)
		goto out;

	ret = btrfs_previous_extent_item(root, path, cache->start);
	if (ret) {
		if (ret == 1) {
			ret = 0;
			*offset_ret = 0;
		}
		goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
		length = found_key.offset;
	else
		length = fs_info->nodesize;

	if (!(found_key.objectid >= cache->start &&
	      found_key.objectid + length <= cache->start + cache->length)) {
		ret = -EUCLEAN;
		goto out;
	}
	*offset_ret = found_key.objectid + length - cache->start;
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

struct zone_info {
	u64 physical;
	u64 capacity;
	u64 alloc_offset;
};

static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
				struct zone_info *info, unsigned long *active,
				struct btrfs_chunk_map *map)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_device *device = map->stripes[zone_idx].dev;
	int dev_replace_is_ongoing = 0;
	unsigned int nofs_flag;
	struct blk_zone zone;
	int ret;

	info->physical = map->stripes[zone_idx].physical;

	if (!device->bdev) {
		info->alloc_offset = WP_MISSING_DEV;
		return 0;
	}

	/* Consider a zone as active if we can allow any number of active zones. */
	if (!device->zone_info->max_active_zones)
		__set_bit(zone_idx, active);

	if (!btrfs_dev_is_sequential(device, info->physical)) {
		info->alloc_offset = WP_CONVENTIONAL;
		return 0;
	}

	/* This zone will be used for allocation, so mark this zone non-empty. */
	btrfs_dev_clear_zone_empty(device, info->physical);

	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
		btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
	up_read(&dev_replace->rwsem);

	/*
	 * The group is mapped to a sequential zone. Get the zone write pointer
	 * to determine the allocation offset within the zone.
	 */
	WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
	nofs_flag = memalloc_nofs_save();
	ret = btrfs_get_dev_zone(device, info->physical, &zone);
	memalloc_nofs_restore(nofs_flag);
	if (ret) {
		if (ret != -EIO && ret != -EOPNOTSUPP)
			return ret;
		info->alloc_offset = WP_MISSING_DEV;
		return 0;
	}

	if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
		btrfs_err_in_rcu(fs_info,
		"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
				 zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
				 device->devid);
		return -EIO;
	}

	info->capacity = (zone.capacity << SECTOR_SHIFT);

	switch (zone.cond) {
	case BLK_ZONE_COND_OFFLINE:
	case BLK_ZONE_COND_READONLY:
		btrfs_err(fs_info,
		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
			  (info->physical >> device->zone_info->zone_size_shift),
			  rcu_str_deref(device->name), device->devid);
		info->alloc_offset = WP_MISSING_DEV;
		break;
	case BLK_ZONE_COND_EMPTY:
		info->alloc_offset = 0;
		break;
	case BLK_ZONE_COND_FULL:
		info->alloc_offset = info->capacity;
		break;
	default:
		/* Partially used zone. */
		info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
		__set_bit(zone_idx, active);
		break;
	}

	return 0;
}

static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
					 struct zone_info *info,
					 unsigned long *active)
{
	if (info->alloc_offset == WP_MISSING_DEV) {
		btrfs_err(bg->fs_info,
			"zoned: cannot recover write pointer for zone %llu",
			info->physical);
		return -EIO;
	}

	bg->alloc_offset = info->alloc_offset;
	bg->zone_capacity = info->capacity;
	if (test_bit(0, active))
		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
	return 0;
}

static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
				      struct btrfs_chunk_map *map,
				      struct zone_info *zone_info,
				      unsigned long *active)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
		return -EINVAL;
	}

	if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
		btrfs_err(bg->fs_info,
			  "zoned: cannot recover write pointer for zone %llu",
			  zone_info[0].physical);
		return -EIO;
	}
	if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
		btrfs_err(bg->fs_info,
			  "zoned: cannot recover write pointer for zone %llu",
			  zone_info[1].physical);
		return -EIO;
	}
	if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
		btrfs_err(bg->fs_info,
			  "zoned: write pointer offset mismatch of zones in DUP profile");
		return -EIO;
	}

	if (test_bit(0, active) != test_bit(1, active)) {
		if (!btrfs_zone_activate(bg))
			return -EIO;
	} else if (test_bit(0, active)) {
		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
	}

	bg->alloc_offset = zone_info[0].alloc_offset;
	bg->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity);
	return 0;
}

static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
					struct btrfs_chunk_map *map,
					struct zone_info *zone_info,
					unsigned long *active)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	int i;

	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
			  btrfs_bg_type_to_raid_name(map->type));
		return -EINVAL;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
			continue;

		if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_err(fs_info,
			"zoned: write pointer offset mismatch of zones in %s profile",
				  btrfs_bg_type_to_raid_name(map->type));
			return -EIO;
		}
		if (test_bit(0, active) != test_bit(i, active)) {
			if (!btrfs_test_opt(fs_info, DEGRADED) &&
			    !btrfs_zone_activate(bg)) {
				return -EIO;
			}
		} else {
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
		}
		/* In case a device is missing we have a cap of 0, so don't use it. */
		bg->zone_capacity = min_not_zero(zone_info[0].capacity,
						 zone_info[1].capacity);
	}

	if (zone_info[0].alloc_offset != WP_MISSING_DEV)
		bg->alloc_offset = zone_info[0].alloc_offset;
	else
		bg->alloc_offset = zone_info[i - 1].alloc_offset;

	return 0;
}

static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
					struct btrfs_chunk_map *map,
					struct zone_info *zone_info,
					unsigned long *active)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
			  btrfs_bg_type_to_raid_name(map->type));
		return -EINVAL;
	}

	for (int i = 0; i < map->num_stripes; i++) {
		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
			continue;

		if (test_bit(0, active) != test_bit(i, active)) {
			if (!btrfs_zone_activate(bg))
				return -EIO;
		} else {
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
		}
		bg->zone_capacity += zone_info[i].capacity;
		bg->alloc_offset += zone_info[i].alloc_offset;
	}

	return 0;
}

static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
					 struct btrfs_chunk_map *map,
					 struct zone_info *zone_info,
					 unsigned long *active)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
			  btrfs_bg_type_to_raid_name(map->type));
		return -EINVAL;
	}

	for (int i = 0; i < map->num_stripes; i++) {
		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
			continue;

		if (test_bit(0, active) != test_bit(i, active)) {
			if (!btrfs_zone_activate(bg))
				return -EIO;
		} else {
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
		}

		if ((i % map->sub_stripes) == 0) {
			bg->zone_capacity += zone_info[i].capacity;
			bg->alloc_offset += zone_info[i].alloc_offset;
		}
	}

	return 0;
}
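
/*
 * RAID10 lays out num_stripes zones as num_stripes / sub_stripes mirror
 * groups, so only the first stripe of each group (i % sub_stripes == 0)
 * contributes to zone_capacity and alloc_offset. For example, a 4-stripe
 * RAID10 with sub_stripes == 2 sums stripes 0 and 2, matching the RAID0
 * math across the two mirror groups.
 */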

int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_chunk_map *map;
	u64 logical = cache->start;
	u64 length = cache->length;
	struct zone_info *zone_info = NULL;
	int ret;
	int i;
	unsigned long *active = NULL;
	u64 last_alloc = 0;
	u32 num_sequential = 0, num_conventional = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	/* Sanity check */
	if (!IS_ALIGNED(length, fs_info->zone_size)) {
		btrfs_err(fs_info,
		"zoned: block group %llu len %llu unaligned to zone size %llu",
			  logical, length, fs_info->zone_size);
		return -EIO;
	}

	map = btrfs_find_chunk_map(fs_info, logical, length);
	if (!map)
		return -EINVAL;

	cache->physical_map = btrfs_clone_chunk_map(map, GFP_NOFS);
	if (!cache->physical_map) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
	if (!zone_info) {
		ret = -ENOMEM;
		goto out;
	}

	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
	if (!active) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
		if (ret)
			goto out;

		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
			num_conventional++;
		else
			num_sequential++;
	}

	if (num_sequential > 0)
		set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);

	if (num_conventional > 0) {
		/* Zone capacity is always zone size in emulation */
		cache->zone_capacity = cache->length;
		ret = calculate_alloc_pointer(cache, &last_alloc, new);
		if (ret) {
			btrfs_err(fs_info,
			"zoned: failed to determine allocation offset of bg %llu",
				  cache->start);
			goto out;
		} else if (map->num_stripes == num_conventional) {
			cache->alloc_offset = last_alloc;
			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
			goto out;
		}
	}

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case 0: /* single */
		ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
		break;
	case BTRFS_BLOCK_GROUP_DUP:
		ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
		break;
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID1C3:
	case BTRFS_BLOCK_GROUP_RAID1C4:
		ret = btrfs_load_block_group_raid1(cache, map, zone_info, active);
		break;
	case BTRFS_BLOCK_GROUP_RAID0:
		ret = btrfs_load_block_group_raid0(cache, map, zone_info, active);
		break;
	case BTRFS_BLOCK_GROUP_RAID10:
		ret = btrfs_load_block_group_raid10(cache, map, zone_info, active);
		break;
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
	default:
		btrfs_err(fs_info, "zoned: profile %s not yet supported",
			  btrfs_bg_type_to_raid_name(map->type));
		ret = -EINVAL;
		goto out;
	}

out:
	if (cache->alloc_offset > cache->zone_capacity) {
		btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
			  cache->alloc_offset, cache->zone_capacity,
			  cache->start);
		ret = -EIO;
	}

	/* An extent is allocated after the write pointer */
	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
		btrfs_err(fs_info,
			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
			  logical, last_alloc, cache->alloc_offset);
		ret = -EIO;
	}

	if (!ret) {
		cache->meta_write_pointer = cache->alloc_offset + cache->start;
		if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
			btrfs_get_block_group(cache);
			spin_lock(&fs_info->zone_active_bgs_lock);
			list_add_tail(&cache->active_bg_list,
				      &fs_info->zone_active_bgs);
			spin_unlock(&fs_info->zone_active_bgs_lock);
		}
	} else {
		btrfs_free_chunk_map(cache->physical_map);
		cache->physical_map = NULL;
	}
	bitmap_free(active);
	kfree(zone_info);

	return ret;
}

void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{
	u64 unusable, free;

	if (!btrfs_is_zoned(cache->fs_info))
		return;

	WARN_ON(cache->bytes_super != 0);
	unusable = (cache->alloc_offset - cache->used) +
		   (cache->length - cache->zone_capacity);
	free = cache->zone_capacity - cache->alloc_offset;

	/* We only need ->free_space in ALLOC_SEQ block groups */
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->free_space_ctl->free_space = free;
	cache->zone_unusable = unusable;
}
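
/*
 * Example with hypothetical numbers: a 1GiB zone with 900MiB usable
 * zone_capacity, alloc_offset == 600MiB and used == 500MiB gives
 * unusable == (600 - 500) + (1024 - 900) == 224MiB and
 * free == 900 - 600 == 300MiB. The 100MiB that was allocated and then
 * freed stays unusable until the zone is reset.
 */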

bool btrfs_use_zone_append(struct btrfs_bio *bbio)
{
	u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct btrfs_block_group *cache;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return false;

	if (!inode || !is_data_inode(&inode->vfs_inode))
		return false;

	if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
		return false;

	/*
	 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
	 * extent layout the relocation code has.
	 * Furthermore we have set aside our own block group from which only the
	 * relocation "process" can allocate and make sure only one process at a
	 * time can add pages to an extent that gets relocated, so it's safe to
	 * use regular REQ_OP_WRITE for this special case.
	 */
	if (btrfs_is_data_reloc_root(inode->root))
		return false;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);
	if (!cache)
		return false;

	ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
	btrfs_put_block_group(cache);

	return ret;
}

void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
{
	const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	struct btrfs_ordered_sum *sum = bbio->sums;

	if (physical < bbio->orig_physical)
		sum->logical -= bbio->orig_physical - physical;
	else
		sum->logical += physical - bbio->orig_physical;
}
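
/*
 * With REQ_OP_ZONE_APPEND the device returns the sector it actually wrote
 * in bi_sector on completion. The delta from the originally assigned
 * physical location is applied to the checksum entry's logical address, so
 * btrfs_finish_ordered_zoned() below can relocate the ordered extent to the
 * real on-disk address.
 */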
1761
71df088c
CH
1762static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
1763 u64 logical)
d8e3fb10 1764{
71df088c 1765 struct extent_map_tree *em_tree = &BTRFS_I(ordered->inode)->extent_tree;
d8e3fb10 1766 struct extent_map *em;
d8e3fb10 1767
04f0847c 1768 ordered->disk_bytenr = logical;
d8e3fb10 1769
d8e3fb10
NA
1770 write_lock(&em_tree->lock);
1771 em = search_extent_mapping(em_tree, ordered->file_offset,
1772 ordered->num_bytes);
04f0847c 1773 em->block_start = logical;
d8e3fb10
NA
1774 free_extent_map(em);
1775 write_unlock(&em_tree->lock);
71df088c
CH
1776}
1777
1778static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
1779 u64 logical, u64 len)
1780{
1781 struct btrfs_ordered_extent *new;
1782
1783 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
1784 split_extent_map(BTRFS_I(ordered->inode), ordered->file_offset,
f000bc6f 1785 ordered->num_bytes, len, logical))
71df088c
CH
1786 return false;
1787
1788 new = btrfs_split_ordered_extent(ordered, len);
1789 if (IS_ERR(new))
1790 return false;
f000bc6f 1791 new->disk_bytenr = logical;
71df088c
CH
1792 btrfs_finish_one_ordered(new);
1793 return true;
1794}
1795
1796void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
1797{
1798 struct btrfs_inode *inode = BTRFS_I(ordered->inode);
1799 struct btrfs_fs_info *fs_info = inode->root->fs_info;
c02d35d8
NA
1800 struct btrfs_ordered_sum *sum;
1801 u64 logical, len;
1802
1803 /*
1804 * Write to pre-allocated region is for the data relocation, and so
1805 * it should use WRITE operation. No split/rewrite are necessary.
1806 */
1807 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
1808 return;
1809
1810 ASSERT(!list_empty(&ordered->list));
1811 /* The ordered->list can be empty in the above pre-alloc case. */
1812 sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
1813 logical = sum->logical;
1814 len = sum->len;
71df088c
CH
1815
1816 while (len < ordered->disk_num_bytes) {
1817 sum = list_next_entry(sum, list);
1818 if (sum->logical == logical + len) {
1819 len += sum->len;
1820 continue;
1821 }
1822 if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
1823 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
1824 btrfs_err(fs_info, "failed to split ordered extent");
1825 goto out;
1826 }
1827 logical = sum->logical;
1828 len = sum->len;
1829 }
1830
1831 if (ordered->disk_bytenr != logical)
1832 btrfs_rewrite_logical_zoned(ordered, logical);
d8e3fb10 1833
cbfce4c7
CH
1834out:
1835 /*
1836 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
1837 * were allocated by btrfs_alloc_dummy_sum only to record the logical
1838 * addresses and don't contain actual checksums. We thus must free them
1839 * here so that we don't attempt to log the csums later.
1840 */
1841 if ((inode->flags & BTRFS_INODE_NODATASUM) ||
71df088c
CH
1842 test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)) {
1843 while ((sum = list_first_entry_or_null(&ordered->list,
1844 typeof(*sum), list))) {
1845 list_del(&sum->list);
1846 kfree(sum);
1847 }
d8e3fb10 1848 }
d8e3fb10 1849}
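/*
 * Illustration of the merge/split loop above (made-up numbers): given sums
 * with (logical, len) of (0x100000, 64K), (0x110000, 64K) and (0x300000,
 * 64K), the first two ranges are contiguous and merge into a single 128K
 * run; the third is discontiguous, so the ordered extent is split after
 * 128K and the tail is rewritten to start at logical 0x300000.
 */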
0bc09ca1 1850
13bb483d
NA
1851static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
1852 struct btrfs_block_group **active_bg)
1853{
1854 const struct writeback_control *wbc = ctx->wbc;
1855 struct btrfs_block_group *block_group = ctx->zoned_bg;
1856 struct btrfs_fs_info *fs_info = block_group->fs_info;
1857
1858 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
1859 return true;
1860
1861 if (fs_info->treelog_bg == block_group->start) {
1862 if (!btrfs_zone_activate(block_group)) {
1863 int ret_fin = btrfs_zone_finish_one_bg(fs_info);
1864
1865 if (ret_fin != 1 || !btrfs_zone_activate(block_group))
1866 return false;
1867 }
1868 } else if (*active_bg != block_group) {
1869 struct btrfs_block_group *tgt = *active_bg;
1870
1871 /* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
1872 lockdep_assert_held(&fs_info->zoned_meta_io_lock);
1873
1874 if (tgt) {
1875 /*
1876 * If there are unsent IOs left in the allocated area,
1877 * we cannot wait for them, as doing so may cause a deadlock.
1878 */
1879 if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
1880 if (wbc->sync_mode == WB_SYNC_NONE ||
1881 (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
1882 return false;
1883 }
1884
1885 /* Pivot active metadata/system block group. */
1886 btrfs_zoned_meta_io_unlock(fs_info);
1887 wait_eb_writebacks(tgt);
1888 do_zone_finish(tgt, true);
1889 btrfs_zoned_meta_io_lock(fs_info);
1890 if (*active_bg == tgt) {
1891 btrfs_put_block_group(tgt);
1892 *active_bg = NULL;
1893 }
1894 }
1895 if (!btrfs_zone_activate(block_group))
1896 return false;
1897 if (*active_bg != block_group) {
1898 ASSERT(*active_bg == NULL);
1899 *active_bg = block_group;
1900 btrfs_get_block_group(block_group);
1901 }
1902 }
1903
1904 return true;
1905}
1906
2ad8c051
NA
1907 /*
1908 * Check if @ctx->eb is aligned to the write pointer.
1909 *
1910 * Return:
1911 * 0: @ctx->eb is at the write pointer. You can write it.
1912 * -EAGAIN: There is a hole. The caller should handle the case.
1913 * -EBUSY: There is a hole, but the caller can just bail out.
1914 */
1915int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
1916 struct btrfs_eb_write_context *ctx)
0bc09ca1 1917{
2ad8c051 1918 const struct writeback_control *wbc = ctx->wbc;
7db94301
NA
1919 const struct extent_buffer *eb = ctx->eb;
1920 struct btrfs_block_group *block_group = ctx->zoned_bg;
0bc09ca1
NA
1921
1922 if (!btrfs_is_zoned(fs_info))
2ad8c051 1923 return 0;
0bc09ca1 1924
7db94301
NA
1925 if (block_group) {
1926 if (block_group->start > eb->start ||
1927 block_group->start + block_group->length <= eb->start) {
1928 btrfs_put_block_group(block_group);
1929 block_group = NULL;
1930 ctx->zoned_bg = NULL;
1931 }
1932 }
0bc09ca1 1933
7db94301
NA
1934 if (!block_group) {
1935 block_group = btrfs_lookup_block_group(fs_info, eb->start);
1936 if (!block_group)
2ad8c051 1937 return 0;
7db94301 1938 ctx->zoned_bg = block_group;
0bc09ca1
NA
1939 }
1940
13bb483d
NA
1941 if (block_group->meta_write_pointer == eb->start) {
1942 struct btrfs_block_group **tgt;
1943
1944 if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
1945 return 0;
1946
1947 if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
1948 tgt = &fs_info->active_system_bg;
1949 else
1950 tgt = &fs_info->active_meta_bg;
1951 if (check_bg_is_active(ctx, tgt))
1952 return 0;
1953 }
1954
1955 /*
1956 * Since we may release fs_info->zoned_meta_io_lock, someone can already
1957 * start writing this eb. In that case, we can just bail out.
1958 */
1959 if (block_group->meta_write_pointer > eb->start)
1960 return -EBUSY;
2ad8c051
NA
1961
1962 /* If for_sync, this hole will be filled by a transaction commit. */
1963 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
1964 return -EAGAIN;
1965 return -EBUSY;
0bc09ca1
NA
1966}
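/*
 * Editor's sketch of the calling contract above (the real caller lives in
 * the extent buffer writeback path; this pseudocode is illustrative only):
 *
 *	ret = btrfs_check_meta_write_pointer(fs_info, &ctx);
 *	if (ret == 0)
 *		submit ctx.eb;
 *	else if (ret == -EAGAIN)
 *		back off and let a transaction commit fill the hole;
 *	else			// -EBUSY
 *		skip this eb, someone else handles it;
 */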
1967
de17addc
NA
1968int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
1969{
1970 if (!btrfs_dev_is_sequential(device, physical))
1971 return -EOPNOTSUPP;
1972
1973 return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
1974 length >> SECTOR_SHIFT, GFP_NOFS, 0);
1975}
7db1c5d1
NA
1976
1977static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
1978 struct blk_zone *zone)
1979{
4c664611 1980 struct btrfs_io_context *bioc = NULL;
7db1c5d1
NA
1981 u64 mapped_length = PAGE_SIZE;
1982 unsigned int nofs_flag;
1983 int nmirrors;
1984 int i, ret;
1985
723b8bb1 1986 ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
9fb2acc2 1987 &mapped_length, &bioc, NULL, NULL);
4c664611 1988 if (ret || !bioc || mapped_length < PAGE_SIZE) {
29634578
CH
1989 ret = -EIO;
1990 goto out_put_bioc;
7db1c5d1
NA
1991 }
1992
29634578
CH
1993 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1994 ret = -EINVAL;
1995 goto out_put_bioc;
1996 }
7db1c5d1
NA
1997
1998 nofs_flag = memalloc_nofs_save();
4c664611 1999 nmirrors = (int)bioc->num_stripes;
7db1c5d1 2000 for (i = 0; i < nmirrors; i++) {
4c664611
QW
2001 u64 physical = bioc->stripes[i].physical;
2002 struct btrfs_device *dev = bioc->stripes[i].dev;
7db1c5d1
NA
2003
2004 /* Missing device */
2005 if (!dev->bdev)
2006 continue;
2007
2008 ret = btrfs_get_dev_zone(dev, physical, zone);
2009 /* Failing device */
2010 if (ret == -EIO || ret == -EOPNOTSUPP)
2011 continue;
2012 break;
2013 }
2014 memalloc_nofs_restore(nofs_flag);
29634578
CH
2015out_put_bioc:
2016 btrfs_put_bioc(bioc);
7db1c5d1
NA
2017 return ret;
2018}
2019
2020 /*
2021 * Synchronize write pointer in a zone at @physical_start on @tgt_dev, by
2022 * filling zeros from @physical_pos up to the write pointer of the
2023 * dev-replace source device.
2024 */
2025int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
2026 u64 physical_start, u64 physical_pos)
2027{
2028 struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
2029 struct blk_zone zone;
2030 u64 length;
2031 u64 wp;
2032 int ret;
2033
2034 if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
2035 return 0;
2036
2037 ret = read_zone_info(fs_info, logical, &zone);
2038 if (ret)
2039 return ret;
2040
2041 wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
2042
2043 if (physical_pos == wp)
2044 return 0;
2045
2046 if (physical_pos > wp)
2047 return -EUCLEAN;
2048
2049 length = wp - physical_pos;
2050 return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
2051}
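/*
 * Example with assumed values: physical_start == 1G and a source zone whose
 * write pointer sits 8M past zone.start give
 *
 *	wp = 1G + ((zone.wp - zone.start) << SECTOR_SHIFT) == 1G + 8M
 *
 * If the copy so far only reached physical_pos == 1G + 5M, the remaining 3M
 * up to wp are zeroed out so both write pointers match.
 */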
e7ff9e6b 2052
43dd529a 2053 /*
afba2bc0
NA
2054 * Activate block group and underlying device zones
2055 *
2056 * @block_group: the block group to activate
2057 *
2058 * Return: true on success, false otherwise
2059 */
2060bool btrfs_zone_activate(struct btrfs_block_group *block_group)
2061{
2062 struct btrfs_fs_info *fs_info = block_group->fs_info;
7dc66abb 2063 struct btrfs_chunk_map *map;
afba2bc0
NA
2064 struct btrfs_device *device;
2065 u64 physical;
a7e1ac7b 2066 const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
afba2bc0 2067 bool ret;
f9a912a3 2068 int i;
afba2bc0
NA
2069
2070 if (!btrfs_is_zoned(block_group->fs_info))
2071 return true;
2072
2073 map = block_group->physical_map;
afba2bc0
NA
2074
2075 spin_lock(&block_group->lock);
3349b57f 2076 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
afba2bc0
NA
2077 ret = true;
2078 goto out_unlock;
2079 }
2080
54957712 2081 /* No space left */
1bfd4767 2082 if (btrfs_zoned_bg_is_full(block_group)) {
54957712
NA
2083 ret = false;
2084 goto out_unlock;
2085 }
2086
a7e1ac7b 2087 spin_lock(&fs_info->zone_active_bgs_lock);
f9a912a3 2088 for (i = 0; i < map->num_stripes; i++) {
a7e1ac7b
NA
2089 struct btrfs_zoned_device_info *zinfo;
2090 int reserved = 0;
2091
f9a912a3
JT
2092 device = map->stripes[i].dev;
2093 physical = map->stripes[i].physical;
a7e1ac7b 2094 zinfo = device->zone_info;
afba2bc0 2095
a7e1ac7b 2096 if (zinfo->max_active_zones == 0)
f9a912a3
JT
2097 continue;
2098
a7e1ac7b
NA
2099 if (is_data)
2100 reserved = zinfo->reserved_active_zones;
2101 /*
2102 * For the data block group, leave active zones for one
2103 * metadata block group and one system block group.
2104 */
2105 if (atomic_read(&zinfo->active_zones_left) <= reserved) {
2106 ret = false;
2107 spin_unlock(&fs_info->zone_active_bgs_lock);
2108 goto out_unlock;
2109 }
2110
f9a912a3
JT
2111 if (!btrfs_dev_set_active_zone(device, physical)) {
2112 /* Cannot activate the zone */
2113 ret = false;
a7e1ac7b 2114 spin_unlock(&fs_info->zone_active_bgs_lock);
f9a912a3
JT
2115 goto out_unlock;
2116 }
a7e1ac7b
NA
2117 if (!is_data)
2118 zinfo->reserved_active_zones--;
f9a912a3 2119 }
a7e1ac7b 2120 spin_unlock(&fs_info->zone_active_bgs_lock);
ceb4f608
NA
2121
2122 /* Successfully activated all the zones */
3349b57f 2123 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
afba2bc0
NA
2124 spin_unlock(&block_group->lock);
2125
ceb4f608
NA
2126 /* For the active block group list */
2127 btrfs_get_block_group(block_group);
afba2bc0 2128
ceb4f608
NA
2129 spin_lock(&fs_info->zone_active_bgs_lock);
2130 list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
2131 spin_unlock(&fs_info->zone_active_bgs_lock);
afba2bc0
NA
2132
2133 return true;
2134
2135out_unlock:
2136 spin_unlock(&block_group->lock);
2137 return ret;
2138}
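/*
 * Illustration of the reservation check in btrfs_zone_activate() (assumed
 * numbers): on a device with max_active_zones == 14 and
 * reserved_active_zones == 6, a DATA block group may only consume an active
 * zone while active_zones_left > 6; the six reserved zones stay available
 * for metadata and system block groups.
 */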
2139
2dd7e7bc
NA
2140static void wait_eb_writebacks(struct btrfs_block_group *block_group)
2141{
2142 struct btrfs_fs_info *fs_info = block_group->fs_info;
2143 const u64 end = block_group->start + block_group->length;
2144 struct radix_tree_iter iter;
2145 struct extent_buffer *eb;
2146 void __rcu **slot;
2147
2148 rcu_read_lock();
2149 radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
2150 block_group->start >> fs_info->sectorsize_bits) {
2151 eb = radix_tree_deref_slot(slot);
2152 if (!eb)
2153 continue;
2154 if (radix_tree_deref_retry(eb)) {
2155 slot = radix_tree_iter_retry(&iter);
2156 continue;
2157 }
2158
2159 if (eb->start < block_group->start)
2160 continue;
2161 if (eb->start >= end)
2162 break;
2163
2164 slot = radix_tree_iter_resume(slot, &iter);
2165 rcu_read_unlock();
2166 wait_on_extent_buffer_writeback(eb);
2167 rcu_read_lock();
2168 }
2169 rcu_read_unlock();
2170}
2171
d70cbdda 2172static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
afba2bc0
NA
2173{
2174 struct btrfs_fs_info *fs_info = block_group->fs_info;
7dc66abb 2175 struct btrfs_chunk_map *map;
2dd7e7bc
NA
2176 const bool is_metadata = (block_group->flags &
2177 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
afba2bc0 2178 int ret = 0;
4dcbb8ab 2179 int i;
afba2bc0 2180
afba2bc0 2181 spin_lock(&block_group->lock);
3349b57f 2182 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
afba2bc0
NA
2183 spin_unlock(&block_group->lock);
2184 return 0;
2185 }
2186
2187 /* Check if we have unwritten allocated space */
2dd7e7bc 2188 if (is_metadata &&
aa9ffadf 2189 block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
afba2bc0
NA
2190 spin_unlock(&block_group->lock);
2191 return -EAGAIN;
2192 }
afba2bc0
NA
2193
2194 /*
d70cbdda
NA
2195 * If we are sure that the block group is full (= no more room left for
2196 * new allocation) and the IO for the last usable block is completed, we
2197 * don't need to wait for the other IOs. This holds because we ensure
2198 * sequential IO submission using the ZONE_APPEND command for data
2199 * and block_group->meta_write_pointer for metadata.
afba2bc0 2200 */
d70cbdda 2201 if (!fully_written) {
332581bd
NA
2202 if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2203 spin_unlock(&block_group->lock);
2204 return -EAGAIN;
2205 }
afba2bc0 2206 spin_unlock(&block_group->lock);
afba2bc0 2207
d70cbdda
NA
2208 ret = btrfs_inc_block_group_ro(block_group, false);
2209 if (ret)
2210 return ret;
2211
2212 /* Ensure all writes in this block group finish */
2213 btrfs_wait_block_group_reservations(block_group);
2214 /* No need to wait for NOCOW writers. Zoned mode does not allow that */
2215 btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
2216 block_group->length);
2dd7e7bc
NA
2217 /* Wait for extent buffers to be written. */
2218 if (is_metadata)
2219 wait_eb_writebacks(block_group);
d70cbdda
NA
2220
2221 spin_lock(&block_group->lock);
2222
2223 /*
2224 * Bail out if someone already deactivated the block group, or
2225 * allocated space is left in the block group.
2226 */
3349b57f
JB
2227 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2228 &block_group->runtime_flags)) {
d70cbdda
NA
2229 spin_unlock(&block_group->lock);
2230 btrfs_dec_block_group_ro(block_group);
2231 return 0;
2232 }
2233
332581bd
NA
2234 if (block_group->reserved ||
2235 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2236 &block_group->runtime_flags)) {
d70cbdda
NA
2237 spin_unlock(&block_group->lock);
2238 btrfs_dec_block_group_ro(block_group);
2239 return -EAGAIN;
2240 }
afba2bc0
NA
2241 }
2242
3349b57f 2243 clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
afba2bc0 2244 block_group->alloc_offset = block_group->zone_capacity;
c1c3c2bc
NA
2245 if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM))
2246 block_group->meta_write_pointer = block_group->start +
2247 block_group->zone_capacity;
afba2bc0
NA
2248 block_group->free_space_ctl->free_space = 0;
2249 btrfs_clear_treelog_bg(block_group);
5911f538 2250 btrfs_clear_data_reloc_bg(block_group);
afba2bc0
NA
2251 spin_unlock(&block_group->lock);
2252
d70cbdda 2253 map = block_group->physical_map;
4dcbb8ab 2254 for (i = 0; i < map->num_stripes; i++) {
d70cbdda
NA
2255 struct btrfs_device *device = map->stripes[i].dev;
2256 const u64 physical = map->stripes[i].physical;
a7e1ac7b 2257 struct btrfs_zoned_device_info *zinfo = device->zone_info;
afba2bc0 2258
a7e1ac7b 2259 if (zinfo->max_active_zones == 0)
4dcbb8ab 2260 continue;
afba2bc0 2261
b3a3b025
NA
2262 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
2263 physical >> SECTOR_SHIFT,
a7e1ac7b 2264 zinfo->zone_size >> SECTOR_SHIFT,
b3a3b025 2265 GFP_NOFS);
4dcbb8ab 2266
b3a3b025
NA
2267 if (ret)
2268 return ret;
afba2bc0 2269
a7e1ac7b
NA
2270 if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
2271 zinfo->reserved_active_zones++;
4dcbb8ab 2272 btrfs_dev_clear_active_zone(device, physical);
afba2bc0 2273 }
d70cbdda
NA
2274
2275 if (!fully_written)
2276 btrfs_dec_block_group_ro(block_group);
afba2bc0 2277
4dcbb8ab
JT
2278 spin_lock(&fs_info->zone_active_bgs_lock);
2279 ASSERT(!list_empty(&block_group->active_bg_list));
2280 list_del_init(&block_group->active_bg_list);
2281 spin_unlock(&fs_info->zone_active_bgs_lock);
2282
2283 /* For active_bg_list */
2284 btrfs_put_block_group(block_group);
2285
d5b81ced 2286 clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2ce543f4 2287
4dcbb8ab 2288 return 0;
afba2bc0 2289}
a85f05e5 2290
d70cbdda
NA
2291int btrfs_zone_finish(struct btrfs_block_group *block_group)
2292{
2293 if (!btrfs_is_zoned(block_group->fs_info))
2294 return 0;
2295
2296 return do_zone_finish(block_group, false);
2297}
2298
82187d2e 2299bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
a85f05e5 2300{
0b9e6676 2301 struct btrfs_fs_info *fs_info = fs_devices->fs_info;
a85f05e5
NA
2302 struct btrfs_device *device;
2303 bool ret = false;
2304
0b9e6676 2305 if (!btrfs_is_zoned(fs_info))
a85f05e5
NA
2306 return true;
2307
a85f05e5 2308 /* Check if there is a device with active zones left */
0b9e6676 2309 mutex_lock(&fs_info->chunk_mutex);
a7e1ac7b 2310 spin_lock(&fs_info->zone_active_bgs_lock);
0b9e6676 2311 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
a85f05e5 2312 struct btrfs_zoned_device_info *zinfo = device->zone_info;
a7e1ac7b 2313 int reserved = 0;
a85f05e5
NA
2314
2315 if (!device->bdev)
2316 continue;
2317
9e1cdf0c 2318 if (!zinfo->max_active_zones) {
a85f05e5
NA
2319 ret = true;
2320 break;
2321 }
9e1cdf0c 2322
a7e1ac7b
NA
2323 if (flags & BTRFS_BLOCK_GROUP_DATA)
2324 reserved = zinfo->reserved_active_zones;
2325
9e1cdf0c
NA
2326 switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
2327 case 0: /* single */
a7e1ac7b 2328 ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved));
9e1cdf0c
NA
2329 break;
2330 case BTRFS_BLOCK_GROUP_DUP:
a7e1ac7b 2331 ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved));
9e1cdf0c
NA
2332 break;
2333 }
2334 if (ret)
2335 break;
a85f05e5 2336 }
a7e1ac7b 2337 spin_unlock(&fs_info->zone_active_bgs_lock);
0b9e6676 2338 mutex_unlock(&fs_info->chunk_mutex);
a85f05e5 2339
2ce543f4
NA
2340 if (!ret)
2341 set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2342
a85f05e5
NA
2343 return ret;
2344}
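/*
 * Example for the profile check above (illustrative numbers): with
 * active_zones_left == 3 and reserved_active_zones == 2, a SINGLE data
 * block group can be activated (3 >= 1 + 2), while a DUP metadata block
 * group ignores the data reservation and needs two zones (3 >= 2 + 0).
 */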
be1a1d7a
NA
2345
2346void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
2347{
2348 struct btrfs_block_group *block_group;
8b8a5399 2349 u64 min_alloc_bytes;
be1a1d7a
NA
2350
2351 if (!btrfs_is_zoned(fs_info))
2352 return;
2353
2354 block_group = btrfs_lookup_block_group(fs_info, logical);
2355 ASSERT(block_group);
2356
8b8a5399
NA
2357 /* No MIXED_BG on zoned btrfs. */
2358 if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
2359 min_alloc_bytes = fs_info->sectorsize;
2360 else
2361 min_alloc_bytes = fs_info->nodesize;
be1a1d7a 2362
8b8a5399
NA
2363 /* Bail out if we can allocate more data from this block group. */
2364 if (logical + length + min_alloc_bytes <=
2365 block_group->start + block_group->zone_capacity)
be1a1d7a 2366 goto out;
be1a1d7a 2367
d70cbdda 2368 do_zone_finish(block_group, true);
be1a1d7a 2369
be1a1d7a
NA
2370out:
2371 btrfs_put_block_group(block_group);
2372}
be1a1d7a 2373
56fbb0a4
NA
2374static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
2375{
2376 struct btrfs_block_group *bg =
2377 container_of(work, struct btrfs_block_group, zone_finish_work);
be1a1d7a 2378
56fbb0a4
NA
2379 wait_on_extent_buffer_writeback(bg->last_eb);
2380 free_extent_buffer(bg->last_eb);
2381 btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
2382 btrfs_put_block_group(bg);
2383}
be1a1d7a 2384
56fbb0a4
NA
2385void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
2386 struct extent_buffer *eb)
2387{
961f5b8b
DS
2388 if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
2389 eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
56fbb0a4 2390 return;
be1a1d7a 2391
56fbb0a4
NA
2392 if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
2393 btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
2394 bg->start);
2395 return;
2396 }
be1a1d7a 2397
56fbb0a4
NA
2398 /* For the work */
2399 btrfs_get_block_group(bg);
2400 atomic_inc(&eb->refs);
2401 bg->last_eb = eb;
2402 INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
2403 queue_work(system_unbound_wq, &bg->zone_finish_work);
be1a1d7a 2404}
c2707a25
JT
2405
2406void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
2407{
2408 struct btrfs_fs_info *fs_info = bg->fs_info;
2409
2410 spin_lock(&fs_info->relocation_bg_lock);
2411 if (fs_info->data_reloc_bg == bg->start)
2412 fs_info->data_reloc_bg = 0;
2413 spin_unlock(&fs_info->relocation_bg_lock);
2414}
16beac87
NA
2415
2416void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
2417{
2418 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2419 struct btrfs_device *device;
2420
2421 if (!btrfs_is_zoned(fs_info))
2422 return;
2423
2424 mutex_lock(&fs_devices->device_list_mutex);
2425 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2426 if (device->zone_info) {
2427 vfree(device->zone_info->zone_cache);
2428 device->zone_info->zone_cache = NULL;
2429 }
2430 }
2431 mutex_unlock(&fs_devices->device_list_mutex);
2432}
3687fcb0
JT
2433
2434bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
2435{
2436 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2437 struct btrfs_device *device;
2438 u64 used = 0;
2439 u64 total = 0;
2440 u64 factor;
2441
2442 ASSERT(btrfs_is_zoned(fs_info));
2443
2444 if (fs_info->bg_reclaim_threshold == 0)
2445 return false;
2446
2447 mutex_lock(&fs_devices->device_list_mutex);
2448 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2449 if (!device->bdev)
2450 continue;
2451
2452 total += device->disk_total_bytes;
2453 used += device->bytes_used;
2454 }
2455 mutex_unlock(&fs_devices->device_list_mutex);
2456
2457 factor = div64_u64(used * 100, total);
2458 return factor >= fs_info->bg_reclaim_threshold;
2459}
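/*
 * Worked example (hypothetical sizes): 80GiB used out of 100GiB total
 * across all devices gives factor == 80; with a bg_reclaim_threshold of
 * 75, reclaim is triggered since 80 >= 75.
 */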
343d8a30
NA
2460
2461void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
2462 u64 length)
2463{
2464 struct btrfs_block_group *block_group;
2465
2466 if (!btrfs_is_zoned(fs_info))
2467 return;
2468
2469 block_group = btrfs_lookup_block_group(fs_info, logical);
2471 /* This should only be called on a former data relocation block group. */
2471 ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
2472
2473 spin_lock(&block_group->lock);
3349b57f 2474 if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
343d8a30
NA
2475 goto out;
2476
2477 /* All relocation extents are written. */
2478 if (block_group->start + block_group->alloc_offset == logical + length) {
332581bd
NA
2479 /*
2480 * Now, release this block group for further allocations and
2481 * zone finish.
2482 */
3349b57f
JB
2483 clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2484 &block_group->runtime_flags);
343d8a30
NA
2485 }
2486
2487out:
2488 spin_unlock(&block_group->lock);
2489 btrfs_put_block_group(block_group);
2490}
393f646e
NA
2491
2492int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
2493{
2494 struct btrfs_block_group *block_group;
2495 struct btrfs_block_group *min_bg = NULL;
2496 u64 min_avail = U64_MAX;
2497 int ret;
2498
2499 spin_lock(&fs_info->zone_active_bgs_lock);
2500 list_for_each_entry(block_group, &fs_info->zone_active_bgs,
2501 active_bg_list) {
2502 u64 avail;
2503
2504 spin_lock(&block_group->lock);
fa2068d7 2505 if (block_group->reserved || block_group->alloc_offset == 0 ||
332581bd
NA
2506 (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
2507 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
393f646e
NA
2508 spin_unlock(&block_group->lock);
2509 continue;
2510 }
2511
2512 avail = block_group->zone_capacity - block_group->alloc_offset;
2513 if (min_avail > avail) {
2514 if (min_bg)
2515 btrfs_put_block_group(min_bg);
2516 min_bg = block_group;
2517 min_avail = avail;
2518 btrfs_get_block_group(min_bg);
2519 }
2520 spin_unlock(&block_group->lock);
2521 }
2522 spin_unlock(&fs_info->zone_active_bgs_lock);
2523
2524 if (!min_bg)
2525 return 0;
2526
2527 ret = btrfs_zone_finish(min_bg);
2528 btrfs_put_block_group(min_bg);
2529
2530 return ret < 0 ? ret : 1;
2531}
b0931513
NA
2532
2533int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
2534 struct btrfs_space_info *space_info,
2535 bool do_finish)
2536{
2537 struct btrfs_block_group *bg;
2538 int index;
2539
2540 if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
2541 return 0;
2542
b0931513
NA
2543 for (;;) {
2544 int ret;
2545 bool need_finish = false;
2546
2547 down_read(&space_info->groups_sem);
2548 for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
2549 list_for_each_entry(bg, &space_info->block_groups[index],
2550 list) {
2551 if (!spin_trylock(&bg->lock))
2552 continue;
3349b57f
JB
2553 if (btrfs_zoned_bg_is_full(bg) ||
2554 test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2555 &bg->runtime_flags)) {
b0931513
NA
2556 spin_unlock(&bg->lock);
2557 continue;
2558 }
2559 spin_unlock(&bg->lock);
2560
2561 if (btrfs_zone_activate(bg)) {
2562 up_read(&space_info->groups_sem);
2563 return 1;
2564 }
2565
2566 need_finish = true;
2567 }
2568 }
2569 up_read(&space_info->groups_sem);
2570
2571 if (!do_finish || !need_finish)
2572 break;
2573
2574 ret = btrfs_zone_finish_one_bg(fs_info);
2575 if (ret == 0)
2576 break;
2577 if (ret < 0)
2578 return ret;
2579 }
2580
2581 return 0;
2582}
a7e1ac7b
NA
2583
2584/*
2585 * Reserve zones for one metadata block group, one tree-log block group, and one
2586 * system block group.
2587 */
2588void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
2589{
2590 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2591 struct btrfs_block_group *block_group;
2592 struct btrfs_device *device;
2593 /* Reserve zones for normal SINGLE metadata and tree-log block group. */
2594 unsigned int metadata_reserve = 2;
2595 /* Reserve a zone for SINGLE system block group. */
2596 unsigned int system_reserve = 1;
2597
2598 if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
2599 return;
2600
2601 /*
2602 * This function is called from the mount context. So, there is no
2603 * parallel process touching the bits. No need for read_seqretry().
2604 */
2605 if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
2606 metadata_reserve = 4;
2607 if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
2608 system_reserve = 2;
2609
2610 /* Apply the reservation on all the devices. */
2611 mutex_lock(&fs_devices->device_list_mutex);
2612 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2613 if (!device->bdev)
2614 continue;
2615
2616 device->zone_info->reserved_active_zones =
2617 metadata_reserve + system_reserve;
2618 }
2619 mutex_unlock(&fs_devices->device_list_mutex);
2620
2621 /* Release reservation for currently active block groups. */
2622 spin_lock(&fs_info->zone_active_bgs_lock);
2623 list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
7dc66abb 2624 struct btrfs_chunk_map *map = block_group->physical_map;
a7e1ac7b
NA
2625
2626 if (!(block_group->flags &
2627 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
2628 continue;
2629
2630 for (int i = 0; i < map->num_stripes; i++)
2631 map->stripes[i].dev->zone_info->reserved_active_zones--;
2632 }
2633 spin_unlock(&fs_info->zone_active_bgs_lock);
2634}
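/*
 * Reservation arithmetic for the function above, read straight from the
 * code: SINGLE metadata plus SINGLE system reserve 2 + 1 = 3 zones per
 * device; with DUP on both it becomes 4 + 2 = 6. One reserved zone is then
 * released per stripe of every already-active metadata/system block group.
 */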