// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "scrub.h"
#include "raid-stripe-tree.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_ctx;

/*
 * The following value only influences the performance.
 *
 * This determines how many stripes would be submitted in one go,
 * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
 */
#define SCRUB_STRIPES_PER_GROUP		8

/*
 * How many groups we have for each sctx.
 *
 * This would be 8M per device, the same value as the old scrub in-flight bios
 * size limit.
 */
#define SCRUB_GROUPS_PER_SCTX		16

#define SCRUB_TOTAL_STRIPES	(SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
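
/*
 * A minimal compile-time sketch of the sizing math above (illustrative, not
 * in the original source; assumes BTRFS_STRIPE_LEN == SZ_64K as on current
 * kernels):
 */
static_assert(SCRUB_STRIPES_PER_GROUP * SZ_64K == SZ_512K);	/* one group in flight */
static_assert(SCRUB_TOTAL_STRIPES * SZ_64K == SZ_8M);		/* whole sctx per device */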

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)

/* Represent one sector and its needed info to verify the content. */
struct scrub_sector_verification {
	bool is_metadata;

	union {
		/*
		 * Csum pointer for data csum verification. Should point to a
		 * sector csum inside scrub_stripe::csums.
		 *
		 * NULL if this data sector has no csum.
		 */
		u8 *csum;

		/*
		 * Extra info for metadata verification. All sectors inside a
		 * tree block share the same generation.
		 */
		u64 generation;
	};
};

enum scrub_stripe_flags {
	/* Set when @mirror_num, @dev, @physical and @logical are set. */
	SCRUB_STRIPE_FLAG_INITIALIZED,

	/* Set when the read-repair is finished. */
	SCRUB_STRIPE_FLAG_REPAIR_DONE,

	/*
	 * Set for data stripes if it's triggered from P/Q stripe.
	 * During such scrub, we should not report errors in data stripes, nor
	 * update the accounting.
	 */
	SCRUB_STRIPE_FLAG_NO_REPORT,
};

#define SCRUB_STRIPE_PAGES		(BTRFS_STRIPE_LEN / PAGE_SIZE)

/*
 * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
 */
struct scrub_stripe {
	struct scrub_ctx *sctx;
	struct btrfs_block_group *bg;

	struct page *pages[SCRUB_STRIPE_PAGES];
	struct scrub_sector_verification *sectors;

	struct btrfs_device *dev;
	u64 logical;
	u64 physical;

	u16 mirror_num;

	/* Should be BTRFS_STRIPE_LEN / sectorsize. */
	u16 nr_sectors;

	/*
	 * How many data/meta extents are in this stripe. Only for scrub status
	 * reporting purposes.
	 */
	u16 nr_data_extents;
	u16 nr_meta_extents;

	atomic_t pending_io;
	wait_queue_head_t io_wait;
	wait_queue_head_t repair_wait;

	/*
	 * Indicate the states of the stripe. Bits are defined in
	 * scrub_stripe_flags enum.
	 */
	unsigned long state;

	/* Indicate which sectors are covered by extent items. */
	unsigned long extent_sector_bitmap;

	/*
	 * The errors hit during the initial read of the stripe.
	 *
	 * Would be utilized for error reporting and repair.
	 *
	 * The remaining init_nr_* records the number of errors hit, only used
	 * by error reporting.
	 */
	unsigned long init_error_bitmap;
	unsigned int init_nr_io_errors;
	unsigned int init_nr_csum_errors;
	unsigned int init_nr_meta_errors;

	/*
	 * The following error bitmaps are all for the current status.
	 * Every time we submit a new read, these bitmaps may be updated.
	 *
	 * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
	 *
	 * IO and csum errors can happen for both metadata and data.
	 */
	unsigned long error_bitmap;
	unsigned long io_error_bitmap;
	unsigned long csum_error_bitmap;
	unsigned long meta_error_bitmap;

	/* For writeback (repair or replace) error reporting. */
	unsigned long write_error_bitmap;

	/* Writeback can be concurrent, thus we need to protect the bitmap. */
	spinlock_t write_error_lock;

	/*
	 * Checksum for the whole stripe if this stripe is inside a data block
	 * group.
	 */
	u8 *csums;

	struct work_struct work;
};
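
/*
 * A minimal sketch of the bitmap invariant documented above (illustrative
 * helper, not in the original source): a sector is in error iff any of the
 * per-type bitmaps has its bit set.
 */
static inline bool scrub_stripe_sector_is_bad(const struct scrub_stripe *stripe,
					      int sector_nr)
{
	return test_bit(sector_nr, &stripe->io_error_bitmap) ||
	       test_bit(sector_nr, &stripe->csum_error_bitmap) ||
	       test_bit(sector_nr, &stripe->meta_error_bitmap);
}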

struct scrub_ctx {
	struct scrub_stripe	stripes[SCRUB_TOTAL_STRIPES];
	struct scrub_stripe	*raid56_data_stripes;
	struct btrfs_fs_info	*fs_info;
	struct btrfs_path	extent_path;
	struct btrfs_path	csum_path;
	int			first_free;
	int			cur_stripe;
	atomic_t		cancel_req;
	int			readonly;

	/* State of IO submission throttling affecting the associated device. */
	ktime_t			throttle_deadline;
	u64			throttle_sent;

	int			is_dev_replace;
	u64			write_pointer;

	struct mutex		wr_lock;
	struct btrfs_device	*wr_tgtdev;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

static void release_scrub_stripe(struct scrub_stripe *stripe)
{
	if (!stripe)
		return;

	for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
		if (stripe->pages[i])
			__free_page(stripe->pages[i]);
		stripe->pages[i] = NULL;
	}
	kfree(stripe->sectors);
	kfree(stripe->csums);
	stripe->sectors = NULL;
	stripe->csums = NULL;
	stripe->sctx = NULL;
	stripe->state = 0;
}

static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
			     struct scrub_stripe *stripe)
{
	int ret;

	memset(stripe, 0, sizeof(*stripe));

	stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
	stripe->state = 0;

	init_waitqueue_head(&stripe->io_wait);
	init_waitqueue_head(&stripe->repair_wait);
	atomic_set(&stripe->pending_io, 0);
	spin_lock_init(&stripe->write_error_lock);

	ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, 0);
	if (ret < 0)
		goto error;

	stripe->sectors = kcalloc(stripe->nr_sectors,
				  sizeof(struct scrub_sector_verification),
				  GFP_KERNEL);
	if (!stripe->sectors)
		goto error;

	stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
				fs_info->csum_size, GFP_KERNEL);
	if (!stripe->csums)
		goto error;
	return 0;
error:
	release_scrub_stripe(stripe);
	return -ENOMEM;
}

static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
{
	wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
}

static void scrub_put_ctx(struct scrub_ctx *sctx);

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
		release_scrub_stripe(&sctx->stripes[i]);

	kvfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;

	/*
	 * Since sctx has inline 128 stripes, it can go beyond 64K easily.
	 * Use kvzalloc().
	 */
	sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->fs_info = fs_info;
	sctx->extent_path.search_commit_root = 1;
	sctx->extent_path.skip_locking = 1;
	sctx->csum_path.search_commit_root = 1;
	sctx->csum_path.skip_locking = 1;
	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
		int ret;

		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
		if (ret < 0)
			goto nomem;
		sctx->stripes[i].sctx = sctx;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->cancel_req, 0);

	spin_lock_init(&sctx->stat_lock);
	sctx->throttle_deadline = 0;

	mutex_init(&sctx->wr_lock);
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
				     u64 root, void *warn_ctx)
{
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * This makes the path point to (inum INODE_ITEM ioff).
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_ipath() might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too small
	 * to hold all of the paths here.
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  btrfs_dev_name(swarn->dev),
				  swarn->physical,
				  root, inum, offset,
				  fs_info->sectorsize, nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  btrfs_dev_name(swarn->dev),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
				       bool is_super, u64 logical, u64 physical)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	u64 flags = 0;
	u32 item_size;
	int ret;

	/* Super block error, no need to search extent tree. */
	if (is_super) {
		btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
				  errstr, btrfs_dev_name(dev), physical);
		return;
	}
	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = physical;
	swarn.logical = logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u8 ref_level;
		u64 ref_root;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					   swarn.logical, ret);
				break;
			}
			if (ret > 0)
				break;
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
					  errstr, swarn.logical, btrfs_dev_name(dev),
					  swarn.physical, (ref_level ? "node" : "leaf"),
					  ref_level, ref_root);
		}
		btrfs_release_path(path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };

		btrfs_release_path(path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = swarn.logical - found_key.objectid;
		ctx.fs_info = fs_info;

		swarn.path = path;
		swarn.dev = dev;

		iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
{
	int ret = 0;
	u64 length;

	if (!btrfs_is_zoned(sctx->fs_info))
		return 0;

	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
		return 0;

	if (sctx->write_pointer < physical) {
		length = physical - sctx->write_pointer;

		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
						sctx->write_pointer, length);
		if (!ret)
			sctx->write_pointer = physical;
	}
	return ret;
}
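
/*
 * Worked example for the gap filling above (illustrative numbers): on a
 * zoned target writes must be sequential, so if sctx->write_pointer sits at
 * 1MiB and the next repair write targets physical 1MiB + 192KiB, the 192KiB
 * in between is zeroed out first and the cached write pointer is advanced
 * to match the device's.
 */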

static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;

	return stripe->pages[page_index];
}

static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
						 int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;

	return offset_in_page(sector_nr << fs_info->sectorsize_bits);
}
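
/*
 * Worked example for the two helpers above (illustrative, assuming 4KiB
 * sectors): sector_nr = 5 is byte offset 5 << 12 = 20480 into the stripe.
 * With 4KiB pages that maps to pages[5] at in-page offset 0; with 16KiB
 * pages the same sector maps to pages[1] at in-page offset 4096.
 */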

static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
	const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
	const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
	const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	struct btrfs_header *header;

	/*
	 * Here we don't have a good way to attach the pages (and subpages)
	 * to a dummy extent buffer, thus we have to directly grab the members
	 * from pages.
	 */
	header = (struct btrfs_header *)(page_address(first_page) + first_off);
	memcpy(on_disk_csum, header->csum, fs_info->csum_size);

	if (logical != btrfs_stack_header_bytenr(header)) {
		bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad bytenr, has %llu want %llu",
			      logical, stripe->mirror_num,
			      btrfs_stack_header_bytenr(header), logical);
		return;
	}
	if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
		   BTRFS_FSID_SIZE) != 0) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad fsid, has %pU want %pU",
			      logical, stripe->mirror_num,
			      header->fsid, fs_info->fs_devices->fsid);
		return;
	}
	if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE) != 0) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
			      logical, stripe->mirror_num,
			      header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
		return;
	}

	/* Now check tree block csum. */
	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_update(shash, page_address(first_page) + first_off +
			    BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);

	for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
		struct page *page = scrub_stripe_get_page(stripe, i);
		unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);

		crypto_shash_update(shash, page_address(page) + page_off,
				    fs_info->sectorsize);
	}

	crypto_shash_final(shash, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
			      logical, stripe->mirror_num,
			      CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
			      CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
		return;
	}
	if (stripe->sectors[sector_nr].generation !=
	    btrfs_stack_header_generation(header)) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad generation, has %llu want %llu",
			      logical, stripe->mirror_num,
			      btrfs_stack_header_generation(header),
			      stripe->sectors[sector_nr].generation);
		return;
	}
	bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
	bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
	bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
}

static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
	struct page *page = scrub_stripe_get_page(stripe, sector_nr);
	unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
	u8 csum_buf[BTRFS_CSUM_SIZE];
	int ret;

	ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);

	/* Sector not utilized, skip it. */
	if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
		return;

	/* IO error, no need to check. */
	if (test_bit(sector_nr, &stripe->io_error_bitmap))
		return;

	/* Metadata, verify the full tree block. */
	if (sector->is_metadata) {
		/*
		 * Check if the tree block crosses the stripe boundary. If
		 * crossed the boundary, we cannot verify it but only give a
		 * warning.
		 *
		 * This can only happen on a very old filesystem where chunks
		 * are not ensured to be stripe aligned.
		 */
		if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
			btrfs_warn_rl(fs_info,
			"tree block at %llu crosses stripe boundary %llu",
				      stripe->logical +
				      (sector_nr << fs_info->sectorsize_bits),
				      stripe->logical);
			return;
		}
		scrub_verify_one_metadata(stripe, sector_nr);
		return;
	}

	/*
	 * Data is easier, we just verify the data csum (if we have it). For
	 * cases without csum, we have no other choice but to trust it.
	 */
	if (!sector->csum) {
		clear_bit(sector_nr, &stripe->error_bitmap);
		return;
	}

	ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
	if (ret < 0) {
		set_bit(sector_nr, &stripe->csum_error_bitmap);
		set_bit(sector_nr, &stripe->error_bitmap);
	} else {
		clear_bit(sector_nr, &stripe->csum_error_bitmap);
		clear_bit(sector_nr, &stripe->error_bitmap);
	}
}

/* Verify specified sectors of a stripe. */
static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
	int sector_nr;

	for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
		scrub_verify_one_sector(stripe, sector_nr);
		if (stripe->sectors[sector_nr].is_metadata)
			sector_nr += sectors_per_tree - 1;
	}
}
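
/*
 * Worked example for the loop above (illustrative, assuming 16KiB nodesize
 * and 4KiB sectorsize, i.e. sectors_per_tree = 4): if sector 0 starts a tree
 * block, scrub_verify_one_sector() verifies sectors 0-3 as one tree block,
 * so the iterator skips ahead and resumes at sector 4.
 */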

static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
{
	int i;

	for (i = 0; i < stripe->nr_sectors; i++) {
		if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
		    scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
			break;
	}
	ASSERT(i < stripe->nr_sectors);
	return i;
}

/*
 * Repair read is different from the regular read:
 *
 * - Only reads the failed sectors
 * - May have extra blocksize limits
 */
static void scrub_repair_read_endio(struct btrfs_bio *bbio)
{
	struct scrub_stripe *stripe = bbio->private;
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct bio_vec *bvec;
	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
	u32 bio_size = 0;
	int i;

	ASSERT(sector_nr < stripe->nr_sectors);

	bio_for_each_bvec_all(bvec, &bbio->bio, i)
		bio_size += bvec->bv_len;

	if (bbio->bio.bi_status) {
		bitmap_set(&stripe->io_error_bitmap, sector_nr,
			   bio_size >> fs_info->sectorsize_bits);
		bitmap_set(&stripe->error_bitmap, sector_nr,
			   bio_size >> fs_info->sectorsize_bits);
	} else {
		bitmap_clear(&stripe->io_error_bitmap, sector_nr,
			     bio_size >> fs_info->sectorsize_bits);
	}
	bio_put(&bbio->bio);
	if (atomic_dec_and_test(&stripe->pending_io))
		wake_up(&stripe->io_wait);
}

static int calc_next_mirror(int mirror, int num_copies)
{
	ASSERT(mirror <= num_copies);
	return (mirror + 1 > num_copies) ? 1 : mirror + 1;
}
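
/*
 * A minimal usage sketch (illustrative, not part of the original source):
 * with num_copies = 3 the mirrors rotate 1 -> 2 -> 3 -> 1, so starting from
 * the mirror that failed, the loop below visits every other copy exactly
 * once before wrapping back around:
 *
 *	for (mirror = calc_next_mirror(failed_mirror, num_copies);
 *	     mirror != failed_mirror;
 *	     mirror = calc_next_mirror(mirror, num_copies))
 *		try_mirror(mirror);	(hypothetical helper)
 */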

static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
					    int mirror, int blocksize, bool wait)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct btrfs_bio *bbio = NULL;
	const unsigned long old_error_bitmap = stripe->error_bitmap;
	int i;

	ASSERT(stripe->mirror_num >= 1);
	ASSERT(atomic_read(&stripe->pending_io) == 0);

	for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
		struct page *page;
		int pgoff;
		int ret;

		page = scrub_stripe_get_page(stripe, i);
		pgoff = scrub_stripe_get_page_offset(stripe, i);

		/* The current sector cannot be merged, submit the bio. */
		if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
			     bbio->bio.bi_iter.bi_size >= blocksize)) {
			ASSERT(bbio->bio.bi_iter.bi_size);
			atomic_inc(&stripe->pending_io);
			btrfs_submit_bio(bbio, mirror);
			if (wait)
				wait_scrub_stripe_io(stripe);
			bbio = NULL;
		}

		if (!bbio) {
			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
					       fs_info, scrub_repair_read_endio, stripe);
			bbio->bio.bi_iter.bi_sector = (stripe->logical +
				(i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
		}

		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
		ASSERT(ret == fs_info->sectorsize);
	}
	if (bbio) {
		ASSERT(bbio->bio.bi_iter.bi_size);
		atomic_inc(&stripe->pending_io);
		btrfs_submit_bio(bbio, mirror);
		if (wait)
			wait_scrub_stripe_io(stripe);
	}
}

static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
				       struct scrub_stripe *stripe)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_device *dev = NULL;
	u64 physical = 0;
	int nr_data_sectors = 0;
	int nr_meta_sectors = 0;
	int nr_nodatacsum_sectors = 0;
	int nr_repaired_sectors = 0;
	int sector_nr;

	if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
		return;

	/*
	 * Init the needed info for error reporting.
	 *
	 * Although our scrub_stripe infrastructure is mostly based on
	 * btrfs_submit_bio() and thus needs no dev/physical, error reporting
	 * still needs dev and physical.
	 */
	if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
		u64 mapped_len = fs_info->sectorsize;
		struct btrfs_io_context *bioc = NULL;
		int stripe_index = stripe->mirror_num - 1;
		int ret;

		/* For scrub, our mirror_num should always start at 1. */
		ASSERT(stripe->mirror_num >= 1);
		ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				      stripe->logical, &mapped_len, &bioc,
				      NULL, NULL);
		/*
		 * If we failed, dev will be NULL, and later detailed reports
		 * will just be skipped.
		 */
		if (ret < 0)
			goto skip;
		physical = bioc->stripes[stripe_index].physical;
		dev = bioc->stripes[stripe_index].dev;
		btrfs_put_bioc(bioc);
	}

skip:
	for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
		bool repaired = false;

		if (stripe->sectors[sector_nr].is_metadata) {
			nr_meta_sectors++;
		} else {
			nr_data_sectors++;
			if (!stripe->sectors[sector_nr].csum)
				nr_nodatacsum_sectors++;
		}

		if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
		    !test_bit(sector_nr, &stripe->error_bitmap)) {
			nr_repaired_sectors++;
			repaired = true;
		}

		/* Good sector from the beginning, nothing needs to be done. */
		if (!test_bit(sector_nr, &stripe->init_error_bitmap))
			continue;

		/*
		 * Report error for the corrupted sectors. If repaired, just
		 * output the "fixed up" message.
		 */
		if (repaired) {
			if (dev) {
				btrfs_err_rl_in_rcu(fs_info,
			"fixed up error at logical %llu on dev %s physical %llu",
					    stripe->logical, btrfs_dev_name(dev),
					    physical);
			} else {
				btrfs_err_rl_in_rcu(fs_info,
			"fixed up error at logical %llu on mirror %u",
					    stripe->logical, stripe->mirror_num);
			}
			continue;
		}

		/* The remaining are all for unrepaired sectors. */
		if (dev) {
			btrfs_err_rl_in_rcu(fs_info,
	"unable to fixup (regular) error at logical %llu on dev %s physical %llu",
					    stripe->logical, btrfs_dev_name(dev),
					    physical);
		} else {
			btrfs_err_rl_in_rcu(fs_info,
	"unable to fixup (regular) error at logical %llu on mirror %u",
					    stripe->logical, stripe->mirror_num);
		}

		if (test_bit(sector_nr, &stripe->io_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("i/o error", dev, false,
						stripe->logical, physical);
		if (test_bit(sector_nr, &stripe->csum_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("checksum error", dev, false,
						stripe->logical, physical);
		if (test_bit(sector_nr, &stripe->meta_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("header error", dev, false,
						stripe->logical, physical);
	}

	spin_lock(&sctx->stat_lock);
	sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
	sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
	sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
	sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
	sctx->stat.no_csum += nr_nodatacsum_sectors;
	sctx->stat.read_errors += stripe->init_nr_io_errors;
	sctx->stat.csum_errors += stripe->init_nr_csum_errors;
	sctx->stat.verify_errors += stripe->init_nr_meta_errors;
	sctx->stat.uncorrectable_errors +=
		bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
	sctx->stat.corrected_errors += nr_repaired_sectors;
	spin_unlock(&sctx->stat_lock);
}

static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
				unsigned long write_bitmap, bool dev_replace);

/*
 * The main entrance for all read related scrub work, including:
 *
 * - Wait for the initial read to finish
 * - Verify and locate any bad sectors
 * - Go through the remaining mirrors and try to read as large a blocksize as
 *   possible
 * - Go through all mirrors (including the failed mirror) sector-by-sector
 * - Submit writeback for repaired sectors
 *
 * Writeback for dev-replace does not happen here, it needs extra
 * synchronization for zoned devices.
 */
static void scrub_stripe_read_repair_worker(struct work_struct *work)
{
	struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
	struct scrub_ctx *sctx = stripe->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
					  stripe->bg->length);
	int mirror;
	int i;

	ASSERT(stripe->mirror_num > 0);

	wait_scrub_stripe_io(stripe);
	scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
	/* Save the initial failed bitmap for later repair and report usage. */
	stripe->init_error_bitmap = stripe->error_bitmap;
	stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
						  stripe->nr_sectors);
	stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
						    stripe->nr_sectors);
	stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
						    stripe->nr_sectors);

	if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
		goto out;

	/*
	 * Try all remaining mirrors.
	 *
	 * Here we still try to read as large a block as possible, as this is
	 * faster and we have extra safety nets to rely on.
	 */
	for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
	     mirror != stripe->mirror_num;
	     mirror = calc_next_mirror(mirror, num_copies)) {
		const unsigned long old_error_bitmap = stripe->error_bitmap;

		scrub_stripe_submit_repair_read(stripe, mirror,
						BTRFS_STRIPE_LEN, false);
		wait_scrub_stripe_io(stripe);
		scrub_verify_one_stripe(stripe, old_error_bitmap);
		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
			goto out;
	}

	/*
	 * Last safety net, try re-checking all mirrors, including the failed
	 * one, sector-by-sector.
	 *
	 * If one sector fails the drive's internal csum, the whole read
	 * containing the offending sector would be marked as an error.
	 * Thus here we do sector-by-sector reads.
	 *
	 * This can be slow, thus we only try it as the last resort.
	 */
	for (i = 0, mirror = stripe->mirror_num;
	     i < num_copies;
	     i++, mirror = calc_next_mirror(mirror, num_copies)) {
		const unsigned long old_error_bitmap = stripe->error_bitmap;

		scrub_stripe_submit_repair_read(stripe, mirror,
						fs_info->sectorsize, true);
		wait_scrub_stripe_io(stripe);
		scrub_verify_one_stripe(stripe, old_error_bitmap);
		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
			goto out;
	}
out:
	/*
	 * Submit the repaired sectors. For the zoned case, we cannot do repair
	 * in-place, but queue the bg to be relocated.
	 */
	if (btrfs_is_zoned(fs_info)) {
		if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
			btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
	} else if (!sctx->readonly) {
		unsigned long repaired;

		bitmap_andnot(&repaired, &stripe->init_error_bitmap,
			      &stripe->error_bitmap, stripe->nr_sectors);
		scrub_write_sectors(sctx, stripe, repaired, false);
		wait_scrub_stripe_io(stripe);
	}

	scrub_stripe_report_errors(sctx, stripe);
	set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
	wake_up(&stripe->repair_wait);
}
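
/*
 * Worked example for the repair writeback above (illustrative): with
 * init_error_bitmap = 0b1110 and a final error_bitmap = 0b0100,
 * bitmap_andnot() yields repaired = 0b1010, i.e. exactly the sectors that
 * were bad on the first read but verified fine on a later mirror.
 */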

static void scrub_read_endio(struct btrfs_bio *bbio)
{
	struct scrub_stripe *stripe = bbio->private;

	if (bbio->bio.bi_status) {
		bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
		bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors);
	} else {
		bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
	}
	bio_put(&bbio->bio);
	if (atomic_dec_and_test(&stripe->pending_io)) {
		wake_up(&stripe->io_wait);
		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
	}
}
1115
058e09e6
QW
1116static void scrub_write_endio(struct btrfs_bio *bbio)
1117{
1118 struct scrub_stripe *stripe = bbio->private;
1119 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1120 struct bio_vec *bvec;
1121 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1122 u32 bio_size = 0;
1123 int i;
1124
1125 bio_for_each_bvec_all(bvec, &bbio->bio, i)
1126 bio_size += bvec->bv_len;
1127
1128 if (bbio->bio.bi_status) {
1129 unsigned long flags;
1130
1131 spin_lock_irqsave(&stripe->write_error_lock, flags);
1132 bitmap_set(&stripe->write_error_bitmap, sector_nr,
1133 bio_size >> fs_info->sectorsize_bits);
1134 spin_unlock_irqrestore(&stripe->write_error_lock, flags);
1135 }
1136 bio_put(&bbio->bio);
1137
1138 if (atomic_dec_and_test(&stripe->pending_io))
1139 wake_up(&stripe->io_wait);
1140}

static void scrub_submit_write_bio(struct scrub_ctx *sctx,
				   struct scrub_stripe *stripe,
				   struct btrfs_bio *bbio, bool dev_replace)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u32 bio_len = bbio->bio.bi_iter.bi_size;
	u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
		      stripe->logical;

	fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
	atomic_inc(&stripe->pending_io);
	btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
	if (!btrfs_is_zoned(fs_info))
		return;
	/*
	 * For zoned writeback, queue depth must be 1, thus we must wait for
	 * the write to finish before the next write.
	 */
	wait_scrub_stripe_io(stripe);

	/*
	 * We also need to update the write pointer if the write finished
	 * successfully.
	 */
	if (!test_bit(bio_off >> fs_info->sectorsize_bits,
		      &stripe->write_error_bitmap))
		sctx->write_pointer += bio_len;
}
1170
058e09e6
QW
1171/*
1172 * Submit the write bio(s) for the sectors specified by @write_bitmap.
1173 *
1174 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
1175 *
1176 * - Only needs logical bytenr and mirror_num
1177 * Just like the scrub read path
1178 *
1179 * - Would only result in writes to the specified mirror
1180 * Unlike the regular writeback path, which would write back to all stripes
1181 *
1182 * - Handle dev-replace and read-repair writeback differently
1183 */
54765392
QW
1184static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1185 unsigned long write_bitmap, bool dev_replace)
058e09e6
QW
1186{
1187 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1188 struct btrfs_bio *bbio = NULL;
058e09e6
QW
1189 int sector_nr;
1190
1191 for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
1192 struct page *page = scrub_stripe_get_page(stripe, sector_nr);
1193 unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
1194 int ret;
1195
1196 /* We should only writeback sectors covered by an extent. */
1197 ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));
1198
1199 /* Cannot merge with previous sector, submit the current one. */
1200 if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
b675df02 1201 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
058e09e6
QW
1202 bbio = NULL;
1203 }
1204 if (!bbio) {
1205 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
1206 fs_info, scrub_write_endio, stripe);
1207 bbio->bio.bi_iter.bi_sector = (stripe->logical +
1208 (sector_nr << fs_info->sectorsize_bits)) >>
1209 SECTOR_SHIFT;
1210 }
1211 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1212 ASSERT(ret == fs_info->sectorsize);
1213 }
b675df02
QW
1214 if (bbio)
1215 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
058e09e6
QW
1216}

/*
 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 * second. The limit can be set via
 * /sys/fs/btrfs/UUID/devinfo/devid/scrub_speed_max.
 */
static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
				  unsigned int bio_size)
{
	const int time_slice = 1000;
	s64 delta;
	ktime_t now;
	u32 div;
	u64 bwlimit;

	bwlimit = READ_ONCE(device->scrub_speed_max);
	if (bwlimit == 0)
		return;

	/*
	 * Slice is divided into intervals when the IO is submitted, adjust by
	 * bwlimit and maximum of 64 intervals.
	 */
	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
	div = min_t(u32, 64, div);

	/* Start new epoch, set deadline. */
	now = ktime_get();
	if (sctx->throttle_deadline == 0) {
		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
		sctx->throttle_sent = 0;
	}

	/* Still in the time to send? */
	if (ktime_before(now, sctx->throttle_deadline)) {
		/* If the current bio is within the limit, send it. */
		sctx->throttle_sent += bio_size;
		if (sctx->throttle_sent <= div_u64(bwlimit, div))
			return;

		/* We're over the limit, sleep until the rest of the slice. */
		delta = ktime_ms_delta(sctx->throttle_deadline, now);
	} else {
		/* New request after deadline, start new epoch. */
		delta = 0;
	}

	if (delta) {
		long timeout;

		timeout = div_u64(delta * HZ, 1000);
		schedule_timeout_interruptible(timeout);
	}

	/* Next call will start the deadline period. */
	sctx->throttle_deadline = 0;
}
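
/*
 * Worked example for the throttling above (illustrative): with
 * scrub_speed_max = 64MiB/s, div = min(64, max(1, 64M / 16M)) = 4, so each
 * epoch lasts 1000 / 4 = 250ms and may send 64M / 4 = 16MiB before the
 * submitter sleeps for the remainder of the slice.
 */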

/*
 * Given a physical address, this will calculate its
 * logical offset. If this is a parity stripe, it will return
 * the leftmost data stripe's logical offset.
 *
 * Return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct btrfs_chunk_map *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 last_offset;
	const int data_stripes = nr_data_stripes(map);

	last_offset = (physical - map->stripes[num].physical) * data_stripes;
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < data_stripes; i++) {
		u32 stripe_nr;
		u32 stripe_index;
		u32 rot;

		*offset = last_offset + btrfs_stripe_nr_to_offset(i);

		stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;

		/* Work out the disk rotation on this stripe-set. */
		rot = stripe_nr % map->num_stripes;
		/* Calculate which stripe this data locates. */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + btrfs_stripe_nr_to_offset(j);
	return 1;
}
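
/*
 * Worked example for the rotation above (illustrative): on a 3-device RAID5
 * (data_stripes = 2), full stripe 0 lays out D0 D1 P across devices 0 1 2,
 * and full stripe 1 rotates to P D0 D1. For each data stripe @i the loop
 * checks whether the rotated index (stripe_nr + i) % num_stripes lands on
 * device @num: if it does, the location holds data and 0 is returned; if it
 * never does, the device holds parity there and 1 is returned.
 */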

/*
 * Return 0 if the extent item range covers any byte of the range.
 * Return <0 if the extent item is before @search_start.
 * Return >0 if the extent item is after @search_start + @search_len.
 */
static int compare_extent_item_range(struct btrfs_path *path,
				     u64 search_start, u64 search_len)
{
	struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
	u64 len;
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
	       key.type == BTRFS_METADATA_ITEM_KEY);
	if (key.type == BTRFS_METADATA_ITEM_KEY)
		len = fs_info->nodesize;
	else
		len = key.offset;

	if (key.objectid + len <= search_start)
		return -1;
	if (key.objectid >= search_start + search_len)
		return 1;
	return 0;
}

/*
 * Locate one extent item which covers any byte in range
 * [@search_start, @search_start + @search_length)
 *
 * If the path is not initialized, we will initialize the search by doing
 * a btrfs_search_slot().
 * If the path is already initialized, we will use the path as the initial
 * slot, to avoid duplicated btrfs_search_slot() calls.
 *
 * NOTE: If an extent item starts before @search_start, we will still
 * return the extent item. This is for data extent crossing stripe boundary.
 *
 * Return 0 if we found such extent item, and @path will point to the extent item.
 * Return >0 if no such extent item can be found, and @path will be released.
 * Return <0 if hit fatal error, and @path will be released.
 */
static int find_first_extent_item(struct btrfs_root *extent_root,
				  struct btrfs_path *path,
				  u64 search_start, u64 search_len)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	struct btrfs_key key;
	int ret;

	/* Continue using the existing path. */
	if (path->nodes[0])
		goto search_forward;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = search_start;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ASSERT(ret > 0);
	/*
	 * Here we intentionally pass 0 as @min_objectid, as there could be
	 * an extent item starting before @search_start.
	 */
	ret = btrfs_previous_extent_item(extent_root, path, 0);
	if (ret < 0)
		return ret;
	/*
	 * No matter whether we have found an extent item, the next loop will
	 * properly do every check on the key.
	 */
search_forward:
	while (true) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid >= search_start + search_len)
			break;
		if (key.type != BTRFS_METADATA_ITEM_KEY &&
		    key.type != BTRFS_EXTENT_ITEM_KEY)
			goto next;

		ret = compare_extent_item_range(path, search_start, search_len);
		if (ret == 0)
			return ret;
		if (ret > 0)
			break;
next:
		ret = btrfs_next_item(extent_root, path);
		if (ret) {
			/* Either no more items or a fatal error. */
			btrfs_release_path(path);
			return ret;
		}
	}
	btrfs_release_path(path);
	return 1;
}

static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
			    u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
	       key.type == BTRFS_EXTENT_ITEM_KEY);
	*extent_start_ret = key.objectid;
	if (key.type == BTRFS_METADATA_ITEM_KEY)
		*size_ret = path->nodes[0]->fs_info->nodesize;
	else
		*size_ret = key.offset;
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
	*flags_ret = btrfs_extent_flags(path->nodes[0], ei);
	*generation_ret = btrfs_extent_generation(path->nodes[0], ei);
}
1439}
1440
7db1c5d1
NA
1441static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
1442 u64 physical, u64 physical_end)
1443{
1444 struct btrfs_fs_info *fs_info = sctx->fs_info;
1445 int ret = 0;
1446
1447 if (!btrfs_is_zoned(fs_info))
1448 return 0;
1449
7db1c5d1
NA
1450 mutex_lock(&sctx->wr_lock);
1451 if (sctx->write_pointer < physical_end) {
1452 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1453 physical,
1454 sctx->write_pointer);
1455 if (ret)
1456 btrfs_err(fs_info,
1457 "zoned: failed to recover write pointer");
1458 }
1459 mutex_unlock(&sctx->wr_lock);
1460 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1461
1462 return ret;
1463}

static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
				 struct scrub_stripe *stripe,
				 u64 extent_start, u64 extent_len,
				 u64 extent_flags, u64 extent_gen)
{
	for (u64 cur_logical = max(stripe->logical, extent_start);
	     cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
			       extent_start + extent_len);
	     cur_logical += fs_info->sectorsize) {
		const int nr_sector = (cur_logical - stripe->logical) >>
				      fs_info->sectorsize_bits;
		struct scrub_sector_verification *sector =
						&stripe->sectors[nr_sector];

		set_bit(nr_sector, &stripe->extent_sector_bitmap);
		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			sector->is_metadata = true;
			sector->generation = extent_gen;
		}
	}
}
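
/*
 * Worked example for the clamping above (illustrative, assuming 4KiB
 * sectors): for a stripe at logical 1MiB and an extent covering
 * [1MiB + 8KiB, 1MiB + 20KiB), the loop visits offsets 8KiB, 12KiB and
 * 16KiB past the stripe start and sets bits 2, 3 and 4 of
 * extent_sector_bitmap.
 */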

static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
{
	stripe->extent_sector_bitmap = 0;
	stripe->init_error_bitmap = 0;
	stripe->init_nr_io_errors = 0;
	stripe->init_nr_csum_errors = 0;
	stripe->init_nr_meta_errors = 0;
	stripe->error_bitmap = 0;
	stripe->io_error_bitmap = 0;
	stripe->csum_error_bitmap = 0;
	stripe->meta_error_bitmap = 0;
}

/*
 * Locate one stripe which has at least one extent in its range.
 *
 * Return 0 if found such stripe, and store its info into @stripe.
 * Return >0 if there is no such stripe in the specified range.
 * Return <0 for error.
 */
static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
					struct btrfs_path *extent_path,
					struct btrfs_path *csum_path,
					struct btrfs_device *dev, u64 physical,
					int mirror_num, u64 logical_start,
					u32 logical_len,
					struct scrub_stripe *stripe)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
	const u64 logical_end = logical_start + logical_len;
	u64 cur_logical = logical_start;
	u64 stripe_end;
	u64 extent_start;
	u64 extent_len;
	u64 extent_flags;
	u64 extent_gen;
	int ret;

	memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
				   stripe->nr_sectors);
	scrub_stripe_reset_bitmaps(stripe);

	/* The range must be inside the bg. */
	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);

	ret = find_first_extent_item(extent_root, extent_path, logical_start,
				     logical_len);
	/* Either error or not found. */
	if (ret)
		goto out;
	get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
			&extent_gen);
	if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		stripe->nr_meta_extents++;
	if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
		stripe->nr_data_extents++;
	cur_logical = max(extent_start, cur_logical);

	/*
	 * Round down to stripe boundary.
	 *
	 * The extra calculation against bg->start is to handle block groups
	 * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
	 */
	stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
			  bg->start;
	stripe->physical = physical + stripe->logical - logical_start;
	stripe->dev = dev;
	stripe->bg = bg;
	stripe->mirror_num = mirror_num;
	stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;

	/* Fill the first extent info into stripe->sectors[] array. */
	fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
			     extent_flags, extent_gen);
	cur_logical = extent_start + extent_len;

	/* Fill the extent info for the remaining sectors. */
	while (cur_logical <= stripe_end) {
		ret = find_first_extent_item(extent_root, extent_path, cur_logical,
					     stripe_end - cur_logical + 1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
		get_extent_info(extent_path, &extent_start, &extent_len,
				&extent_flags, &extent_gen);
		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			stripe->nr_meta_extents++;
		if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
			stripe->nr_data_extents++;
		fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
				     extent_flags, extent_gen);
		cur_logical = extent_start + extent_len;
	}

	/* Now fill the data csum. */
	if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
		int sector_nr;
		unsigned long csum_bitmap = 0;

		/* Csum space should have already been allocated. */
		ASSERT(stripe->csums);

		/*
		 * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
		 * should contain at most 16 sectors.
		 */
		ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);

		ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
						stripe->logical, stripe_end,
						stripe->csums, &csum_bitmap);
		if (ret < 0)
			goto out;
		if (ret > 0)
			ret = 0;

		for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
			stripe->sectors[sector_nr].csum = stripe->csums +
				sector_nr * fs_info->csum_size;
		}
	}
	set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
out:
	return ret;
}
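
/*
 * Worked example for the round_down above (illustrative): for a block group
 * starting at logical 70MiB + 32KiB (not 64KiB aligned) and the first extent
 * at bg->start + 80KiB, rounding down the offset relative to bg->start
 * (80KiB -> 64KiB) and adding bg->start back yields stripe->logical =
 * bg->start + 64KiB, i.e. stripes stay aligned to the block group rather
 * than to absolute logical addresses.
 */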

static void scrub_reset_stripe(struct scrub_stripe *stripe)
{
	scrub_stripe_reset_bitmaps(stripe);

	stripe->nr_meta_extents = 0;
	stripe->nr_data_extents = 0;
	stripe->state = 0;

	for (int i = 0; i < stripe->nr_sectors; i++) {
		stripe->sectors[i].is_metadata = false;
		stripe->sectors[i].csum = NULL;
		stripe->sectors[i].generation = 0;
	}
}

static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
					    struct scrub_stripe *stripe)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct btrfs_bio *bbio = NULL;
	u64 stripe_len = BTRFS_STRIPE_LEN;
	int mirror = stripe->mirror_num;
	int i;

	atomic_inc(&stripe->pending_io);

	for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
		struct page *page = scrub_stripe_get_page(stripe, i);
		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);

		/* The current sector cannot be merged, submit the bio. */
		if (bbio &&
		    ((i > 0 &&
		      !test_bit(i - 1, &stripe->extent_sector_bitmap)) ||
		     bbio->bio.bi_iter.bi_size >= stripe_len)) {
			ASSERT(bbio->bio.bi_iter.bi_size);
			atomic_inc(&stripe->pending_io);
			btrfs_submit_bio(bbio, mirror);
			bbio = NULL;
		}

		if (!bbio) {
			struct btrfs_io_stripe io_stripe = {};
			struct btrfs_io_context *bioc = NULL;
			const u64 logical = stripe->logical +
					    (i << fs_info->sectorsize_bits);
			int err;

			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
					       fs_info, scrub_read_endio, stripe);
			bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;

			io_stripe.is_scrub = true;
			err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
					      &stripe_len, &bioc, &io_stripe,
					      &mirror);
			btrfs_put_bioc(bioc);
			if (err) {
				btrfs_bio_end_io(bbio,
						 errno_to_blk_status(err));
				return;
			}
		}

		__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
	}

	if (bbio) {
		ASSERT(bbio->bio.bi_iter.bi_size);
		atomic_inc(&stripe->pending_io);
		btrfs_submit_bio(bbio, mirror);
	}

	if (atomic_dec_and_test(&stripe->pending_io)) {
		wake_up(&stripe->io_wait);
		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
	}
}
1698
54765392
QW
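/*
 * Submit the initial read for a whole stripe: a single bio covering all
 * BTRFS_STRIPE_LEN bytes.
 *
 * For dev-replace, if the user asked to avoid the source device or the
 * device is missing, rotate to the next mirror instead.
 */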
1699static void scrub_submit_initial_read(struct scrub_ctx *sctx,
1700 struct scrub_stripe *stripe)
1701{
1702 struct btrfs_fs_info *fs_info = sctx->fs_info;
1703 struct btrfs_bio *bbio;
1704 int mirror = stripe->mirror_num;
1705
1706 ASSERT(stripe->bg);
1707 ASSERT(stripe->mirror_num > 0);
1708 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1709
9acaa641
JT
1710 if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
1711 scrub_submit_extent_sector_read(sctx, stripe);
1712 return;
1713 }
1714
54765392
QW
1715 bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
1716 scrub_read_endio, stripe);
1717
1718 /* Read the whole stripe. */
1719 bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
1720 for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) {
1721 int ret;
1722
1723 ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0);
1724 /* We should have allocated enough bio vectors. */
1725 ASSERT(ret == PAGE_SIZE);
1726 }
1727 atomic_inc(&stripe->pending_io);
1728
1729 /*
1730	 * For dev-replace, if either the user asks to avoid the source dev or
1731	 * the device is missing, we try the next mirror instead.
1732 */
1733 if (sctx->is_dev_replace &&
1734 (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
1735 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
1736 !stripe->dev->bdev)) {
1737 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1738 stripe->bg->length);
1739
1740 mirror = calc_next_mirror(mirror, num_copies);
1741 }
1742 btrfs_submit_bio(bbio, mirror);
1743}
1744
8eb3dd17
QW
1745static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
1746{
1747 int i;
1748
1749 for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
1750 if (stripe->sectors[i].is_metadata) {
1751 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1752
1753 btrfs_err(fs_info,
1754 "stripe %llu has unrepaired metadata sector at %llu",
1755 stripe->logical,
1756 stripe->logical + (i << fs_info->sectorsize_bits));
1757 return true;
1758 }
1759 }
1760 return false;
1761}
1762
ae76d8e3
QW
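/*
 * Submit the initial reads for a group of already initialized stripes.
 * Throttle against the device first, then submit everything under one
 * blk plug to batch the bio submissions.
 */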
1763static void submit_initial_group_read(struct scrub_ctx *sctx,
1764 unsigned int first_slot,
1765 unsigned int nr_stripes)
1766{
1767 struct blk_plug plug;
1768
1769 ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
1770 ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
1771
1772 scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1773 btrfs_stripe_nr_to_offset(nr_stripes));
1774 blk_start_plug(&plug);
1775 for (int i = 0; i < nr_stripes; i++) {
1776 struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
1777
1778 /* Those stripes should be initialized. */
1779 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1780 scrub_submit_initial_read(sctx, stripe);
1781 }
1782 blk_finish_plug(&plug);
1783}
1784
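/*
 * Submit any populated but not yet submitted stripes, wait for their
 * read-repair to finish, write back the good sectors for dev-replace,
 * then wait for the writeback and reset all stripes for reuse.
 */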
8eb3dd17 1785static int flush_scrub_stripes(struct scrub_ctx *sctx)
54765392
QW
1786{
1787 struct btrfs_fs_info *fs_info = sctx->fs_info;
1788 struct scrub_stripe *stripe;
1789 const int nr_stripes = sctx->cur_stripe;
8eb3dd17 1790 int ret = 0;
54765392
QW
1791
1792 if (!nr_stripes)
8eb3dd17 1793 return 0;
54765392
QW
1794
1795 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
e02ee89b 1796
ae76d8e3
QW
1797 /* Submit the stripes which are populated but not submitted. */
1798 if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
1799 const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
1800
1801 submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
54765392
QW
1802 }
1803
1804 for (int i = 0; i < nr_stripes; i++) {
1805 stripe = &sctx->stripes[i];
1806
1807 wait_event(stripe->repair_wait,
1808 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1809 }
1810
54765392
QW
1811 /* Submit for dev-replace. */
1812 if (sctx->is_dev_replace) {
8eb3dd17
QW
1813 /*
1814 * For dev-replace, if we know there is something wrong with
eefaf0a1 1815 * metadata, we should immediately abort.
8eb3dd17
QW
1816 */
1817 for (int i = 0; i < nr_stripes; i++) {
1818 if (stripe_has_metadata_error(&sctx->stripes[i])) {
1819 ret = -EIO;
1820 goto out;
1821 }
1822 }
54765392
QW
1823 for (int i = 0; i < nr_stripes; i++) {
1824 unsigned long good;
1825
1826 stripe = &sctx->stripes[i];
1827
1828 ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
1829
1830 bitmap_andnot(&good, &stripe->extent_sector_bitmap,
1831 &stripe->error_bitmap, stripe->nr_sectors);
1832 scrub_write_sectors(sctx, stripe, good, true);
1833 }
1834 }
1835
1836 /* Wait for the above writebacks to finish. */
1837 for (int i = 0; i < nr_stripes; i++) {
1838 stripe = &sctx->stripes[i];
1839
1840 wait_scrub_stripe_io(stripe);
1841 scrub_reset_stripe(stripe);
1842 }
8eb3dd17 1843out:
54765392 1844 sctx->cur_stripe = 0;
8eb3dd17 1845 return ret;
54765392
QW
1846}
1847
1009254b
QW
1848static void raid56_scrub_wait_endio(struct bio *bio)
1849{
1850 complete(bio->bi_private);
1851}
1852
e02ee89b
QW
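/*
 * Queue the next free slot with the first extent found in the range.
 *
 * Return 0 if a stripe is queued (with *found_logical_ret updated), >0 if
 * there are no more extents in the range, or <0 on error. A group is
 * submitted once SCRUB_STRIPES_PER_GROUP slots are filled, and all stripes
 * are flushed when the last slot gets used.
 */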
1853static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1854 struct btrfs_device *dev, int mirror_num,
ae76d8e3
QW
1855 u64 logical, u32 length, u64 physical,
1856 u64 *found_logical_ret)
54765392
QW
1857{
1858 struct scrub_stripe *stripe;
1859 int ret;
1860
ae76d8e3
QW
1861 /*
1862	 * There should always be one slot left, as the caller filling the last
1863	 * slot flushes them all.
1864 */
1865 ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
54765392 1866
47e2b06b
QW
1867 /* @found_logical_ret must be specified. */
1868 ASSERT(found_logical_ret);
1869
54765392 1870 stripe = &sctx->stripes[sctx->cur_stripe];
54765392 1871 scrub_reset_stripe(stripe);
3c771c19
QW
1872 ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
1873 &sctx->csum_path, dev, physical,
1874 mirror_num, logical, length, stripe);
54765392
QW
1875 /* Either >0 as no more extents or <0 for error. */
1876 if (ret)
1877 return ret;
47e2b06b 1878 *found_logical_ret = stripe->logical;
54765392 1879 sctx->cur_stripe++;
ae76d8e3
QW
1880
1881 /* We filled one group, submit it. */
1882 if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
1883 const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
1884
1885 submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
1886 }
1887
1888 /* Last slot used, flush them all. */
1889 if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
1890 return flush_scrub_stripes(sctx);
54765392
QW
1891 return 0;
1892}
1893
1009254b
QW
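/*
 * Scrub the P/Q stripe of one RAID56 full stripe.
 *
 * First read and repair all the data stripes (without reporting their
 * errors), abort if any sector with an extent stays unrepaired, then hand
 * the verified data pages to the raid56 layer as cache to check and
 * regenerate the parity.
 */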
1894static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1895 struct btrfs_device *scrub_dev,
1896 struct btrfs_block_group *bg,
7dc66abb 1897 struct btrfs_chunk_map *map,
1009254b
QW
1898 u64 full_stripe_start)
1899{
1900 DECLARE_COMPLETION_ONSTACK(io_done);
1901 struct btrfs_fs_info *fs_info = sctx->fs_info;
1902 struct btrfs_raid_bio *rbio;
1903 struct btrfs_io_context *bioc = NULL;
1dc4888e 1904 struct btrfs_path extent_path = { 0 };
3c771c19 1905 struct btrfs_path csum_path = { 0 };
1009254b
QW
1906 struct bio *bio;
1907 struct scrub_stripe *stripe;
1908 bool all_empty = true;
1909 const int data_stripes = nr_data_stripes(map);
1910 unsigned long extent_bitmap = 0;
cb091225 1911 u64 length = btrfs_stripe_nr_to_offset(data_stripes);
1009254b
QW
1912 int ret;
1913
1914 ASSERT(sctx->raid56_data_stripes);
1915
1dc4888e 1916 /*
3c771c19
QW
1917	 * For data stripe search, we cannot reuse the same extent/csum paths,
1918	 * as the data stripe bytenr may be smaller than the previous extent's.
1919	 * Thus we have to use our own extent/csum paths.
1dc4888e
QW
1920 */
1921 extent_path.search_commit_root = 1;
1922 extent_path.skip_locking = 1;
3c771c19
QW
1923 csum_path.search_commit_root = 1;
1924 csum_path.skip_locking = 1;
1dc4888e 1925
1009254b
QW
1926 for (int i = 0; i < data_stripes; i++) {
1927 int stripe_index;
1928 int rot;
1929 u64 physical;
1930
1931 stripe = &sctx->raid56_data_stripes[i];
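		/*
		 * A hypothetical example, assuming the 64K BTRFS_STRIPE_LEN:
		 * for RAID5 on 3 devices there are 2 data stripes, so the
		 * 2nd full stripe starts 128K past bg->start, giving
		 * rot = (128K / 2) >> BTRFS_STRIPE_LEN_SHIFT = 1, which
		 * rotates data stripe i onto device (i + 1) % num_stripes.
		 */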
1932 rot = div_u64(full_stripe_start - bg->start,
1933 data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
1934 stripe_index = (i + rot) % map->num_stripes;
1935 physical = map->stripes[stripe_index].physical +
cb091225 1936 btrfs_stripe_nr_to_offset(rot);
1009254b
QW
1937
1938 scrub_reset_stripe(stripe);
1939 set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
3c771c19 1940 ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
1009254b 1941 map->stripes[stripe_index].dev, physical, 1,
cb091225 1942 full_stripe_start + btrfs_stripe_nr_to_offset(i),
1009254b
QW
1943 BTRFS_STRIPE_LEN, stripe);
1944 if (ret < 0)
1945 goto out;
1946 /*
1947		 * No extent in this data stripe; we still need to manually mark it
1948		 * initialized to make later read submission happy.
1949 */
1950 if (ret > 0) {
1951 stripe->logical = full_stripe_start +
cb091225 1952 btrfs_stripe_nr_to_offset(i);
1009254b
QW
1953 stripe->dev = map->stripes[stripe_index].dev;
1954 stripe->mirror_num = 1;
1955 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1956 }
1957 }
1958
1959 /* Check if all data stripes are empty. */
1960 for (int i = 0; i < data_stripes; i++) {
1961 stripe = &sctx->raid56_data_stripes[i];
1962 if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
1963 all_empty = false;
1964 break;
1965 }
1966 }
1967 if (all_empty) {
1968 ret = 0;
1969 goto out;
1970 }
1971
1972 for (int i = 0; i < data_stripes; i++) {
1973 stripe = &sctx->raid56_data_stripes[i];
1974 scrub_submit_initial_read(sctx, stripe);
1975 }
1976 for (int i = 0; i < data_stripes; i++) {
1977 stripe = &sctx->raid56_data_stripes[i];
1978
1979 wait_event(stripe->repair_wait,
1980 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1981 }
1982 /* For now, no zoned support for RAID56. */
1983 ASSERT(!btrfs_is_zoned(sctx->fs_info));
1984
1009254b
QW
1985 /*
1986	 * Now all data stripes are properly verified. Check if we have any
1987	 * unrepaired sectors; if so, abort immediately or we could further
1988	 * corrupt the P/Q stripes.
1989 *
1990 * During the loop, also populate extent_bitmap.
1991 */
1992 for (int i = 0; i < data_stripes; i++) {
1993 unsigned long error;
1994
1995 stripe = &sctx->raid56_data_stripes[i];
1996
1997 /*
1998		 * We should only check errors where there is an extent, as we may
1999		 * hit error bits from an empty data stripe whose device is missing.
2000 */
2001 bitmap_and(&error, &stripe->error_bitmap,
2002 &stripe->extent_sector_bitmap, stripe->nr_sectors);
2003 if (!bitmap_empty(&error, stripe->nr_sectors)) {
2004 btrfs_err(fs_info,
2005"unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
2006 full_stripe_start, i, stripe->nr_sectors,
2007 &error);
2008 ret = -EIO;
2009 goto out;
2010 }
2011 bitmap_or(&extent_bitmap, &extent_bitmap,
2012 &stripe->extent_sector_bitmap, stripe->nr_sectors);
2013 }
2014
2015 /* Now we can check and regenerate the P/Q stripe. */
2016 bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
2017 bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
2018 bio->bi_private = &io_done;
2019 bio->bi_end_io = raid56_scrub_wait_endio;
2020
2021 btrfs_bio_counter_inc_blocked(fs_info);
723b8bb1 2022 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
9fb2acc2 2023 &length, &bioc, NULL, NULL);
1009254b
QW
2024 if (ret < 0) {
2025 btrfs_put_bioc(bioc);
2026 btrfs_bio_counter_dec(fs_info);
2027 goto out;
2028 }
2029 rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
2030 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
2031 btrfs_put_bioc(bioc);
2032 if (!rbio) {
2033 ret = -ENOMEM;
2034 btrfs_bio_counter_dec(fs_info);
2035 goto out;
2036 }
94ead93e
QW
2037	/* Use the recovered stripes as cache to avoid reading them from disk again. */
2038 for (int i = 0; i < data_stripes; i++) {
2039 stripe = &sctx->raid56_data_stripes[i];
2040
2041 raid56_parity_cache_data_pages(rbio, stripe->pages,
2042 full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
2043 }
1009254b
QW
2044 raid56_parity_submit_scrub_rbio(rbio);
2045 wait_for_completion_io(&io_done);
2046 ret = blk_status_to_errno(bio->bi_status);
2047 bio_put(bio);
2048 btrfs_bio_counter_dec(fs_info);
2049
1dc4888e 2050 btrfs_release_path(&extent_path);
3c771c19 2051 btrfs_release_path(&csum_path);
1009254b
QW
2052out:
2053 return ret;
2054}
2055
09022b14
QW
2056/*
2057 * Scrub one range which can only have a simple mirror based profile.
2058 * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
2059 * RAID0/RAID10.)
2060 *
2061 * Since we may need to handle a subset of a block group, we need the
2062 * @logical_start and @logical_length parameters.
2063 */
2064static int scrub_simple_mirror(struct scrub_ctx *sctx,
09022b14 2065 struct btrfs_block_group *bg,
7dc66abb 2066 struct btrfs_chunk_map *map,
09022b14
QW
2067 u64 logical_start, u64 logical_length,
2068 struct btrfs_device *device,
2069 u64 physical, int mirror_num)
2070{
2071 struct btrfs_fs_info *fs_info = sctx->fs_info;
2072 const u64 logical_end = logical_start + logical_length;
09022b14
QW
2073 u64 cur_logical = logical_start;
2074 int ret;
2075
2076 /* The range must be inside the bg */
2077 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
2078
09022b14
QW
2079	/* Go through each extent item inside the logical range */
2080 while (cur_logical < logical_end) {
47e2b06b 2081 u64 found_logical = U64_MAX;
e02ee89b 2082 u64 cur_physical = physical + cur_logical - logical_start;
09022b14
QW
2083
2084 /* Canceled? */
2085 if (atomic_read(&fs_info->scrub_cancel_req) ||
2086 atomic_read(&sctx->cancel_req)) {
2087 ret = -ECANCELED;
2088 break;
2089 }
2090 /* Paused? */
2091 if (atomic_read(&fs_info->scrub_pause_req)) {
2092 /* Push queued extents */
09022b14
QW
2093 scrub_blocked_if_needed(fs_info);
2094 }
2095 /* Block group removed? */
2096 spin_lock(&bg->lock);
3349b57f 2097 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
09022b14
QW
2098 spin_unlock(&bg->lock);
2099 ret = 0;
2100 break;
2101 }
2102 spin_unlock(&bg->lock);
2103
e02ee89b
QW
2104 ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
2105 cur_logical, logical_end - cur_logical,
ae76d8e3 2106 cur_physical, &found_logical);
09022b14
QW
2107 if (ret > 0) {
2108			/* No more extents, just update the accounting */
2109 sctx->stat.last_physical = physical + logical_length;
2110 ret = 0;
2111 break;
2112 }
2113 if (ret < 0)
2114 break;
09022b14 2115
47e2b06b
QW
2116 /* queue_scrub_stripe() returned 0, @found_logical must be updated. */
2117 ASSERT(found_logical != U64_MAX);
ae76d8e3 2118 cur_logical = found_logical + BTRFS_STRIPE_LEN;
e02ee89b 2119
09022b14
QW
2120		/* Don't hold the CPU for too long */
2121 cond_resched();
2122 }
09022b14
QW
2123 return ret;
2124}
2125
8557635e 2126/* Calculate the full stripe length for simple stripe based profiles */
7dc66abb 2127static u64 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map)
8557635e
QW
2128{
2129 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2130 BTRFS_BLOCK_GROUP_RAID10));
2131
cb091225 2132 return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
8557635e
QW
2133}
2134
2135/* Get the logical bytenr for the stripe */
7dc66abb 2136static u64 simple_stripe_get_logical(struct btrfs_chunk_map *map,
8557635e
QW
2137 struct btrfs_block_group *bg,
2138 int stripe_index)
2139{
2140 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2141 BTRFS_BLOCK_GROUP_RAID10));
2142 ASSERT(stripe_index < map->num_stripes);
2143
2144 /*
2145 * (stripe_index / sub_stripes) gives how many data stripes we need to
2146 * skip.
2147 */
cb091225 2148 return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
a97699d1 2149 bg->start;
8557635e
QW
2150}
2151
2152/* Get the mirror number for the stripe */
7dc66abb 2153static int simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index)
8557635e
QW
2154{
2155 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2156 BTRFS_BLOCK_GROUP_RAID10));
2157 ASSERT(stripe_index < map->num_stripes);
2158
2159 /* For RAID0, it's fixed to 1, for RAID10 it's 0,1,0,1... */
2160 return stripe_index % map->sub_stripes + 1;
2161}
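/*
 * A hypothetical example, assuming the 64K BTRFS_STRIPE_LEN: RAID10 with
 * num_stripes=4 and sub_stripes=2 has a full stripe length of
 * (4 / 2) * 64K = 128K, stripe indexes 0-3 map to mirror numbers 1,2,1,2,
 * and stripe indexes 2 and 3 start one 64K stripe past bg->start.
 */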
2162
2163static int scrub_simple_stripe(struct scrub_ctx *sctx,
8557635e 2164 struct btrfs_block_group *bg,
7dc66abb 2165 struct btrfs_chunk_map *map,
8557635e
QW
2166 struct btrfs_device *device,
2167 int stripe_index)
2168{
2169 const u64 logical_increment = simple_stripe_full_stripe_len(map);
2170 const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
2171 const u64 orig_physical = map->stripes[stripe_index].physical;
2172 const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
2173 u64 cur_logical = orig_logical;
2174 u64 cur_physical = orig_physical;
2175 int ret = 0;
2176
2177 while (cur_logical < bg->start + bg->length) {
2178 /*
2179 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
2180 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
2181 * this stripe.
2182 */
6b4d375a
QW
2183 ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
2184 BTRFS_STRIPE_LEN, device, cur_physical,
2185 mirror_num);
8557635e
QW
2186 if (ret)
2187 return ret;
2188 /* Skip to next stripe which belongs to the target device */
2189 cur_logical += logical_increment;
2190 /* For physical offset, we just go to next stripe */
a97699d1 2191 cur_physical += BTRFS_STRIPE_LEN;
8557635e
QW
2192 }
2193 return ret;
2194}
2195
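/*
 * Scrub one device stripe of a chunk.
 *
 * Simple mirror profiles scrub the whole block group range through
 * scrub_simple_mirror(), RAID0/RAID10 go through scrub_simple_stripe(),
 * and only RAID56 still walks the physical range stripe by stripe,
 * handing parity stripes to scrub_raid56_parity_stripe().
 */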
d9d181c1 2196static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2ae8ae3d 2197 struct btrfs_block_group *bg,
7dc66abb 2198 struct btrfs_chunk_map *map,
a36cf8b8 2199 struct btrfs_device *scrub_dev,
bc88b486 2200 int stripe_index)
a2de733c 2201{
fb456252 2202 struct btrfs_fs_info *fs_info = sctx->fs_info;
09022b14 2203 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2ae8ae3d 2204 const u64 chunk_logical = bg->start;
a2de733c 2205 int ret;
8eb3dd17 2206 int ret2;
1194a824 2207 u64 physical = map->stripes[stripe_index].physical;
7dc66abb 2208 const u64 dev_stripe_len = btrfs_calc_stripe_length(map);
bc88b486 2209 const u64 physical_end = physical + dev_stripe_len;
a2de733c 2210 u64 logical;
625f1c8d 2211 u64 logic_end;
18d30ab9 2212 /* The logical increment after finishing one stripe */
5c07c53f 2213 u64 increment;
18d30ab9 2214 /* Offset inside the chunk */
a2de733c 2215 u64 offset;
5a6ac9ea 2216 u64 stripe_logical;
3b080b25 2217 int stop_loop = 0;
53b381b3 2218
1dc4888e
QW
2219 /* Extent_path should be released by now. */
2220 ASSERT(sctx->extent_path.nodes[0] == NULL);
2221
cb7ab021 2222 scrub_blocked_if_needed(fs_info);
7a26285e 2223
de17addc
NA
2224 if (sctx->is_dev_replace &&
2225 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2226 mutex_lock(&sctx->wr_lock);
2227 sctx->write_pointer = physical;
2228 mutex_unlock(&sctx->wr_lock);
de17addc
NA
2229 }
2230
1009254b
QW
2231 /* Prepare the extra data stripes used by RAID56. */
2232 if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2233 ASSERT(sctx->raid56_data_stripes == NULL);
2234
2235 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2236 sizeof(struct scrub_stripe),
2237 GFP_KERNEL);
2238 if (!sctx->raid56_data_stripes) {
2239 ret = -ENOMEM;
2240 goto out;
2241 }
2242 for (int i = 0; i < nr_data_stripes(map); i++) {
2243 ret = init_scrub_stripe(fs_info,
2244 &sctx->raid56_data_stripes[i]);
2245 if (ret < 0)
2246 goto out;
2247 sctx->raid56_data_stripes[i].bg = bg;
2248 sctx->raid56_data_stripes[i].sctx = sctx;
2249 }
2250 }
09022b14
QW
2251 /*
2252 * There used to be a big double loop to handle all profiles using the
2253	 * same routine, which grew larger and more gross over time.
2254	 *
2255	 * So here we handle each profile differently, so simpler profiles
2256	 * have simpler scrubbing functions.
2257 */
2258 if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
2259 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2260 /*
2261		 * The above check rules out all complex profiles; the remaining
2262		 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
2263		 * mirrored duplication without striping.
2264		 *
2265		 * Only @physical and @mirror_num need to be calculated using
2266		 * @stripe_index.
2267 */
6b4d375a
QW
2268 ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
2269 scrub_dev, map->stripes[stripe_index].physical,
09022b14 2270 stripe_index + 1);
e430c428 2271 offset = 0;
09022b14
QW
2272 goto out;
2273 }
8557635e 2274 if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
6b4d375a 2275 ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
cb091225 2276 offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
8557635e
QW
2277 goto out;
2278 }
2279
2280 /* Only RAID56 goes through the old code */
2281 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
a2de733c 2282 ret = 0;
e430c428
QW
2283
2284 /* Calculate the logical end of the stripe */
2285 get_raid56_logic_offset(physical_end, stripe_index,
2286 map, &logic_end, NULL);
2287 logic_end += chunk_logical;
2288
2289 /* Initialize @offset in case we need to go to out: label */
2290 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
cb091225 2291 increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
e430c428 2292
18d30ab9
QW
2293 /*
2294	 * Due to the rotation, for RAID56 it's better to iterate each stripe
2295	 * using its physical offset.
2296 */
3b080b25 2297 while (physical < physical_end) {
18d30ab9
QW
2298 ret = get_raid56_logic_offset(physical, stripe_index, map,
2299 &logical, &stripe_logical);
e430c428
QW
2300 logical += chunk_logical;
2301 if (ret) {
2302			/* It is a parity stripe */
2303 stripe_logical += chunk_logical;
1009254b
QW
2304 ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2305 map, stripe_logical);
e430c428
QW
2306 if (ret)
2307 goto out;
18d30ab9 2308 goto next;
f2f66a2f
ZL
2309 }
2310
18d30ab9
QW
2311 /*
2312		 * Now we're at a data stripe, scrub each extent in the range.
2313		 *
2314		 * At this stage, if we ignore the repair part, inside each data
2315		 * stripe it is no different from the SINGLE profile.
2316 * We can reuse scrub_simple_mirror() here, as the repair part
2317 * is still based on @mirror_num.
2318 */
6b4d375a 2319 ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
18d30ab9 2320 scrub_dev, physical, 1);
a2de733c
AJ
2321 if (ret < 0)
2322 goto out;
a2de733c 2323next:
a2de733c 2324 logical += increment;
a97699d1 2325 physical += BTRFS_STRIPE_LEN;
d9d181c1 2326 spin_lock(&sctx->stat_lock);
625f1c8d 2327 if (stop_loop)
bc88b486
QW
2328 sctx->stat.last_physical =
2329 map->stripes[stripe_index].physical + dev_stripe_len;
625f1c8d
LB
2330 else
2331 sctx->stat.last_physical = physical;
d9d181c1 2332 spin_unlock(&sctx->stat_lock);
625f1c8d
LB
2333 if (stop_loop)
2334 break;
a2de733c 2335 }
ff023aac 2336out:
8eb3dd17 2337 ret2 = flush_scrub_stripes(sctx);
b50f2d04 2338 if (!ret)
8eb3dd17 2339 ret = ret2;
1dc4888e 2340 btrfs_release_path(&sctx->extent_path);
3c771c19 2341 btrfs_release_path(&sctx->csum_path);
1dc4888e 2342
1009254b
QW
2343 if (sctx->raid56_data_stripes) {
2344 for (int i = 0; i < nr_data_stripes(map); i++)
2345 release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2346 kfree(sctx->raid56_data_stripes);
2347 sctx->raid56_data_stripes = NULL;
2348 }
7db1c5d1
NA
2349
2350 if (sctx->is_dev_replace && ret >= 0) {
2351 int ret2;
2352
2ae8ae3d
QW
2353 ret2 = sync_write_pointer_for_zoned(sctx,
2354 chunk_logical + offset,
2355 map->stripes[stripe_index].physical,
2356 physical_end);
7db1c5d1
NA
2357 if (ret2)
2358 ret = ret2;
2359 }
2360
a2de733c
AJ
2361 return ret < 0 ? ret : 0;
2362}
2363
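/*
 * Scrub the chunk backing the device extent at @dev_offset: look up its
 * chunk map and scrub every stripe of it that lives on @scrub_dev.
 */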
d9d181c1 2364static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
d04fbe19 2365 struct btrfs_block_group *bg,
a36cf8b8 2366 struct btrfs_device *scrub_dev,
020d5b73 2367 u64 dev_offset,
d04fbe19 2368 u64 dev_extent_len)
a2de733c 2369{
fb456252 2370 struct btrfs_fs_info *fs_info = sctx->fs_info;
7dc66abb 2371 struct btrfs_chunk_map *map;
a2de733c 2372 int i;
ff023aac 2373 int ret = 0;
a2de733c 2374
7dc66abb
FM
2375 map = btrfs_find_chunk_map(fs_info, bg->start, bg->length);
2376 if (!map) {
020d5b73
FM
2377 /*
2378 * Might have been an unused block group deleted by the cleaner
2379 * kthread or relocation.
2380 */
d04fbe19 2381 spin_lock(&bg->lock);
3349b57f 2382 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
020d5b73 2383 ret = -EINVAL;
d04fbe19 2384 spin_unlock(&bg->lock);
020d5b73
FM
2385
2386 return ret;
2387 }
7dc66abb 2388 if (map->start != bg->start)
a2de733c 2389 goto out;
7dc66abb 2390 if (map->chunk_len < dev_extent_len)
a2de733c
AJ
2391 goto out;
2392
2393 for (i = 0; i < map->num_stripes; ++i) {
a36cf8b8 2394 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
859acaf1 2395 map->stripes[i].physical == dev_offset) {
7dc66abb 2396 ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
a2de733c
AJ
2397 if (ret)
2398 goto out;
2399 }
2400 }
2401out:
7dc66abb 2402 btrfs_free_chunk_map(map);
a2de733c
AJ
2403
2404 return ret;
2405}
2406
de17addc
NA
2407static int finish_extent_writes_for_zoned(struct btrfs_root *root,
2408 struct btrfs_block_group *cache)
2409{
2410 struct btrfs_fs_info *fs_info = cache->fs_info;
2411 struct btrfs_trans_handle *trans;
2412
2413 if (!btrfs_is_zoned(fs_info))
2414 return 0;
2415
2416 btrfs_wait_block_group_reservations(cache);
2417 btrfs_wait_nocow_writers(cache);
2418 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
2419
2420 trans = btrfs_join_transaction(root);
2421 if (IS_ERR(trans))
2422 return PTR_ERR(trans);
2423 return btrfs_commit_transaction(trans);
2424}
2425
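/*
 * Walk all device extents of @scrub_dev inside [start, end), mark each
 * corresponding block group read-only (mandatory for dev-replace) and
 * scrub it chunk by chunk.
 */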
a2de733c 2426static noinline_for_stack
a36cf8b8 2427int scrub_enumerate_chunks(struct scrub_ctx *sctx,
32934280 2428 struct btrfs_device *scrub_dev, u64 start, u64 end)
a2de733c
AJ
2429{
2430 struct btrfs_dev_extent *dev_extent = NULL;
2431 struct btrfs_path *path;
0b246afa
JM
2432 struct btrfs_fs_info *fs_info = sctx->fs_info;
2433 struct btrfs_root *root = fs_info->dev_root;
a2de733c 2434 u64 chunk_offset;
55e3a601 2435 int ret = 0;
76a8efa1 2436 int ro_set;
a2de733c
AJ
2437 int slot;
2438 struct extent_buffer *l;
2439 struct btrfs_key key;
2440 struct btrfs_key found_key;
32da5386 2441 struct btrfs_block_group *cache;
ff023aac 2442 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
a2de733c
AJ
2443
2444 path = btrfs_alloc_path();
2445 if (!path)
2446 return -ENOMEM;
2447
e4058b54 2448 path->reada = READA_FORWARD;
a2de733c
AJ
2449 path->search_commit_root = 1;
2450 path->skip_locking = 1;
2451
a36cf8b8 2452 key.objectid = scrub_dev->devid;
a2de733c
AJ
2453 key.offset = 0ull;
2454 key.type = BTRFS_DEV_EXTENT_KEY;
2455
a2de733c 2456 while (1) {
d04fbe19
QW
2457 u64 dev_extent_len;
2458
a2de733c
AJ
2459 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2460 if (ret < 0)
8c51032f
AJ
2461 break;
2462 if (ret > 0) {
2463 if (path->slots[0] >=
2464 btrfs_header_nritems(path->nodes[0])) {
2465 ret = btrfs_next_leaf(root, path);
55e3a601
Z
2466 if (ret < 0)
2467 break;
2468 if (ret > 0) {
2469 ret = 0;
8c51032f 2470 break;
55e3a601
Z
2471 }
2472 } else {
2473 ret = 0;
8c51032f
AJ
2474 }
2475 }
a2de733c
AJ
2476
2477 l = path->nodes[0];
2478 slot = path->slots[0];
2479
2480 btrfs_item_key_to_cpu(l, &found_key, slot);
2481
a36cf8b8 2482 if (found_key.objectid != scrub_dev->devid)
a2de733c
AJ
2483 break;
2484
962a298f 2485 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
a2de733c
AJ
2486 break;
2487
2488 if (found_key.offset >= end)
2489 break;
2490
2491 if (found_key.offset < key.offset)
2492 break;
2493
2494 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
d04fbe19 2495 dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
a2de733c 2496
d04fbe19 2497 if (found_key.offset + dev_extent_len <= start)
ced96edc 2498 goto skip;
a2de733c 2499
a2de733c
AJ
2500 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2501
2502 /*
2503 * get a reference on the corresponding block group to prevent
2504 * the chunk from going away while we scrub it
2505 */
2506 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
ced96edc
QW
2507
2508		/* Some chunks are removed but not committed to disk yet,
2509		 * continue scrubbing. */
2510 if (!cache)
2511 goto skip;
2512
a692e13d
FM
2513 ASSERT(cache->start <= chunk_offset);
2514 /*
2515 * We are using the commit root to search for device extents, so
2516 * that means we could have found a device extent item from a
2517 * block group that was deleted in the current transaction. The
2518 * logical start offset of the deleted block group, stored at
2519 * @chunk_offset, might be part of the logical address range of
2520 * a new block group (which uses different physical extents).
2521 * In this case btrfs_lookup_block_group() has returned the new
2522 * block group, and its start address is less than @chunk_offset.
2523 *
2524 * We skip such new block groups, because it's pointless to
2525 * process them, as we won't find their extents because we search
2526 * for them using the commit root of the extent tree. For a device
2527 * replace it's also fine to skip it, we won't miss copying them
2528 * to the target device because we have the write duplication
2529 * setup through the regular write path (by btrfs_map_block()),
2530 * and we have committed a transaction when we started the device
2531 * replace, right after setting up the device replace state.
2532 */
2533 if (cache->start < chunk_offset) {
2534 btrfs_put_block_group(cache);
2535 goto skip;
2536 }
2537
78ce9fc2 2538 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
3349b57f 2539 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
0dc16ef4
FM
2540 btrfs_put_block_group(cache);
2541 goto skip;
78ce9fc2 2542 }
78ce9fc2
NA
2543 }
2544
2473d24f
FM
2545 /*
2546 * Make sure that while we are scrubbing the corresponding block
2547 * group doesn't get its logical address and its device extents
2548 * reused for another block group, which can possibly be of a
2549 * different type and different profile. We do this to prevent
2550 * false error detections and crashes due to bogus attempts to
2551 * repair extents.
2552 */
2553 spin_lock(&cache->lock);
3349b57f 2554 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2473d24f
FM
2555 spin_unlock(&cache->lock);
2556 btrfs_put_block_group(cache);
2557 goto skip;
2558 }
6b7304af 2559 btrfs_freeze_block_group(cache);
2473d24f
FM
2560 spin_unlock(&cache->lock);
2561
55e3a601
Z
2562 /*
2563		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
2564 * to avoid deadlock caused by:
2565 * btrfs_inc_block_group_ro()
2566 * -> btrfs_wait_for_commit()
2567 * -> btrfs_commit_transaction()
2568 * -> btrfs_scrub_pause()
2569 */
2570 scrub_pause_on(fs_info);
b12de528
QW
2571
2572 /*
2573 * Don't do chunk preallocation for scrub.
2574 *
2575 * This is especially important for SYSTEM bgs, or we can hit
2576 * -EFBIG from btrfs_finish_chunk_alloc() like:
2577 * 1. The only SYSTEM bg is marked RO.
2578 * Since SYSTEM bg is small, that's pretty common.
2579 * 2. New SYSTEM bg will be allocated
2580 * Due to regular version will allocate new chunk.
2581 * 3. New SYSTEM bg is empty and will get cleaned up
2582 * Before cleanup really happens, it's marked RO again.
2583 * 4. Empty SYSTEM bg get scrubbed
2584 * We go back to 2.
2585 *
2586		 * This can easily boost the number of SYSTEM chunks if the cleaner
2587		 * thread can't be triggered fast enough, using up all the space of
2588		 * btrfs_super_block::sys_chunk_array.
1bbb97b8
QW
2589 *
2590 * While for dev replace, we need to try our best to mark block
2591 * group RO, to prevent race between:
2592 * - Write duplication
2593 * Contains latest data
2594 * - Scrub copy
2595 * Contains data from commit tree
2596 *
2597 * If target block group is not marked RO, nocow writes can
2598 * be overwritten by scrub copy, causing data corruption.
2599 * So for dev-replace, it's not allowed to continue if a block
2600 * group is not RO.
b12de528 2601 */
1bbb97b8 2602 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
de17addc
NA
2603 if (!ret && sctx->is_dev_replace) {
2604 ret = finish_extent_writes_for_zoned(root, cache);
2605 if (ret) {
2606 btrfs_dec_block_group_ro(cache);
2607 scrub_pause_off(fs_info);
2608 btrfs_put_block_group(cache);
2609 break;
2610 }
2611 }
2612
76a8efa1
Z
2613 if (ret == 0) {
2614 ro_set = 1;
7561551e
QW
2615 } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2616 !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
76a8efa1
Z
2617 /*
2618			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
2619			 * fails to create a new chunk for metadata.
1bbb97b8 2620			 * It is not a problem for scrub, because
76a8efa1
Z
2621			 * metadata is always cowed, and our scrub pauses
2622			 * transaction commits.
7561551e
QW
2623 *
2624 * For RAID56 chunks, we have to mark them read-only
2625			 * for scrub, as later we would use our own cache
2626			 * outside of the RAID56 realm.
2627			 * Thus we want the RAID56 bg to be marked RO to
2628			 * prevent RMW from screwing up our cache.
76a8efa1
Z
2629 */
2630 ro_set = 0;
195a49ea
FM
2631 } else if (ret == -ETXTBSY) {
2632 btrfs_warn(fs_info,
2633 "skipping scrub of block group %llu due to active swapfile",
2634 cache->start);
2635 scrub_pause_off(fs_info);
2636 ret = 0;
2637 goto skip_unfreeze;
76a8efa1 2638 } else {
5d163e0e 2639 btrfs_warn(fs_info,
913e1535 2640 "failed setting block group ro: %d", ret);
6b7304af 2641 btrfs_unfreeze_block_group(cache);
55e3a601 2642 btrfs_put_block_group(cache);
1bbb97b8 2643 scrub_pause_off(fs_info);
55e3a601
Z
2644 break;
2645 }
2646
1bbb97b8
QW
2647 /*
2648 * Now the target block is marked RO, wait for nocow writes to
2649 * finish before dev-replace.
2650 * COW is fine, as COW never overwrites extents in commit tree.
2651 */
2652 if (sctx->is_dev_replace) {
2653 btrfs_wait_nocow_writers(cache);
2654 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
2655 cache->length);
2656 }
2657
2658 scrub_pause_off(fs_info);
3ec17a67 2659 down_write(&dev_replace->rwsem);
d04fbe19 2660 dev_replace->cursor_right = found_key.offset + dev_extent_len;
ff023aac
SB
2661 dev_replace->cursor_left = found_key.offset;
2662 dev_replace->item_needs_writeback = 1;
cb5583dd
DS
2663 up_write(&dev_replace->rwsem);
2664
d04fbe19
QW
2665 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2666 dev_extent_len);
78ce9fc2
NA
2667 if (sctx->is_dev_replace &&
2668 !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
2669 cache, found_key.offset))
2670 ro_set = 0;
2671
3ec17a67 2672 down_write(&dev_replace->rwsem);
1a1a8b73
FM
2673 dev_replace->cursor_left = dev_replace->cursor_right;
2674 dev_replace->item_needs_writeback = 1;
3ec17a67 2675 up_write(&dev_replace->rwsem);
1a1a8b73 2676
76a8efa1 2677 if (ro_set)
2ff7e61e 2678 btrfs_dec_block_group_ro(cache);
ff023aac 2679
758f2dfc
FM
2680 /*
2681 * We might have prevented the cleaner kthread from deleting
2682 * this block group if it was already unused because we raced
2683 * and set it to RO mode first. So add it back to the unused
2684 * list, otherwise it might not ever be deleted unless a manual
2685 * balance is triggered or it becomes used and unused again.
2686 */
2687 spin_lock(&cache->lock);
3349b57f
JB
2688 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
2689 !cache->ro && cache->reserved == 0 && cache->used == 0) {
758f2dfc 2690 spin_unlock(&cache->lock);
6e80d4f8
DZ
2691 if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2692 btrfs_discard_queue_work(&fs_info->discard_ctl,
2693 cache);
2694 else
2695 btrfs_mark_bg_unused(cache);
758f2dfc
FM
2696 } else {
2697 spin_unlock(&cache->lock);
2698 }
195a49ea 2699skip_unfreeze:
6b7304af 2700 btrfs_unfreeze_block_group(cache);
a2de733c
AJ
2701 btrfs_put_block_group(cache);
2702 if (ret)
2703 break;
32934280 2704 if (sctx->is_dev_replace &&
af1be4f8 2705 atomic64_read(&dev_replace->num_write_errors) > 0) {
ff023aac
SB
2706 ret = -EIO;
2707 break;
2708 }
2709 if (sctx->stat.malloc_errors > 0) {
2710 ret = -ENOMEM;
2711 break;
2712 }
ced96edc 2713skip:
d04fbe19 2714 key.offset = found_key.offset + dev_extent_len;
71267333 2715 btrfs_release_path(path);
a2de733c
AJ
2716 }
2717
a2de733c 2718 btrfs_free_path(path);
8c51032f 2719
55e3a601 2720 return ret;
a2de733c
AJ
2721}
2722
2a2dc22f
QW
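/*
 * Read one super block copy synchronously and verify its checksum,
 * generation and validity.
 */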
2723static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2724 struct page *page, u64 physical, u64 generation)
2725{
2726 struct btrfs_fs_info *fs_info = sctx->fs_info;
2727 struct bio_vec bvec;
2728 struct bio bio;
2729 struct btrfs_super_block *sb = page_address(page);
2730 int ret;
2731
2732 bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
2733 bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
2734 __bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
2735 ret = submit_bio_wait(&bio);
2736 bio_uninit(&bio);
2737
2738 if (ret < 0)
2739 return ret;
2740 ret = btrfs_check_super_csum(fs_info, sb);
2741 if (ret != 0) {
2742 btrfs_err_rl(fs_info,
2743 "super block at physical %llu devid %llu has bad csum",
2744 physical, dev->devid);
2745 return -EIO;
2746 }
2747 if (btrfs_super_generation(sb) != generation) {
2748 btrfs_err_rl(fs_info,
2749"super block at physical %llu devid %llu has bad generation %llu expect %llu",
2750 physical, dev->devid,
2751 btrfs_super_generation(sb), generation);
2752 return -EUCLEAN;
2753 }
2754
2755 return btrfs_validate_super(fs_info, sb, -1);
2756}
2757
a36cf8b8
SB
2758static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2759 struct btrfs_device *scrub_dev)
a2de733c
AJ
2760{
2761 int i;
2762 u64 bytenr;
2763 u64 gen;
2a2dc22f
QW
2764 int ret = 0;
2765 struct page *page;
0b246afa 2766 struct btrfs_fs_info *fs_info = sctx->fs_info;
a2de733c 2767
84961539 2768 if (BTRFS_FS_ERROR(fs_info))
fbabd4a3 2769 return -EROFS;
79787eaa 2770
2a2dc22f
QW
2771 page = alloc_page(GFP_KERNEL);
2772 if (!page) {
2773 spin_lock(&sctx->stat_lock);
2774 sctx->stat.malloc_errors++;
2775 spin_unlock(&sctx->stat_lock);
2776 return -ENOMEM;
2777 }
2778
5f546063 2779	/* Seed devices of a new filesystem have their own generation. */
0b246afa 2780 if (scrub_dev->fs_devices != fs_info->fs_devices)
5f546063
MX
2781 gen = scrub_dev->generation;
2782 else
0124855f 2783 gen = btrfs_get_last_trans_committed(fs_info);
a2de733c
AJ
2784
2785 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2786 bytenr = btrfs_sb_offset(i);
935e5cc9
MX
2787 if (bytenr + BTRFS_SUPER_INFO_SIZE >
2788 scrub_dev->commit_total_bytes)
a2de733c 2789 break;
12659251
NA
2790 if (!btrfs_check_super_location(scrub_dev, bytenr))
2791 continue;
a2de733c 2792
2a2dc22f
QW
2793 ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2794 if (ret) {
2795 spin_lock(&sctx->stat_lock);
2796 sctx->stat.super_errors++;
2797 spin_unlock(&sctx->stat_lock);
2798 }
a2de733c 2799 }
2a2dc22f 2800 __free_page(page);
a2de733c
AJ
2801 return 0;
2802}
2803
e89c4a9c
JB
2804static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2805{
2806 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2807 &fs_info->scrub_lock)) {
be539518 2808 struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
e89c4a9c
JB
2809
2810 fs_info->scrub_workers = NULL;
e89c4a9c
JB
2811 mutex_unlock(&fs_info->scrub_lock);
2812
be539518
CH
2813 if (scrub_workers)
2814 destroy_workqueue(scrub_workers);
e89c4a9c
JB
2815 }
2816}
2817
a2de733c
AJ
2818/*
2819 * Get a reference count on fs_info->scrub_workers. Start workers if necessary.
2820 */
39dc7bd9 2821static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
a2de733c 2822{
be539518 2823 struct workqueue_struct *scrub_workers = NULL;
6f011058 2824 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
0339ef2f 2825 int max_active = fs_info->thread_pool_size;
e89c4a9c 2826 int ret = -ENOMEM;
a2de733c 2827
e89c4a9c
JB
2828 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2829 return 0;
eb4318e5 2830
39dc7bd9 2831 scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
e89c4a9c 2832 if (!scrub_workers)
81db6ae8 2833 return -ENOMEM;
ff09c4ca 2834
e89c4a9c
JB
2835 mutex_lock(&fs_info->scrub_lock);
2836 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
81db6ae8 2837 ASSERT(fs_info->scrub_workers == NULL);
e89c4a9c 2838 fs_info->scrub_workers = scrub_workers;
ff09c4ca 2839 refcount_set(&fs_info->scrub_workers_refcnt, 1);
e89c4a9c
JB
2840 mutex_unlock(&fs_info->scrub_lock);
2841 return 0;
632dd772 2842 }
e89c4a9c
JB
2843	/* Another thread raced in and created the workers for us */
2844 refcount_inc(&fs_info->scrub_workers_refcnt);
2845 mutex_unlock(&fs_info->scrub_lock);
e82afc52 2846
e89c4a9c 2847 ret = 0;
5dc96f8d 2848
be539518 2849 destroy_workqueue(scrub_workers);
e89c4a9c 2850 return ret;
a2de733c
AJ
2851}
2852
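/*
 * Entry point for both scrub and dev-replace: set up the scrub context
 * and workers, scrub the super blocks (for plain scrub only), then
 * enumerate and scrub all chunks of the device. If super block errors
 * were found, force a transaction commit at the end so the super blocks
 * get rewritten.
 */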
aa1b8cd4
SB
2853int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2854 u64 end, struct btrfs_scrub_progress *progress,
63a212ab 2855 int readonly, int is_dev_replace)
a2de733c 2856{
562d7b15 2857 struct btrfs_dev_lookup_args args = { .devid = devid };
d9d181c1 2858 struct scrub_ctx *sctx;
a2de733c
AJ
2859 int ret;
2860 struct btrfs_device *dev;
a5fb1142 2861 unsigned int nofs_flag;
f9eab5f0 2862 bool need_commit = false;
a2de733c 2863
aa1b8cd4 2864 if (btrfs_fs_closing(fs_info))
6c3abeda 2865 return -EAGAIN;
a2de733c 2866
fc65bb53
QW
2867 /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
2868 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
b5d67f64 2869
fc65bb53
QW
2870 /*
2871 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
2872 * value (max nodesize / min sectorsize), thus nodesize should always
2873 * be fine.
2874 */
2875 ASSERT(fs_info->nodesize <=
2876 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
7a9e9987 2877
0e94c4f4
DS
2878 /* Allocate outside of device_list_mutex */
2879 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2880 if (IS_ERR(sctx))
2881 return PTR_ERR(sctx);
a2de733c 2882
39dc7bd9 2883 ret = scrub_workers_get(fs_info);
e89c4a9c
JB
2884 if (ret)
2885 goto out_free_ctx;
2886
aa1b8cd4 2887 mutex_lock(&fs_info->fs_devices->device_list_mutex);
562d7b15 2888 dev = btrfs_find_device(fs_info->fs_devices, &args);
e6e674bd
AJ
2889 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
2890 !is_dev_replace)) {
aa1b8cd4 2891 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
0e94c4f4 2892 ret = -ENODEV;
e89c4a9c 2893 goto out;
a2de733c 2894 }
a2de733c 2895
ebbede42
AJ
2896 if (!is_dev_replace && !readonly &&
2897 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
5d68da3b 2898 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a4852cf2
DS
2899 btrfs_err_in_rcu(fs_info,
2900 "scrub on devid %llu: filesystem on %s is not writable",
cb3e217b 2901 devid, btrfs_dev_name(dev));
0e94c4f4 2902 ret = -EROFS;
e89c4a9c 2903 goto out;
5d68da3b
MX
2904 }
2905
3b7a016f 2906 mutex_lock(&fs_info->scrub_lock);
e12c9621 2907 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
401e29c1 2908 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
a2de733c 2909 mutex_unlock(&fs_info->scrub_lock);
aa1b8cd4 2910 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
0e94c4f4 2911 ret = -EIO;
e89c4a9c 2912 goto out;
a2de733c
AJ
2913 }
2914
cb5583dd 2915 down_read(&fs_info->dev_replace.rwsem);
cadbc0a0 2916 if (dev->scrub_ctx ||
8dabb742
SB
2917 (!is_dev_replace &&
2918 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
cb5583dd 2919 up_read(&fs_info->dev_replace.rwsem);
a2de733c 2920 mutex_unlock(&fs_info->scrub_lock);
aa1b8cd4 2921 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
0e94c4f4 2922 ret = -EINPROGRESS;
e89c4a9c 2923 goto out;
a2de733c 2924 }
cb5583dd 2925 up_read(&fs_info->dev_replace.rwsem);
3b7a016f 2926
d9d181c1 2927 sctx->readonly = readonly;
cadbc0a0 2928 dev->scrub_ctx = sctx;
3cb0929a 2929 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a2de733c 2930
3cb0929a
WS
2931 /*
2932	 * By checking @scrub_pause_req here, we can avoid the
2933	 * race between transaction commit and scrubbing.
2934 */
cb7ab021 2935 __scrub_blocked_if_needed(fs_info);
a2de733c
AJ
2936 atomic_inc(&fs_info->scrubs_running);
2937 mutex_unlock(&fs_info->scrub_lock);
a2de733c 2938
a5fb1142
FM
2939 /*
2940 * In order to avoid deadlock with reclaim when there is a transaction
2941 * trying to pause scrub, make sure we use GFP_NOFS for all the
46343501 2942 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
a5fb1142
FM
2943 * invoked by our callees. The pausing request is done when the
2944 * transaction commit starts, and it blocks the transaction until scrub
2945 * is paused (done at specific points at scrub_stripe() or right above
2946 * before incrementing fs_info->scrubs_running).
2947 */
2948 nofs_flag = memalloc_nofs_save();
ff023aac 2949 if (!is_dev_replace) {
f9eab5f0
QW
2950 u64 old_super_errors;
2951
2952 spin_lock(&sctx->stat_lock);
2953 old_super_errors = sctx->stat.super_errors;
2954 spin_unlock(&sctx->stat_lock);
2955
d1e14420 2956 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
9b011adf
WS
2957 /*
2958		 * By holding the device list mutex, we can
2959		 * kick off writing supers during log tree sync.
2960 */
3cb0929a 2961 mutex_lock(&fs_info->fs_devices->device_list_mutex);
ff023aac 2962 ret = scrub_supers(sctx, dev);
3cb0929a 2963 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
f9eab5f0
QW
2964
2965 spin_lock(&sctx->stat_lock);
2966 /*
2967		 * Super block errors found, but we cannot commit a transaction
2968		 * in the current context, since btrfs_commit_transaction() needs
2969		 * to pause the currently running scrub (held by ourselves).
2970 */
2971 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
2972 need_commit = true;
2973 spin_unlock(&sctx->stat_lock);
ff023aac 2974 }
a2de733c
AJ
2975
2976 if (!ret)
32934280 2977 ret = scrub_enumerate_chunks(sctx, dev, start, end);
a5fb1142 2978 memalloc_nofs_restore(nofs_flag);
a2de733c 2979
a2de733c
AJ
2980 atomic_dec(&fs_info->scrubs_running);
2981 wake_up(&fs_info->scrub_pause_wait);
2982
2983 if (progress)
d9d181c1 2984 memcpy(progress, &sctx->stat, sizeof(*progress));
a2de733c 2985
d1e14420
AJ
2986 if (!is_dev_replace)
2987 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
2988 ret ? "not finished" : "finished", devid, ret);
2989
a2de733c 2990 mutex_lock(&fs_info->scrub_lock);
cadbc0a0 2991 dev->scrub_ctx = NULL;
a2de733c
AJ
2992 mutex_unlock(&fs_info->scrub_lock);
2993
e89c4a9c 2994 scrub_workers_put(fs_info);
f55985f4 2995 scrub_put_ctx(sctx);
a2de733c 2996
f9eab5f0
QW
2997 /*
2998 * We found some super block errors before, now try to force a
2999 * transaction commit, as scrub has finished.
3000 */
3001 if (need_commit) {
3002 struct btrfs_trans_handle *trans;
3003
3004 trans = btrfs_start_transaction(fs_info->tree_root, 0);
3005 if (IS_ERR(trans)) {
3006 ret = PTR_ERR(trans);
3007 btrfs_err(fs_info,
3008 "scrub: failed to start transaction to fix super block errors: %d", ret);
3009 return ret;
3010 }
3011 ret = btrfs_commit_transaction(trans);
3012 if (ret < 0)
3013 btrfs_err(fs_info,
3014 "scrub: failed to commit transaction to fix super block errors: %d", ret);
3015 }
0e94c4f4 3016 return ret;
e89c4a9c
JB
3017out:
3018 scrub_workers_put(fs_info);
0e94c4f4
DS
3019out_free_ctx:
3020 scrub_free_ctx(sctx);
3021
a2de733c
AJ
3022 return ret;
3023}
3024
2ff7e61e 3025void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
a2de733c 3026{
a2de733c
AJ
3027 mutex_lock(&fs_info->scrub_lock);
3028 atomic_inc(&fs_info->scrub_pause_req);
3029 while (atomic_read(&fs_info->scrubs_paused) !=
3030 atomic_read(&fs_info->scrubs_running)) {
3031 mutex_unlock(&fs_info->scrub_lock);
3032 wait_event(fs_info->scrub_pause_wait,
3033 atomic_read(&fs_info->scrubs_paused) ==
3034 atomic_read(&fs_info->scrubs_running));
3035 mutex_lock(&fs_info->scrub_lock);
3036 }
3037 mutex_unlock(&fs_info->scrub_lock);
a2de733c
AJ
3038}
3039
2ff7e61e 3040void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
a2de733c 3041{
a2de733c
AJ
3042 atomic_dec(&fs_info->scrub_pause_req);
3043 wake_up(&fs_info->scrub_pause_wait);
a2de733c
AJ
3044}
3045
aa1b8cd4 3046int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
a2de733c 3047{
a2de733c
AJ
3048 mutex_lock(&fs_info->scrub_lock);
3049 if (!atomic_read(&fs_info->scrubs_running)) {
3050 mutex_unlock(&fs_info->scrub_lock);
3051 return -ENOTCONN;
3052 }
3053
3054 atomic_inc(&fs_info->scrub_cancel_req);
3055 while (atomic_read(&fs_info->scrubs_running)) {
3056 mutex_unlock(&fs_info->scrub_lock);
3057 wait_event(fs_info->scrub_pause_wait,
3058 atomic_read(&fs_info->scrubs_running) == 0);
3059 mutex_lock(&fs_info->scrub_lock);
3060 }
3061 atomic_dec(&fs_info->scrub_cancel_req);
3062 mutex_unlock(&fs_info->scrub_lock);
3063
3064 return 0;
3065}
3066
163e97ee 3067int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
49b25e05 3068{
163e97ee 3069 struct btrfs_fs_info *fs_info = dev->fs_info;
d9d181c1 3070 struct scrub_ctx *sctx;
a2de733c
AJ
3071
3072 mutex_lock(&fs_info->scrub_lock);
cadbc0a0 3073 sctx = dev->scrub_ctx;
d9d181c1 3074 if (!sctx) {
a2de733c
AJ
3075 mutex_unlock(&fs_info->scrub_lock);
3076 return -ENOTCONN;
3077 }
d9d181c1 3078 atomic_inc(&sctx->cancel_req);
cadbc0a0 3079 while (dev->scrub_ctx) {
a2de733c
AJ
3080 mutex_unlock(&fs_info->scrub_lock);
3081 wait_event(fs_info->scrub_pause_wait,
cadbc0a0 3082 dev->scrub_ctx == NULL);
a2de733c
AJ
3083 mutex_lock(&fs_info->scrub_lock);
3084 }
3085 mutex_unlock(&fs_info->scrub_lock);
3086
3087 return 0;
3088}
1623edeb 3089
2ff7e61e 3090int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
a2de733c
AJ
3091 struct btrfs_scrub_progress *progress)
3092{
562d7b15 3093 struct btrfs_dev_lookup_args args = { .devid = devid };
a2de733c 3094 struct btrfs_device *dev;
d9d181c1 3095 struct scrub_ctx *sctx = NULL;
a2de733c 3096
0b246afa 3097 mutex_lock(&fs_info->fs_devices->device_list_mutex);
562d7b15 3098 dev = btrfs_find_device(fs_info->fs_devices, &args);
a2de733c 3099 if (dev)
cadbc0a0 3100 sctx = dev->scrub_ctx;
d9d181c1
SB
3101 if (sctx)
3102 memcpy(progress, &sctx->stat, sizeof(*progress));
0b246afa 3103 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a2de733c 3104
d9d181c1 3105 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
a2de733c 3106}