static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
wake_up(&sctx->list_wait);
}
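+/*
+ * Wait, with scrub_lock held, until there is no outstanding pause
+ * request. scrub_lock is dropped while sleeping so that the task
+ * that raised @scrub_pause_req can take it; the lock is held again
+ * on return.
+ */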
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+ while (atomic_read(&fs_info->scrub_pause_req)) {
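+ /* drop scrub_lock so the pausing task is not blocked on us */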
+ mutex_unlock(&fs_info->scrub_lock);
+ wait_event(fs_info->scrub_pause_wait,
+ atomic_read(&fs_info->scrub_pause_req) == 0);
+ mutex_lock(&fs_info->scrub_lock);
+ }
+}
+
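+/*
+ * Mark this scrub as paused, wait until all pause requests have been
+ * dropped, then mark it running again. Unlike
+ * __scrub_blocked_if_needed(), this is called without scrub_lock held.
+ */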
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
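+ /*
+ * announce that we reached a pause point, so a task waiting for
+ * all scrubs to pause can stop waiting for this one
+ */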
+ atomic_inc(&fs_info->scrubs_paused);
+ wake_up(&fs_info->scrub_pause_wait);
+
+ mutex_lock(&fs_info->scrub_lock);
+ __scrub_blocked_if_needed(fs_info);
+ atomic_dec(&fs_info->scrubs_paused);
+ mutex_unlock(&fs_info->scrub_lock);
+
+ wake_up(&fs_info->scrub_pause_wait);
+}
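+
+/*
+ * Pause protocol used by the two helpers above: the task that wants
+ * scrub quiesced (e.g. a transaction commit) raises @scrub_pause_req
+ * and waits until every running scrub has bumped @scrubs_paused;
+ * each scrub then parks in scrub_blocked_if_needed() until the
+ * request count drops back to zero.
+ */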
+
/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
/*
 * we deliberately ignore the fact that ipath might have been too
 * small to hold all of the paths here
 */
for (i = 0; i < ipath->fspath->elem_cnt; ++i)
- printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
+ printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
"%s, sector %llu, root %llu, inode %llu, offset %llu, "
"length %llu, links %u (path: %s)\n", swarn->errstr,
swarn->logical, rcu_str_deref(swarn->dev->name),
return 0;
err:
- printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
+ printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
"resolving failed with ret=%d\n", swarn->errstr,
swarn->logical, rcu_str_deref(swarn->dev->name),
ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
&ref_root, &ref_level);
printk_in_rcu(KERN_WARNING
- "btrfs: %s at logical %llu on dev %s, "
+ "BTRFS: %s at logical %llu on dev %s, "
"sector %llu: metadata %s (level %d) in tree "
"%llu\n", errstr, swarn.logical,
rcu_str_deref(dev->name),
struct scrub_fixup_nodatasum *fixup;
struct scrub_ctx *sctx;
struct btrfs_trans_handle *trans = NULL;
- struct btrfs_fs_info *fs_info;
struct btrfs_path *path;
int uncorrectable = 0;
fixup = container_of(work, struct scrub_fixup_nodatasum, work);
sctx = fixup->sctx;
- fs_info = fixup->root->fs_info;
path = btrfs_alloc_path();
if (!path) {
btrfs_dev_replace_stats_inc(
&sctx->dev_root->fs_info->dev_replace.
num_uncorrectable_read_errors);
- printk_ratelimited_in_rcu(KERN_ERR
- "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
+ printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
+ "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
fixup->logical, rcu_str_deref(fixup->dev->name));
}
sctx->stat.corrected_errors++;
spin_unlock(&sctx->stat_lock);
printk_ratelimited_in_rcu(KERN_ERR
- "btrfs: fixed up error at logical %llu on dev %s\n",
+ "BTRFS: fixed up error at logical %llu on dev %s\n",
logical, rcu_str_deref(dev->name));
}
} else {
sctx->stat.uncorrectable_errors++;
spin_unlock(&sctx->stat_lock);
printk_ratelimited_in_rcu(KERN_ERR
- "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
+ "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
logical, rcu_str_deref(dev->name));
}
int ret;
if (!page_bad->dev->bdev) {
- printk_ratelimited(KERN_WARNING
- "btrfs: scrub_repair_page_from_good_copy(bdev == NULL) is unexpected!\n");
+ printk_ratelimited(KERN_WARNING "BTRFS: "
+ "scrub_repair_page_from_good_copy(bdev == NULL) "
+ "is unexpected!\n");
return -EIO;
}
* This case is handled correctly (but _very_ slowly).
*/
printk_ratelimited(KERN_WARNING
- "btrfs: scrub_submit(bio bdev == NULL) is unexpected!\n");
+ "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
bio_endio(sbio->bio, -EIO);
} else {
btrfsic_submit_bio(READ, sbio->bio);
wait_event(sctx->list_wait,
atomic_read(&sctx->bios_in_flight) == 0);
- atomic_inc(&fs_info->scrubs_paused);
- wake_up(&fs_info->scrub_pause_wait);
+ scrub_blocked_if_needed(fs_info);
/* FIXME it might be better to start readahead at commit root */
key_start.objectid = logical;
if (!IS_ERR(reada2))
btrfs_reada_wait(reada2);
- mutex_lock(&fs_info->scrub_lock);
- while (atomic_read(&fs_info->scrub_pause_req)) {
- mutex_unlock(&fs_info->scrub_lock);
- wait_event(fs_info->scrub_pause_wait,
- atomic_read(&fs_info->scrub_pause_req) == 0);
- mutex_lock(&fs_info->scrub_lock);
- }
- atomic_dec(&fs_info->scrubs_paused);
- mutex_unlock(&fs_info->scrub_lock);
- wake_up(&fs_info->scrub_pause_wait);
/*
* collect all data csums for the stripe to avoid seeking during
wait_event(sctx->list_wait,
atomic_read(&sctx->bios_in_flight) == 0);
atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
- atomic_inc(&fs_info->scrubs_paused);
- wake_up(&fs_info->scrub_pause_wait);
- mutex_lock(&fs_info->scrub_lock);
- while (atomic_read(&fs_info->scrub_pause_req)) {
- mutex_unlock(&fs_info->scrub_lock);
- wait_event(fs_info->scrub_pause_wait,
- atomic_read(&fs_info->scrub_pause_req) == 0);
- mutex_lock(&fs_info->scrub_lock);
- }
- atomic_dec(&fs_info->scrubs_paused);
- mutex_unlock(&fs_info->scrub_lock);
- wake_up(&fs_info->scrub_pause_wait);
+ scrub_blocked_if_needed(fs_info);
}
key.objectid = logical;
if (key.objectid < logical &&
(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
- printk(KERN_ERR
- "btrfs scrub: tree block %llu spanning "
- "stripes, ignored. logical=%llu\n",
+ btrfs_err(fs_info,
+ "scrub: tree block %llu spanning "
+ "stripes, ignored. logical=%llu",
key.objectid, logical);
goto next;
}
wait_event(sctx->list_wait,
atomic_read(&sctx->bios_in_flight) == 0);
atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
- atomic_inc(&fs_info->scrubs_paused);
- wake_up(&fs_info->scrub_pause_wait);
wait_event(sctx->list_wait,
atomic_read(&sctx->workers_pending) == 0);
-
- mutex_lock(&fs_info->scrub_lock);
- while (atomic_read(&fs_info->scrub_pause_req)) {
- mutex_unlock(&fs_info->scrub_lock);
- wait_event(fs_info->scrub_pause_wait,
- atomic_read(&fs_info->scrub_pause_req) == 0);
- mutex_lock(&fs_info->scrub_lock);
- }
- atomic_dec(&fs_info->scrubs_paused);
- mutex_unlock(&fs_info->scrub_lock);
- wake_up(&fs_info->scrub_pause_wait);
+ scrub_blocked_if_needed(fs_info);
btrfs_put_block_group(cache);
if (ret)
* check some assumptions
*/
if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
- printk(KERN_ERR
- "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
+ btrfs_err(fs_info,
+ "scrub: size assumption nodesize == leafsize (%d == %d) fails",
fs_info->chunk_root->nodesize,
fs_info->chunk_root->leafsize);
return -EINVAL;
* the way scrub is implemented. Do not handle this
* situation at all because it won't ever happen.
*/
- printk(KERN_ERR
- "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
+ btrfs_err(fs_info,
+ "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
return -EINVAL;
}
if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
/* not supported for data w/o checksums */
- printk(KERN_ERR
- "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails\n",
+ btrfs_err(fs_info,
+ "scrub: size assumption sectorsize != PAGE_SIZE "
+ "(%d != %lu) fails",
fs_info->chunk_root->sectorsize, PAGE_SIZE);
return -EINVAL;
}
* would exhaust the array bounds of pagev member in
* struct scrub_block
*/
- pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
+ btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
+ "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
fs_info->chunk_root->nodesize,
SCRUB_MAX_PAGES_PER_BLOCK,
fs_info->chunk_root->sectorsize,
}
sctx->readonly = readonly;
dev->scrub_device = sctx;
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ /*
+ * by checking @scrub_pause_req here, we avoid racing with a
+ * transaction commit that has asked scrub to pause. scrub_lock is
+ * still held at this point, hence the lock-assuming
+ * __scrub_blocked_if_needed().
+ */
+ __scrub_blocked_if_needed(fs_info);
atomic_inc(&fs_info->scrubs_running);
mutex_unlock(&fs_info->scrub_lock);
* by holding the device list mutex, we do not race with the super
* block writes that a log tree sync can kick off.
*/
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
ret = scrub_supers(sctx, dev);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
}
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
if (!ret)
ret = scrub_enumerate_chunks(sctx, dev, start, end,
ret = iterate_inodes_from_logical(logical, fs_info, path,
record_inode_for_nocow, nocow_ctx);
if (ret != 0 && ret != -ENOENT) {
- pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n",
+ btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
+ "phys %llu, len %llu, mir %u, ret %d",
logical, physical_for_dev_replace, len, mirror_num,
ret);
not_written = 1;
again:
page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
if (!page) {
- pr_err("find_or_create_page() failed\n");
+ btrfs_err(fs_info, "find_or_create_page() failed");
ret = -ENOMEM;
goto out;
}
return -EIO;
if (!dev->bdev) {
printk_ratelimited(KERN_WARNING
- "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
+ "BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
return -EIO;
}
bio = btrfs_io_bio_alloc(GFP_NOFS, 1);