// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 */
#ifndef __XFS_SCRUB_COMMON_H__
#define __XFS_SCRUB_COMMON_H__

/*
 * We /could/ terminate a scrub/repair operation early.  If we're not
 * in a good place to continue (fatal signal, etc.) then bail out.
 * Note that we're careful not to make any judgements about *error.
 */
static inline bool
xchk_should_terminate(struct xfs_scrub *sc, int *error)
{
	/*
	 * If preemption is disabled, we need to yield to the scheduler every
	 * few seconds so that we don't run afoul of the soft lockup watchdog
	 * or RCU stall detector.
	 */
	cond_resched();

	if (fatal_signal_pending(current)) {
		if (*error == 0)
			*error = -EINTR;
		return true;
	}
	return false;
}
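/*
 * Hypothetical caller sketch (not part of this header; xchk_examine_ag is
 * a made-up helper): a long-running scan would poll xchk_should_terminate()
 * on each iteration and bail out without clobbering an error code that was
 * already set:
 *
 *	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 *		if (xchk_should_terminate(sc, &error))
 *			break;
 *		error = xchk_examine_ag(sc, agno);
 *		if (error)
 *			break;
 *	}
 */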
int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
void xchk_trans_cancel(struct xfs_scrub *sc);

bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
		xfs_agblock_t bno, int *error);
bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset, int *error);

bool xchk_xref_process_error(struct xfs_scrub *sc,
		xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset, int *error);

void xchk_block_set_preen(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);

void xchk_set_corrupt(struct xfs_scrub *sc);
void xchk_block_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);

void xchk_block_xref_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc,
		xfs_ino_t ino);
void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset);

void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);

void xchk_set_incomplete(struct xfs_scrub *sc);
int xchk_checkpoint_log(struct xfs_mount *mp);

/* Are we set up for a cross-referencing check? */
bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
		struct xfs_btree_cur **curpp);
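/*
 * Hypothetical usage sketch (not part of this header): a cross-reference
 * helper typically queries one of the sc->sa btree cursors and then lets
 * xchk_should_check_xref() decide whether the answer is usable; the query
 * below is a made-up placeholder:
 *
 *	error = xchk_query_rmapbt(sc->sa.rmap_cur, agbno, &has_rmap);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *	if (!has_rmap)
 *		xchk_block_xref_set_corrupt(sc, bp);
 */
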
int xchk_setup_agheader(struct xfs_scrub *sc);
int xchk_setup_fs(struct xfs_scrub *sc);
int xchk_setup_ag_allocbt(struct xfs_scrub *sc);
int xchk_setup_ag_iallocbt(struct xfs_scrub *sc);
int xchk_setup_ag_rmapbt(struct xfs_scrub *sc);
int xchk_setup_ag_refcountbt(struct xfs_scrub *sc);
int xchk_setup_inode(struct xfs_scrub *sc);
int xchk_setup_inode_bmap(struct xfs_scrub *sc);
int xchk_setup_inode_bmap_data(struct xfs_scrub *sc);
int xchk_setup_directory(struct xfs_scrub *sc);
int xchk_setup_xattr(struct xfs_scrub *sc);
int xchk_setup_symlink(struct xfs_scrub *sc);
int xchk_setup_parent(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
int xchk_setup_rt(struct xfs_scrub *sc);
#else
static inline int
xchk_setup_rt(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_setup_quota(struct xfs_scrub *sc);
#else
static inline int
xchk_setup_quota(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
int xchk_setup_fscounters(struct xfs_scrub *sc);

void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xchk_ag *sa);

/*
 * Grab all AG resources, treating the inability to grab the perag structure as
 * a fs corruption.  This is intended for callers checking an ondisk reference
 * to a given AG, which means that the AG must still exist.
 */
static inline int
xchk_ag_init_existing(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error = xchk_ag_init(sc, agno, sa);

	return error == -ENOENT ? -EFSCORRUPTED : error;
}
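/*
 * Hypothetical caller sketch (not part of this header): an ondisk record
 * pointing at another AG implies that AG must exist, so the setup failure
 * is reported as cross-referencing corruption rather than a lookup miss:
 *
 *	error = xchk_ag_init_existing(sc, agno, &sc->sa);
 *	if (!xchk_xref_process_error(sc, agno, agbno, &error))
 *		return;
 */
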
int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xchk_ag *sa);
void xchk_ag_btcur_free(struct xchk_ag *sa);
void xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
		const struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);

int xchk_setup_ag_btree(struct xfs_scrub *sc, bool force_log);
int xchk_iget_for_scrubbing(struct xfs_scrub *sc);
int xchk_setup_inode_contents(struct xfs_scrub *sc, unsigned int resblks);
void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);

int xchk_iget(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp);
int xchk_iget_agi(struct xfs_scrub *sc, xfs_ino_t inum,
		struct xfs_buf **agi_bpp, struct xfs_inode **ipp);
void xchk_irele(struct xfs_scrub *sc, struct xfs_inode *ip);
int xchk_install_handle_inode(struct xfs_scrub *sc, struct xfs_inode *ip);

/*
 * Don't bother cross-referencing if we already found corruption or cross
 * referencing discrepancies.
 */
static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
{
	return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
			       XFS_SCRUB_OFLAG_XCORRUPT);
}
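/*
 * Hypothetical usage sketch (not part of this header): a cross-reference
 * pass can bail out immediately once the primary check has already marked
 * the metadata corrupt:
 *
 *	if (xchk_skip_xref(sc->sm))
 *		return;
 *	... run the cross-reference checks ...
 */
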
int xchk_metadata_inode_forks(struct xfs_scrub *sc);

/*
 * Setting up a hook to wait for intents to drain is costly -- we have to take
 * the CPU hotplug lock and force an i-cache flush on all CPUs once to set it
 * up, and again to tear it down.  These costs add up quickly, so we only want
 * to enable the drain waiter if the drain actually detected a conflict with
 * running intent chains.
 */
static inline bool xchk_need_intent_drain(struct xfs_scrub *sc)
{
	return sc->flags & XCHK_NEED_DRAIN;
}
void xchk_fsgates_enable(struct xfs_scrub *sc, unsigned int scrub_fshooks);
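/*
 * Hypothetical usage sketch (not part of this header; XCHK_FSGATES_DRAIN
 * is assumed to be the drain-related fsgates flag defined elsewhere in
 * scrub): a setup or retry path would only pay the hook setup cost after
 * a previous pass actually hit a conflict with running intent chains:
 *
 *	if (xchk_need_intent_drain(sc))
 *		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
 */
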
#endif	/* __XFS_SCRUB_COMMON_H__ */