// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
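
/*
 * Online scrub for the AG header blocks: the backup superblocks, the AGF
 * (free space header), the AGFL (free list), and the AGI (inode header).
 * Each scrubber checks the header fields against the mount geometry and,
 * where btree cursors are available, cross-references the header against
 * the AG btrees.
 */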

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xchk_ag_init_existing(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	/*
	 * Grab an active reference to the perag structure.  If we can't get
	 * it, we're racing with something that's tearing down the AG, so
	 * signal that the AG no longer exists.
	 */
	pag = xfs_perag_get(mp, agno);
	if (!pag)
		return -ENOENT;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * -EFSCORRUPTED.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		fallthrough;
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		goto out_pag;

	sb = bp->b_addr;

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
			(cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
			(cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frexents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);
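
	/*
	 * sb_bad_features2 mirrors sb_features2; it exists because an old
	 * struct packing bug could put sb_features2 at two different disk
	 * offsets, so both copies are maintained and should stay in sync.
	 * A mismatch below is therefore only preen-worthy, not corruption.
	 */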

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (xfs_sb_is_v5(&mp->m_sb))
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	if (!xfs_has_crc(mp)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_has_metauuid(mp)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);
out_pag:
	xfs_perag_put(pag);
	return error;
}

/* AGF */

/* Tally freespace record lengths. */
static int
xchk_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	const struct xfs_alloc_rec_incore *rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_extlen_t		blocks = 0;
	int			error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
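
/*
 * Note: agf_btreeblks counts the blocks consumed by the free space btrees
 * (and the rmap btree) but does not include each btree's root block, which
 * is why one block is subtracted from every per-btree count below.
 */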
/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* agf_btreeblks didn't exist before lazysbcount */
	if (!xfs_has_lazysbcount(sc->mp))
		return;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_has_rmapbt(mp) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = sc->sa.agf_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_has_rmapbt(mp)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_has_reflink(mp)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
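	/*
	 * The AGFL is a circular array of xfs_agfl_size(mp) slots; flfirst
	 * and fllast are the head and tail indices, so the active count has
	 * to account for wrapping past the end of the array.
	 */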
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Do the incore counters match? */
	if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (xfs_has_lazysbcount(sc->mp) &&
	    pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	xchk_agf_xref(sc);
out:
	return error;
}

/* AGFL */
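
/*
 * Context passed to the AGFL walker below: entries[] collects each AGFL
 * block number seen so that the caller can sort the list afterwards and
 * look for duplicate entries.
 */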
struct xchk_agfl_info {
	unsigned int		sz_entries;
	unsigned int		nr_entries;
	xfs_agblock_t		*entries;
	struct xfs_scrub	*sc;
};

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xchk_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xchk_agfl_info	*sai = priv;
	struct xfs_scrub	*sc = sai->sc;
	xfs_agnumber_t		agno = sc->sa.pag->pag_agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sc->sa.agfl_bp);

	xchk_agfl_block_xref(sc, agbno);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}
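
/* Compare two AGFL block numbers; used to sort the entries for the dup check. */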
static int
xchk_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	unsigned int		agflcount;
	unsigned int		i;
	int			error;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xchk_buffer_recheck(sc, sc->sa.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = sc->sa.agf_bp->b_addr;
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr,
			sc->sa.agfl_bp, xchk_agfl_block, &sai);
	if (error == -ECANCELED) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Check agi_[fi]blocks against tree size */
static inline void
xchk_agi_xref_fiblocks(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error = 0;

	if (!xfs_has_inobtcounts(sc->mp))
		return;

	if (sc->sa.ino_cur) {
		error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_iblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_fblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agi_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_agi_xref_icounts(sc);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agi_xref_fiblocks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = sc->sa.agi_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_has_finobt(mp)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
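
	/*
	 * The unlinked buckets are the on-disk heads of the AGI unlinked
	 * lists, which track inodes that have been deleted but are still
	 * open; every bucket must hold a valid inode number in this AG or
	 * NULLAGINO.
	 */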
	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (!xfs_verify_agino_or_null(mp, agno, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Do the incore counters match? */
	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	xchk_agi_xref(sc);
out:
	return error;
}