2 * Copyright (C) 2017 Oracle. All Rights Reserved.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
11 * This program is distributed in the hope that it would be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
22 #include "xfs_shared.h"
23 #include "xfs_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_mount.h"
26 #include "xfs_defer.h"
27 #include "xfs_btree.h"
29 #include "xfs_log_format.h"
30 #include "xfs_trans.h"
32 #include "xfs_inode.h"
33 #include "xfs_inode_fork.h"
34 #include "xfs_alloc.h"
35 #include "xfs_rtalloc.h"
37 #include "xfs_bmap_util.h"
38 #include "xfs_bmap_btree.h"
40 #include "xfs_rmap_btree.h"
41 #include "xfs_refcount.h"
42 #include "scrub/xfs_scrub.h"
43 #include "scrub/scrub.h"
44 #include "scrub/common.h"
45 #include "scrub/btree.h"
46 #include "scrub/trace.h"
48 /* Set us up with an inode's bmap. */
/*
 * NOTE(review): this chunk is truncated -- the return type, the inode
 * parameter, local declarations, and the error checks that normally
 * follow each failable call are missing between the visible lines.
 * Comments below describe only what is visible here.
 */
50 xfs_scrub_setup_inode_bmap(
51 struct xfs_scrub_context *sc,
/* Grab and reference the inode to be scrubbed. */
56 error = xfs_scrub_get_inode(sc, ip);
/* Take IOLOCK/MMAPLOCK exclusively to freeze I/O and page faults. */
60 sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
61 xfs_ilock(sc->ip, sc->ilock_flags);
64 * We don't want any ephemeral data fork updates sitting around
65 * while we inspect block mappings, so wait for directio to finish
66 * and flush dirty data if we have delalloc reservations.
68 if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
69 sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
70 inode_dio_wait(VFS_I(sc->ip));
71 error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
76 /* Got the inode, lock it and we're ready to go. */
/* Empty transaction keeps us from deadlocking with buffer reads. */
77 error = xfs_scrub_trans_alloc(sc, 0);
80 sc->ilock_flags |= XFS_ILOCK_EXCL;
81 xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
84 /* scrub teardown will unlock and release the inode */
89 * Inode fork block mapping (BMBT) scrubber.
90 * More complex than the others because we have to scrub
91 * all the extents regardless of whether or not the fork
/*
 * Shared state passed to every per-extent check in this scrubber.
 * NOTE(review): the struct is truncated here -- later code also reads
 * info->whichfork, info->is_rt, and info->is_shared, so those fields
 * must exist in the lines missing from this view.
 */
95 struct xfs_scrub_bmap_info {
/* Scrub context we are operating under. */
96 struct xfs_scrub_context *sc;
/* End of the previous extent; used to catch out-of-order records. */
97 xfs_fileoff_t lastoff;
103 /* Look for a corresponding rmap for this irec. */
/*
 * NOTE(review): truncated -- return type, the agbno/owner parameters,
 * some locals (error, has_rmap), early returns, and the closing brace
 * are not visible.  The visible flow: build lookup flags, pick the
 * range lookup for possibly-shared extents or the plain LE lookup
 * otherwise, then fetch and sanity-check the rmap record.
 */
105 xfs_scrub_bmap_get_rmap(
106 struct xfs_scrub_bmap_info *info,
107 struct xfs_bmbt_irec *irec,
110 struct xfs_rmap_irec *rmap)
112 xfs_fileoff_t offset;
113 unsigned int rflags = 0;
/* Attr fork rmaps carry the ATTR_FORK flag; match on it. */
117 if (info->whichfork == XFS_ATTR_FORK)
118 rflags |= XFS_RMAP_ATTR_FORK;
121 * CoW staging extents are owned (on disk) by the refcountbt, so
122 * their rmaps do not have offsets.
124 if (info->whichfork == XFS_COW_FORK)
127 offset = irec->br_startoff;
130 * If the caller thinks this could be a shared bmbt extent (IOWs,
131 * any data fork extent of a reflink inode) then we have to use the
132 * range rmap lookup to make sure we get the correct owner/offset.
134 if (info->is_shared) {
135 error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
136 owner, offset, rflags, rmap, &has_rmap);
137 if (!xfs_scrub_should_check_xref(info->sc, &error,
138 &info->sc->sa.rmap_cur))
144 * Otherwise, use the (faster) regular lookup.
146 error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
147 offset, rflags, &has_rmap);
148 if (!xfs_scrub_should_check_xref(info->sc, &error,
149 &info->sc->sa.rmap_cur))
/* Plain lookup only positions the cursor; retrieve the record. */
154 error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
155 if (!xfs_scrub_should_check_xref(info->sc, &error,
156 &info->sc->sa.rmap_cur))
/* No rmap found for this extent: flag a cross-ref corruption. */
161 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
166 /* Make sure that we have rmapbt records for this extent. */
/*
 * NOTE(review): truncated -- return type, the agbno parameter, the
 * owner local, early returns, and several corruption-report arguments
 * are missing.  Visible flow: find the rmap for this extent, then
 * verify that the rmap fully covers it and that owner, offsets, and
 * flags all agree with the bmbt record.
 */
168 xfs_scrub_bmap_xref_rmap(
169 struct xfs_scrub_bmap_info *info,
170 struct xfs_bmbt_irec *irec,
173 struct xfs_rmap_irec rmap;
174 unsigned long long rmap_end;
/* Nothing to do without an rmap cursor or when xref is disabled. */
177 if (!info->sc->sa.rmap_cur || xfs_scrub_skip_xref(info->sc->sm))
/* CoW fork blocks are owned by OWN_COW, not the inode. */
180 if (info->whichfork == XFS_COW_FORK)
181 owner = XFS_RMAP_OWN_COW;
183 owner = info->sc->ip->i_ino;
185 /* Find the rmap record for this irec. */
186 if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap))
189 /* Check the rmap. */
/* The rmap must cover the entire bmbt extent. */
190 rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
191 if (rmap.rm_startblock > agbno ||
192 agbno + irec->br_blockcount > rmap_end)
193 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
197 * Check the logical offsets if applicable. CoW staging extents
198 * don't track logical offsets since the mappings only exist in
201 if (info->whichfork != XFS_COW_FORK) {
202 rmap_end = (unsigned long long)rmap.rm_offset +
204 if (rmap.rm_offset > irec->br_startoff ||
205 irec->br_startoff + irec->br_blockcount > rmap_end)
206 xfs_scrub_fblock_xref_set_corrupt(info->sc,
207 info->whichfork, irec->br_startoff);
/* Owner recorded in the rmap must match what we computed above. */
210 if (rmap.rm_owner != owner)
211 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
215 * Check for discrepancies between the unwritten flag in the irec and
216 * the rmap. Note that the (in-memory) CoW fork distinguishes between
217 * unwritten and written extents, but we don't track that in the rmap
218 * records because the blocks are owned (on-disk) by the refcountbt,
219 * which doesn't track unwritten state.
221 if (owner != XFS_RMAP_OWN_COW &&
222 irec->br_state == XFS_EXT_UNWRITTEN &&
223 !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
224 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
/* Attr fork extents must carry the ATTR_FORK rmap flag. */
227 if (info->whichfork == XFS_ATTR_FORK &&
228 !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
229 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
/* A data extent should never be marked as a bmbt block. */
231 if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
232 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
236 /* Cross-reference a single rtdev extent record. */
/*
 * NOTE(review): truncated -- return type, early-return body, and the
 * closing brace are not visible.  Skips xref work once corruption has
 * already been found, then checks the extent against rt space usage.
 */
238 xfs_scrub_bmap_rt_extent_xref(
239 struct xfs_scrub_bmap_info *info,
240 struct xfs_inode *ip,
241 struct xfs_btree_cur *cur,
242 struct xfs_bmbt_irec *irec)
244 if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
/* Extent blocks must be marked in use in the realtime bitmap. */
247 xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock,
248 irec->br_blockcount);
251 /* Cross-reference a single datadev extent record. */
/*
 * NOTE(review): truncated -- return type, locals (agno, agbno, len,
 * error), case labels for the switch, and the closing brace are
 * missing.  Visible flow: set up the per-AG scrub state for the AG
 * containing this extent, cross-reference against the space btrees,
 * rmapbt, and refcountbt, then tear the AG state back down.
 */
253 xfs_scrub_bmap_extent_xref(
254 struct xfs_scrub_bmap_info *info,
255 struct xfs_inode *ip,
256 struct xfs_btree_cur *cur,
257 struct xfs_bmbt_irec *irec)
259 struct xfs_mount *mp = info->sc->mp;
/* No point in xref once corruption has already been flagged. */
265 if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
268 agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
269 agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
270 len = irec->br_blockcount;
/* Attach AG headers and btree cursors for this AG. */
272 error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa);
273 if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork,
274 irec->br_startoff, &error))
277 xfs_scrub_xref_is_used_space(info->sc, agbno, len);
278 xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len);
279 xfs_scrub_bmap_xref_rmap(info, irec, agbno);
/* Fork-specific refcountbt checks, presumably DATA vs COW cases. */
280 switch (info->whichfork) {
282 if (xfs_is_reflink_inode(info->sc->ip))
286 xfs_scrub_xref_is_not_shared(info->sc, agbno,
287 irec->br_blockcount);
290 xfs_scrub_xref_is_cow_staging(info->sc, agbno,
291 irec->br_blockcount);
295 xfs_scrub_ag_free(info->sc, &info->sc->sa);
298 /* Scrub a single extent record. */
/*
 * NOTE(review): truncated -- return type, locals (end, error), the
 * corruption-report offset arguments, and the condition guards for
 * the rt/datadev checks are missing from this view.  Visible flow:
 * sanity-check ordering, start block, length, and device bounds of
 * one in-core extent record, then cross-reference it.
 */
300 xfs_scrub_bmap_extent(
301 struct xfs_inode *ip,
302 struct xfs_btree_cur *cur,
303 struct xfs_scrub_bmap_info *info,
304 struct xfs_bmbt_irec *irec)
306 struct xfs_mount *mp = info->sc->mp;
307 struct xfs_buf *bp = NULL;
/* Grab the leaf block buffer, presumably for error reporting. */
312 xfs_btree_get_block(cur, 0, &bp);
315 * Check for out-of-order extents. This record could have come
316 * from the incore list, for which there is no ordering check.
318 if (irec->br_startoff < info->lastoff)
319 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
322 /* There should never be a "hole" extent in either extent list. */
323 if (irec->br_startblock == HOLESTARTBLOCK)
324 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
328 * Check for delalloc extents. We never iterate the ones in the
329 * in-core extent scan, and we should never see these in the bmbt.
331 if (isnullstartblock(irec->br_startblock))
332 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
335 /* Make sure the extent points to a valid place. */
336 if (irec->br_blockcount > MAXEXTLEN)
337 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
/* Overflow check: start + count must not wrap the block number. */
339 if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
340 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
342 end = irec->br_startblock + irec->br_blockcount - 1;
/* Realtime extents must fall inside the rt device... */
344 (!xfs_verify_rtbno(mp, irec->br_startblock) ||
345 !xfs_verify_rtbno(mp, end)))
346 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
/* ...and data-device extents must stay within a single AG. */
349 (!xfs_verify_fsbno(mp, irec->br_startblock) ||
350 !xfs_verify_fsbno(mp, end) ||
351 XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
352 XFS_FSB_TO_AGNO(mp, end)))
353 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
356 /* We don't allow unwritten extents on attr forks. */
357 if (irec->br_state == XFS_EXT_UNWRITTEN &&
358 info->whichfork == XFS_ATTR_FORK)
359 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
/* Cross-reference against the appropriate device's metadata. */
363 xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec);
365 xfs_scrub_bmap_extent_xref(info, ip, cur, irec);
/* Remember where this extent ended for the next ordering check. */
367 info->lastoff = irec->br_startoff + irec->br_blockcount;
371 /* Scrub a bmbt record. */
/*
 * NOTE(review): truncated -- return type, locals (owner, i), and the
 * corruption-report arguments are missing.  Visible flow: when at the
 * first record of a leaf, verify the owner field of each btree block
 * up the cursor path, then decode and scrub the bmbt record itself.
 */
373 xfs_scrub_bmapbt_rec(
374 struct xfs_scrub_btree *bs,
375 union xfs_btree_rec *rec)
377 struct xfs_bmbt_irec irec;
378 struct xfs_scrub_bmap_info *info = bs->private;
379 struct xfs_inode *ip = bs->cur->bc_private.b.ip;
380 struct xfs_buf *bp = NULL;
381 struct xfs_btree_block *block;
386 * Check the owners of the btree blocks up to the level below
387 * the root since the verifiers don't do that.
/* Owner fields only exist on CRC (v5) filesystems. */
389 if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
390 bs->cur->bc_ptrs[0] == 1) {
391 for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
392 block = xfs_btree_get_block(bs->cur, i, &bp);
393 owner = be64_to_cpu(block->bb_u.l.bb_owner);
394 if (owner != ip->i_ino)
395 xfs_scrub_fblock_set_corrupt(bs->sc,
400 /* Set up the in-core record and scrub it. */
401 xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
402 return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec);
405 /* Scan the btree records. */
/*
 * NOTE(review): truncated -- return type, the whichfork parameter,
 * the error local, and the cursor-deletion argument tail are missing.
 * Walks the whole bmbt for one fork, running xfs_scrub_bmapbt_rec on
 * every record, then tears down the cursor.
 */
407 xfs_scrub_bmap_btree(
408 struct xfs_scrub_context *sc,
410 struct xfs_scrub_bmap_info *info)
412 struct xfs_owner_info oinfo;
413 struct xfs_mount *mp = sc->mp;
414 struct xfs_inode *ip = sc->ip;
415 struct xfs_btree_cur *cur;
418 cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
419 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
420 error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info);
/* Cursor teardown path depends on whether the walk errored out. */
421 xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR :
/*
 * State for the rmap-to-bmap reverse check.
 * NOTE(review): truncated -- a whichfork member is referenced by the
 * check functions below but is not visible in this view.
 */
426 struct xfs_scrub_bmap_check_rmap_info {
/* Scrub context we are operating under. */
427 struct xfs_scrub_context *sc;
/* Incore extent cursor used to walk the fork's mappings. */
429 struct xfs_iext_cursor icur;
432 /* Can we find bmaps that fit this rmap? */
/*
 * NOTE(review): truncated -- return type, the priv parameter, the
 * have_map local, loop braces, early returns, and corruption-report
 * offsets are missing.  Visible flow: skip rmaps that belong to other
 * owners/forks, then walk the incore extent list to prove the whole
 * rmap range is covered by matching bmbt records.
 */
434 xfs_scrub_bmap_check_rmap(
435 struct xfs_btree_cur *cur,
436 struct xfs_rmap_irec *rec,
439 struct xfs_bmbt_irec irec;
440 struct xfs_scrub_bmap_check_rmap_info *sbcri = priv;
441 struct xfs_ifork *ifp;
442 struct xfs_scrub_context *sc = sbcri->sc;
445 /* Is this even the right fork? */
446 if (rec->rm_owner != sc->ip->i_ino)
/* XOR: the rmap's ATTR_FORK flag must match the fork being checked. */
448 if ((sbcri->whichfork == XFS_ATTR_FORK) ^
449 !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
451 if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
454 /* Now look up the bmbt record. */
455 ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
/* An rmap for a fork that doesn't exist is corruption. */
457 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
461 have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
462 &sbcri->icur, &irec);
464 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
467 * bmap extent record lengths are constrained to 2^21 blocks in length
468 * because of space constraints in the on-disk metadata structure.
469 * However, rmap extent record lengths are constrained only by AG
470 * length, so we have to loop through the bmbt to make sure that the
471 * entire rmap is covered by bmbt records.
474 if (irec.br_startoff != rec->rm_offset)
475 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
477 if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
478 cur->bc_private.a.agno, rec->rm_startblock))
479 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
481 if (irec.br_blockcount > rec->rm_blockcount)
482 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
484 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
/* Advance the (local copy of the) rmap past this bmbt extent. */
486 rec->rm_startblock += irec.br_blockcount;
487 rec->rm_offset += irec.br_blockcount;
488 rec->rm_blockcount -= irec.br_blockcount;
489 if (rec->rm_blockcount == 0)
491 have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
/* Running out of bmbt extents before the rmap is used up: corrupt. */
493 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
/* Abort the rmapbt walk once corruption has been found. */
498 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
499 return XFS_BTREE_QUERY_RANGE_ABORT;
503 /* Make sure each rmap has a corresponding bmbt entry. */
/*
 * NOTE(review): truncated -- return type, whichfork/agno parameters,
 * the agf buffer local, error checks, and cleanup labels are missing.
 * Visible flow: read the AGF, build an rmapbt cursor, run the
 * rmap-to-bmap check over every rmap in the AG, then clean up.
 */
505 xfs_scrub_bmap_check_ag_rmaps(
506 struct xfs_scrub_context *sc,
510 struct xfs_scrub_bmap_check_rmap_info sbcri;
511 struct xfs_btree_cur *cur;
515 error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
519 cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
526 sbcri.whichfork = whichfork;
527 error = xfs_rmap_query_all(cur, xfs_scrub_bmap_check_rmap, &sbcri);
/* A range abort means we stopped early on purpose, not an error. */
528 if (error == XFS_BTREE_QUERY_RANGE_ABORT)
531 xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
533 xfs_trans_brelse(sc->tp, agf);
537 /* Make sure each rmap has a corresponding bmbt entry. */
/*
 * NOTE(review): truncated -- return type, the whichfork parameter,
 * locals (size, agno, error), early returns, and loop error handling
 * are missing.  Visible flow: decide whether the reverse check is
 * worth running for this fork, then iterate every AG's rmapbt.
 */
539 xfs_scrub_bmap_check_rmaps(
540 struct xfs_scrub_context *sc,
/* Skip if there is no rmapbt, for CoW forks, or if already corrupt. */
547 if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
548 whichfork == XFS_COW_FORK ||
549 (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
552 /* Don't support realtime rmap checks yet. */
553 if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
557 * Only do this for complex maps that are in btree format, or for
558 * situations where we would seem to have a size but zero extents.
559 * The inode repair code can zap broken iforks, which means we have
560 * to flag this bmap as corrupt if there are rmaps that need to be
565 size = i_size_read(VFS_I(sc->ip));
/* For the attr fork, "size" is just whether the fork exists. */
568 size = XFS_IFORK_Q(sc->ip);
574 if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE &&
575 (size == 0 || XFS_IFORK_NEXTENTS(sc->ip, whichfork) > 0))
578 for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
579 error = xfs_scrub_bmap_check_ag_rmaps(sc, whichfork, agno);
/* Stop scanning AGs once corruption has been flagged. */
582 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
590 * Scrub an inode fork's block mappings.
592 * First we scan every record in every btree block, if applicable.
593 * Then we unconditionally scan the incore extent cache.
/*
 * NOTE(review): truncated -- the function name line, return type, the
 * whichfork parameter, the error local, several early returns/breaks,
 * switch braces, and goto labels are all missing from this view.
 */
597 struct xfs_scrub_context *sc,
600 struct xfs_bmbt_irec irec;
601 struct xfs_scrub_bmap_info info = { NULL };
602 struct xfs_mount *mp = sc->mp;
603 struct xfs_inode *ip = sc->ip;
604 struct xfs_ifork *ifp;
605 xfs_fileoff_t endoff;
606 struct xfs_iext_cursor icur;
609 ifp = XFS_IFORK_PTR(ip, whichfork);
/* Only the data fork of a realtime inode lives on the rt device. */
611 info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
612 info.whichfork = whichfork;
613 info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
618 /* Non-existent CoW forks are ignorable. */
621 /* No CoW forks on non-reflink inodes/filesystems. */
622 if (!xfs_is_reflink_inode(ip)) {
623 xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
/* An attr fork requires the attr or attr2 feature bit. */
630 if (!xfs_sb_version_hasattr(&mp->m_sb) &&
631 !xfs_sb_version_hasattr2(&mp->m_sb))
632 xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
635 ASSERT(whichfork == XFS_DATA_FORK);
639 /* Check the fork values */
640 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
641 case XFS_DINODE_FMT_UUID:
642 case XFS_DINODE_FMT_DEV:
643 case XFS_DINODE_FMT_LOCAL:
644 /* No mappings to check. */
646 case XFS_DINODE_FMT_EXTENTS:
647 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
648 xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
652 case XFS_DINODE_FMT_BTREE:
/* CoW forks are never kept in btree format on disk. */
653 if (whichfork == XFS_COW_FORK) {
654 xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
658 error = xfs_scrub_bmap_btree(sc, whichfork, &info);
663 xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
667 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
670 /* Now try to scrub the in-memory extent list. */
671 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
672 error = xfs_iread_extents(sc->tp, ip, whichfork);
673 if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
677 /* Find the offset of the last extent in the mapping. */
678 error = xfs_bmap_last_offset(ip, &endoff, whichfork);
679 if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
682 /* Scrub extent records. */
/* Re-fetch the fork; xfs_iread_extents may have populated it. */
684 ifp = XFS_IFORK_PTR(ip, whichfork);
685 for_each_xfs_iext(ifp, &icur, &irec) {
686 if (xfs_scrub_should_terminate(sc, &error) ||
687 (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
/* Delalloc extents are skipped in the incore scan. */
689 if (isnullstartblock(irec.br_startblock))
691 if (irec.br_startoff >= endoff) {
692 xfs_scrub_fblock_set_corrupt(sc, whichfork,
696 error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec);
/* Finally, run the rmap-to-bmap reverse check. */
702 error = xfs_scrub_bmap_check_rmaps(sc, whichfork);
703 if (!xfs_scrub_fblock_xref_process_error(sc, whichfork, 0, &error))
709 /* Scrub an inode's data fork. */
/* NOTE(review): the function name line is missing from this view. */
712 struct xfs_scrub_context *sc)
714 return xfs_scrub_bmap(sc, XFS_DATA_FORK);
717 /* Scrub an inode's attr fork. */
/* NOTE(review): the function name line is missing from this view. */
720 struct xfs_scrub_context *sc)
722 return xfs_scrub_bmap(sc, XFS_ATTR_FORK);
725 /* Scrub an inode's CoW fork. */
/* NOTE(review): the function name line is missing from this view. */
728 struct xfs_scrub_context *sc)
/* Only reflink inodes can have a CoW fork to scrub. */
730 if (!xfs_is_reflink_inode(sc->ip))
733 return xfs_scrub_bmap(sc, XFS_COW_FORK);