// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	int			error;

	error = xchk_get_inode(sc, ip);
	if (error)
		goto out;

	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		inode_dio_wait(VFS_I(sc->ip));
		error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
		if (error)
			goto out;
	}

	/* Got the inode, lock it and we're ready to go. */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

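/*
 * Scrub context for one fork's mappings: which fork we're checking, the
 * end of the previous extent (so out-of-order records can be caught),
 * and whether the fork's blocks live on the realtime device or could be
 * shared because the inode is reflinked.
 */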
struct xchk_bmap_info {
	struct xfs_scrub	*sc;
	xfs_fileoff_t		lastoff;
	bool			is_rt;
	bool			is_shared;
	int			whichfork;
};

/* Look for a corresponding rmap for this irec. */
static inline bool
xchk_bmap_get_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno,
	uint64_t		owner,
	struct xfs_rmap_irec	*rmap)
{
	xfs_fileoff_t		offset;
	unsigned int		rflags = 0;
	int			has_rmap;
	int			error;

	if (info->whichfork == XFS_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;

	/*
	 * CoW staging extents are owned (on disk) by the refcountbt, so
	 * their rmaps do not have offsets.
	 */
	if (info->whichfork == XFS_COW_FORK)
		offset = 0;
	else
		offset = irec->br_startoff;

	/*
	 * If the caller thinks this could be a shared bmbt extent (IOWs,
	 * any data fork extent of a reflink inode) then we have to use the
	 * range rmap lookup to make sure we get the correct owner/offset.
	 */
	if (info->is_shared) {
		error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
		if (!xchk_should_check_xref(info->sc, &error,
				&info->sc->sa.rmap_cur))
			return false;
		goto out;
	}

	/*
	 * Otherwise, use the (faster) regular lookup.
	 */
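	/*
	 * The plain lookup_le positions the cursor at the last record with
	 * a key at or below the one we supply, which is enough when this
	 * inode is the only possible owner of the block.
	 */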
	error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
			offset, rflags, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;
	if (!has_rmap)
		goto out;

	error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;

out:
	if (!has_rmap)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
			irec->br_startoff);
	return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xchk_bmap_xref_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno)
{
	struct xfs_rmap_irec	rmap;
	unsigned long long	rmap_end;
	uint64_t		owner;

	if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
		return;

	if (info->whichfork == XFS_COW_FORK)
		owner = XFS_RMAP_OWN_COW;
	else
		owner = info->sc->ip->i_ino;

	/* Find the rmap record for this irec. */
	if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
		return;

	/* Check the rmap. */
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (rmap.rm_startblock > agbno ||
	    agbno + irec->br_blockcount > rmap_end)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
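	/*
	 * For example, an rmap record starting at agbno 100 and running for
	 * 50 blocks has rmap_end = 150; a mapping at agbno 120 for 40 blocks
	 * would end at 160 and be flagged above, since every bmbt extent
	 * must be wholly contained within a single rmap record.
	 */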

	/*
	 * Check the logical offsets if applicable.  CoW staging extents
	 * don't track logical offsets since the mappings only exist in
	 * memory.
	 */
	if (info->whichfork != XFS_COW_FORK) {
		rmap_end = (unsigned long long)rmap.rm_offset +
				rmap.rm_blockcount;
		if (rmap.rm_offset > irec->br_startoff ||
		    irec->br_startoff + irec->br_blockcount > rmap_end)
			xchk_fblock_xref_set_corrupt(info->sc,
					info->whichfork, irec->br_startoff);
	}

	if (rmap.rm_owner != owner)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for discrepancies between the unwritten flag in the irec and
	 * the rmap.  Note that the (in-memory) CoW fork distinguishes between
	 * unwritten and written extents, but we don't track that in the rmap
	 * records because the blocks are owned (on-disk) by the refcountbt,
	 * which doesn't track unwritten state.
	 */
	if (owner != XFS_RMAP_OWN_COW &&
	    irec->br_state == XFS_EXT_UNWRITTEN &&
	    !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->whichfork == XFS_ATTR_FORK &&
	    !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
}

/* Cross-reference a single rtdev extent record. */
STATIC void
xchk_bmap_rt_extent_xref(
	struct xchk_bmap_info	*info,
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
			irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xchk_bmap_extent_xref(
	struct xchk_bmap_info	*info,
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		len;
	int			error;

	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	len = irec->br_blockcount;

	error = xchk_ag_init(info->sc, agno, &info->sc->sa);
	if (!xchk_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		return;

	xchk_xref_is_used_space(info->sc, agbno, len);
	xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
	xchk_bmap_xref_rmap(info, irec, agbno);
	switch (info->whichfork) {
	case XFS_DATA_FORK:
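		/*
		 * Data fork extents of a reflink inode may legitimately be
		 * shared, so only non-reflink data forks fall through to
		 * the "is not shared" cross-reference below.
		 */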
		if (xfs_is_reflink_inode(info->sc->ip))
			break;
		/* fall through */
	case XFS_ATTR_FORK:
		xchk_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	case XFS_COW_FORK:
		xchk_xref_is_cow_staging(info->sc, agbno,
				irec->br_blockcount);
		break;
	}

	xchk_ag_free(info->sc, &info->sc->sa);
}

/* Scrub a single extent record. */
STATIC int
xchk_bmap_extent(
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	struct xfs_buf		*bp = NULL;
	xfs_filblks_t		end;
	int			error = 0;

	if (cur)
		xfs_btree_get_block(cur, 0, &bp);

	/*
	 * Check for out-of-order extents.  This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for delalloc extents.  We never iterate the ones in the
	 * in-core extent scan, and we should never see these in the bmbt.
	 */
	if (isnullstartblock(irec->br_startblock))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_blockcount > MAXEXTLEN)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
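	/*
	 * A zero-length mapping, or one whose length wraps the block
	 * address space, also indicates corruption.
	 */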
	if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	end = irec->br_startblock + irec->br_blockcount - 1;
	if (info->is_rt &&
	    (!xfs_verify_rtbno(mp, irec->br_startblock) ||
	     !xfs_verify_rtbno(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (!info->is_rt &&
	    (!xfs_verify_fsbno(mp, irec->br_startblock) ||
	     !xfs_verify_fsbno(mp, end) ||
	     XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
				XFS_FSB_TO_AGNO(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->is_rt)
		xchk_bmap_rt_extent_xref(info, ip, cur, irec);
	else
		xchk_bmap_extent_xref(info, ip, cur, irec);

	info->lastoff = irec->br_startoff + irec->br_blockcount;
	return error;
}

/* Scrub a bmbt record. */
STATIC int
xchk_bmapbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_bmbt_irec	irec;
	struct xchk_bmap_info	*info = bs->private;
	struct xfs_inode	*ip = bs->cur->bc_private.b.ip;
	struct xfs_buf		*bp = NULL;
	struct xfs_btree_block	*block;
	uint64_t		owner;
	int			i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.
	 */
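	/*
	 * bc_ptrs[0] == 1 means we're looking at the first record in a
	 * leaf, so the blocks along the path from that leaf up towards
	 * the root get their owners checked once per block instead of
	 * once per record.
	 */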
	if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
	    bs->cur->bc_ptrs[0] == 1) {
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xchk_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/* Set up the in-core record and scrub it. */
	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	return xchk_bmap_extent(ip, bs->cur, info, &irec);
}

/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
	struct xfs_scrub	*sc,
	int			whichfork,
	struct xchk_bmap_info	*info)
{
	struct xfs_owner_info	oinfo;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_btree_cur	*cur;
	int			error;

	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error);
	return error;
}

struct xchk_bmap_check_rmap_info {
	struct xfs_scrub	*sc;
	int			whichfork;
	struct xfs_iext_cursor	icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xchk_bmap_check_rmap(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_bmbt_irec		irec;
	struct xchk_bmap_check_rmap_info	*sbcri = priv;
	struct xfs_ifork		*ifp;
	struct xfs_scrub		*sc = sbcri->sc;
	bool				have_map;

	/* Is this even the right fork? */
	if (rec->rm_owner != sc->ip->i_ino)
		return 0;
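	/* The record's attr fork flag has to match the fork we're checking. */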
	if ((sbcri->whichfork == XFS_ATTR_FORK) ^
	    !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return 0;

	/* Now look up the bmbt record. */
	ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
	if (!ifp) {
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
		goto out;
	}
	have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
			&sbcri->icur, &irec);
	if (!have_map)
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
	/*
	 * bmap extent record lengths are constrained to 2^21 blocks in length
	 * because of space constraints in the on-disk metadata structure.
	 * However, rmap extent record lengths are constrained only by AG
	 * length, so we have to loop through the bmbt to make sure that the
	 * entire rmap is covered by bmbt records.
	 */
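	/*
	 * For example, a single rmap record covering 5,000,000 blocks needs
	 * at least three bmbt mappings to back it, since each mapping is
	 * capped at MAXEXTLEN (2^21 = 2,097,152) blocks.
	 */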
	while (have_map) {
		if (irec.br_startoff != rec->rm_offset)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
				cur->bc_private.a.agno, rec->rm_startblock))
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_blockcount > rec->rm_blockcount)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
		rec->rm_startblock += irec.br_blockcount;
		rec->rm_offset += irec.br_blockcount;
		rec->rm_blockcount -= irec.br_blockcount;
		if (rec->rm_blockcount == 0)
			break;
		have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
		if (!have_map)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
	}

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return XFS_BTREE_QUERY_RANGE_ABORT;
	return 0;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
	struct xfs_scrub		*sc,
	int				whichfork,
	xfs_agnumber_t			agno)
{
	struct xchk_bmap_check_rmap_info	sbcri;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agf;
	int				error;

	error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
	if (!cur) {
		error = -ENOMEM;
		goto out_agf;
	}

	sbcri.sc = sc;
	sbcri.whichfork = whichfork;
	error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
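	/*
	 * The per-record callback returns the query-abort code once it has
	 * marked the fork corrupt; that's an early exit, not a real error.
	 */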
	if (error == XFS_BTREE_QUERY_RANGE_ABORT)
		error = 0;

	xfs_btree_del_cursor(cur, error);
out_agf:
	xfs_trans_brelse(sc->tp, agf);
	return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	loff_t			size;
	xfs_agnumber_t		agno;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
	    whichfork == XFS_COW_FORK ||
	    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return 0;

	/* Don't support realtime rmap checks yet. */
	if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
		return 0;

	/*
	 * Only do this for complex maps that are in btree format, or for
	 * situations where we would seem to have a size but zero extents.
	 * The inode repair code can zap broken iforks, which means we have
	 * to flag this bmap as corrupt if there are rmaps that need to be
	 * reattached.
	 */
	switch (whichfork) {
	case XFS_DATA_FORK:
		size = i_size_read(VFS_I(sc->ip));
		break;
	case XFS_ATTR_FORK:
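		/* Nonzero iff the inode has an attr fork at all. */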
		size = XFS_IFORK_Q(sc->ip);
		break;
	default:
		size = 0;
		break;
	}
	if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    (size == 0 || XFS_IFORK_NEXTENTS(sc->ip, whichfork) > 0))
		return 0;

	for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
		error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
	}

	return 0;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xchk_bmap(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xfs_bmbt_irec	irec;
	struct xchk_bmap_info	info = { NULL };
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		endoff;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* Non-existent CoW forks are ignorable. */
		if (!ifp)
			goto out;
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
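		/*
		 * No attr fork means no mappings to walk, but we still go
		 * through the rmap cross-check below.
		 */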
		if (!ifp)
			goto out_check_rmap;
		if (!xfs_sb_version_hasattr(&mp->m_sb) &&
		    !xfs_sb_version_hasattr2(&mp->m_sb))
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_COW_FORK) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xchk_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xchk_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Now try to scrub the in-memory extent list. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(sc->tp, ip, whichfork);
		if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
			goto out;
	}

	/* Find the offset of the last extent in the mapping. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error) ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			break;
		if (isnullstartblock(irec.br_startblock))
			continue;
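		/*
		 * No extent can start at or beyond the end of the last
		 * mapping reported above.
		 */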
		if (irec.br_startoff >= endoff) {
			xchk_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}
		error = xchk_bmap_extent(ip, NULL, &info, &irec);
		if (error)
			goto out;
	}

out_check_rmap:
	error = xchk_bmap_check_rmaps(sc, whichfork);
	if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
		goto out;
out:
	return error;
}

/* Scrub an inode's data fork. */
int
xchk_bmap_data(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
	struct xfs_scrub	*sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xchk_bmap(sc, XFS_COW_FORK);
}