1 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 */
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_trans_resv.h"
15 #include "xfs_mount.h"
16 #include "xfs_btree.h"
17 #include "xfs_alloc_btree.h"
18 #include "xfs_rmap_btree.h"
19 #include "xfs_alloc.h"
20 #include "xfs_ialloc.h"
23 #include "xfs_ag_resv.h"
24 #include "xfs_health.h"
25 #include "xfs_error.h"
27 #include "xfs_defer.h"
28 #include "xfs_log_format.h"
29 #include "xfs_trans.h"
30 #include "xfs_trace.h"
31 #include "xfs_inode.h"
32 #include "xfs_icache.h"
33 #include "xfs_group.h"
36 * xfs_initialize_perag_data
38 * Read in each per-ag structure so we can count up the number of
39 * allocated inodes, free inodes and used filesystem blocks as this
40 * information is no longer persistent in the superblock. Once we have
41 * this information, write it into the in-core superblock structure.
int
xfs_initialize_perag_data(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount)
{
	xfs_agnumber_t		index;
49 struct xfs_perag *pag;
50 struct xfs_sb *sbp = &mp->m_sb;
54 uint64_t bfreelst = 0;
59 for (index = 0; index < agcount; index++) {
		 * Read the AGF and AGI buffers to populate the per-ag
		 * structures for us.
64 pag = xfs_perag_get(mp, index);
65 error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
		if (!error)
			error = xfs_ialloc_read_agi(pag, NULL, 0, NULL);
		if (error) {
			xfs_perag_put(pag);
			return error;
		}
73 ifree += pag->pagi_freecount;
74 ialloc += pag->pagi_count;
75 bfree += pag->pagf_freeblks;
76 bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
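	/*
	 * The incore summary counters treat blocks sitting on the AGFL and
	 * blocks held by the free space btrees as free space, which is why
	 * flcount and btreeblks are folded into fdblocks here.
	 */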
80 fdblocks = bfree + bfreelst + btree;
83 * If the new summary counts are obviously incorrect, fail the
84 * mount operation because that implies the AGFs are also corrupt.
85 * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
86 * will prevent xfs_repair from fixing anything.
88 if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
89 xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
90 xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
91 error = -EFSCORRUPTED;
95 /* Overwrite incore superblock counters with just-read data */
96 spin_lock(&mp->m_sb_lock);
97 sbp->sb_ifree = ifree;
98 sbp->sb_icount = ialloc;
99 sbp->sb_fdblocks = fdblocks;
100 spin_unlock(&mp->m_sb_lock);
102 xfs_reinit_percpu_counters(mp);
104 xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
static void
xfs_perag_uninit(
	struct xfs_group	*xg)
{
113 struct xfs_perag *pag = to_perag(xg);
115 cancel_delayed_work_sync(&pag->pag_blockgc_work);
116 xfs_buf_cache_destroy(&pag->pag_bcache);
121 * Free up the per-ag resources within the specified AG range.
124 xfs_free_perag_range(
125 struct xfs_mount *mp,
126 xfs_agnumber_t first_agno,
127 xfs_agnumber_t end_agno)
132 for (agno = first_agno; agno < end_agno; agno++)
133 xfs_group_free(mp, agno, XG_TYPE_AG, xfs_perag_uninit);
136 /* Find the size of the AG, in blocks. */
138 __xfs_ag_block_count(
139 struct xfs_mount *mp,
	xfs_agnumber_t		agno,
	xfs_agnumber_t		agcount,
142 xfs_rfsblock_t dblocks)
144 ASSERT(agno < agcount);
146 if (agno < agcount - 1)
147 return mp->m_sb.sb_agblocks;
148 return dblocks - (agno * mp->m_sb.sb_agblocks);
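	/*
	 * Worked example with assumed geometry: if sb_agblocks = 65536 and
	 * sb_dblocks = 163840 with agcount = 3, AGs 0 and 1 are 65536 blocks
	 * each, while the last AG gets 163840 - 2 * 65536 = 32768 blocks.
	 */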
}

xfs_extlen_t
xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
156 return __xfs_ag_block_count(mp, agno, mp->m_sb.sb_agcount,
157 mp->m_sb.sb_dblocks);
160 /* Calculate the first and last possible inode number in an AG. */
163 struct xfs_mount *mp,
171 * Calculate the first inode, which will be in the first
172 * cluster-aligned block after the AGFL.
174 bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
175 *first = XFS_AGB_TO_AGINO(mp, bno);
178 * Calculate the last inode, which will be at the end of the
179 * last (aligned) cluster that can be allocated in the AG.
181 bno = round_down(eoag, M_IGEO(mp)->cluster_align);
182 *last = XFS_AGB_TO_AGINO(mp, bno) - 1;
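	/*
	 * Worked example with assumed geometry (4096-byte blocks, 512-byte
	 * inodes, so 8 inodes per block, cluster_align = 4): the AGFL lives
	 * in block 3, so the first cluster-aligned block is round_up(4, 4) =
	 * 4 and *first = 32.  With eoag = 65536, round_down(65536, 4) =
	 * 65536 and *last = 65536 * 8 - 1 = 524287.
	 */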
187 struct xfs_mount *mp,
192 return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
196 * Update the perag of the previous tail AG if it has been changed during
197 * recovery (i.e. recovery of a growfs).
200 xfs_update_last_ag_size(
201 struct xfs_mount *mp,
202 xfs_agnumber_t prev_agcount)
204 struct xfs_perag *pag = xfs_perag_grab(mp, prev_agcount - 1);
	if (!pag)
		return -EFSCORRUPTED;
208 pag_group(pag)->xg_block_count = __xfs_ag_block_count(mp,
209 prev_agcount - 1, mp->m_sb.sb_agcount,
210 mp->m_sb.sb_dblocks);
	__xfs_agino_range(mp, pag_group(pag)->xg_block_count, &pag->agino_min,
			&pag->agino_max);
	xfs_perag_rele(pag);
	return 0;
}

static int
xfs_perag_alloc(
219 struct xfs_mount *mp,
220 xfs_agnumber_t index,
221 xfs_agnumber_t agcount,
222 xfs_rfsblock_t dblocks)
224 struct xfs_perag *pag;
	pag = kzalloc(sizeof(*pag), GFP_KERNEL);
	if (!pag)
		return -ENOMEM;
#ifdef __KERNEL__
	/* Place kernel structure only init below this point. */
233 spin_lock_init(&pag->pag_ici_lock);
234 INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
235 INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
236 #endif /* __KERNEL__ */
238 error = xfs_buf_cache_init(&pag->pag_bcache);
243 * Pre-calculated geometry
	pag_group(pag)->xg_block_count = __xfs_ag_block_count(mp, index, agcount,
			dblocks);
247 pag_group(pag)->xg_min_gbno = XFS_AGFL_BLOCK(mp) + 1;
	__xfs_agino_range(mp, pag_group(pag)->xg_block_count, &pag->agino_min,
			&pag->agino_max);
251 error = xfs_group_insert(mp, pag_group(pag), index, XG_TYPE_AG);
	if (error)
		goto out_buf_cache_destroy;

	return 0;
257 out_buf_cache_destroy:
258 xfs_buf_cache_destroy(&pag->pag_bcache);
265 xfs_initialize_perag(
266 struct xfs_mount *mp,
267 xfs_agnumber_t orig_agcount,
268 xfs_agnumber_t new_agcount,
269 xfs_rfsblock_t dblocks,
270 xfs_agnumber_t *maxagi)
272 xfs_agnumber_t index;
	if (orig_agcount >= new_agcount)
		return 0;
278 for (index = orig_agcount; index < new_agcount; index++) {
279 error = xfs_perag_alloc(mp, index, new_agcount, dblocks);
		if (error)
			goto out_unwind_new_pags;
	}
284 *maxagi = xfs_set_inode_alloc(mp, new_agcount);
285 mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_unwind_new_pags:
	xfs_free_perag_range(mp, orig_agcount, index);
	return error;
}
295 struct xfs_mount *mp,
298 struct xfs_buf **bpp,
299 const struct xfs_buf_ops *ops)
304 error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
	if (error)
		return error;

	bp->b_maps[0].bm_bn = blkno;
316 * Generic btree root block init function
320 struct xfs_mount *mp,
322 struct aghdr_init_data *id)
324 xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
327 /* Finish initializing a free space btree. */
329 xfs_freesp_init_recs(
330 struct xfs_mount *mp,
332 struct aghdr_init_data *id)
334 struct xfs_alloc_rec *arec;
335 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
337 arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
338 arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
340 if (xfs_ag_contains_log(mp, id->agno)) {
341 struct xfs_alloc_rec *nrec;
342 xfs_agblock_t start = XFS_FSB_TO_AGBNO(mp,
343 mp->m_sb.sb_logstart);
345 ASSERT(start >= mp->m_ag_prealloc_blocks);
346 if (start != mp->m_ag_prealloc_blocks) {
			 * Modify the first record to pad out to the stripe
			 * alignment of the log and bump the record count.
			arec->ar_blockcount = cpu_to_be32(start -
					mp->m_ag_prealloc_blocks);
			be16_add_cpu(&block->bb_numrecs, 1);
			nrec = arec + 1;

			 * Insert a second record at the start of the internal
			 * log, which then gets trimmed.
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
		}

		 * Change the record start to after the internal log.
		be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
	}
372 * Calculate the block count of this record; if it is nonzero,
373 * increment the record count.
375 arec->ar_blockcount = cpu_to_be32(id->agsize -
376 be32_to_cpu(arec->ar_startblock));
377 if (arec->ar_blockcount)
378 be16_add_cpu(&block->bb_numrecs, 1);
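/*
 * Worked example for the log-containing case above (hypothetical numbers):
 * with m_ag_prealloc_blocks = 16, an internal log at agbno 64 spanning 128
 * blocks, and agsize = 4096, the root block ends up with two records:
 * agbno 16 for 48 blocks (between the preallocated AG metadata and the log)
 * and agbno 192 for 3904 blocks (from the end of the log to the end of the
 * AG).
 */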
382 * bnobt/cntbt btree root block init functions
386 struct xfs_mount *mp,
388 struct aghdr_init_data *id)
390 xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
391 xfs_freesp_init_recs(mp, bp, id);
395 * Reverse map root block init
399 struct xfs_mount *mp,
401 struct aghdr_init_data *id)
403 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
404 struct xfs_rmap_rec *rrec;
406 xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 4, id->agno);
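	/*
	 * The root block starts with the four static records initialised
	 * below (AG headers, free space btree roots, inode btree roots, and
	 * the rmap btree root); records for the refcount btree root and the
	 * internal log are appended further down only when they are present.
	 */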
	 * Mark the AG header regions as static metadata.  The BNO
	 * btree block is the first block after the headers, so its
	 * location defines the size of the region the static metadata
	 * consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
417 rrec = XFS_RMAP_REC_ADDR(block, 1);
418 rrec->rm_startblock = 0;
419 rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
420 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
423 /* account freespace btree root blocks */
424 rrec = XFS_RMAP_REC_ADDR(block, 2);
425 rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
426 rrec->rm_blockcount = cpu_to_be32(2);
427 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
430 /* account inode btree root blocks */
431 rrec = XFS_RMAP_REC_ADDR(block, 3);
432 rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					  XFS_IBT_BLOCK(mp));
435 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
438 /* account for rmap btree root */
439 rrec = XFS_RMAP_REC_ADDR(block, 4);
440 rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
441 rrec->rm_blockcount = cpu_to_be32(1);
442 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
445 /* account for refc btree root */
446 if (xfs_has_reflink(mp)) {
447 rrec = XFS_RMAP_REC_ADDR(block, 5);
448 rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
449 rrec->rm_blockcount = cpu_to_be32(1);
450 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
452 be16_add_cpu(&block->bb_numrecs, 1);
455 /* account for the log space */
456 if (xfs_ag_contains_log(mp, id->agno)) {
457 rrec = XFS_RMAP_REC_ADDR(block,
458 be16_to_cpu(block->bb_numrecs) + 1);
459 rrec->rm_startblock = cpu_to_be32(
460 XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
461 rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
462 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
464 be16_add_cpu(&block->bb_numrecs, 1);
469 * Initialise new secondary superblocks with the pre-grow geometry, but mark
470 * them as "in progress" so we know they haven't yet been activated. This will
471 * get cleared when the update with the new geometry information is done after
472 * changes to the primary are committed. This isn't strictly necessary, but we
473 * get it for free with the delayed buffer write lists and it means we can tell
474 * if a grow operation didn't complete properly after the fact.
478 struct xfs_mount *mp,
480 struct aghdr_init_data *id)
482 struct xfs_dsb *dsb = bp->b_addr;
484 xfs_sb_to_disk(dsb, &mp->m_sb);
485 dsb->sb_inprogress = 1;
490 struct xfs_mount *mp,
492 struct aghdr_init_data *id)
494 struct xfs_agf *agf = bp->b_addr;
495 xfs_extlen_t tmpsize;
497 agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
498 agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
499 agf->agf_seqno = cpu_to_be32(id->agno);
500 agf->agf_length = cpu_to_be32(id->agsize);
501 agf->agf_bno_root = cpu_to_be32(XFS_BNO_BLOCK(mp));
502 agf->agf_cnt_root = cpu_to_be32(XFS_CNT_BLOCK(mp));
503 agf->agf_bno_level = cpu_to_be32(1);
504 agf->agf_cnt_level = cpu_to_be32(1);
505 if (xfs_has_rmapbt(mp)) {
506 agf->agf_rmap_root = cpu_to_be32(XFS_RMAP_BLOCK(mp));
507 agf->agf_rmap_level = cpu_to_be32(1);
508 agf->agf_rmap_blocks = cpu_to_be32(1);
511 agf->agf_flfirst = cpu_to_be32(1);
513 agf->agf_flcount = 0;
514 tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
515 agf->agf_freeblks = cpu_to_be32(tmpsize);
516 agf->agf_longest = cpu_to_be32(tmpsize);
518 uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
519 if (xfs_has_reflink(mp)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
522 agf->agf_refcount_level = cpu_to_be32(1);
523 agf->agf_refcount_blocks = cpu_to_be32(1);
526 if (xfs_ag_contains_log(mp, id->agno)) {
527 int64_t logblocks = mp->m_sb.sb_logblocks;
529 be32_add_cpu(&agf->agf_freeblks, -logblocks);
530 agf->agf_longest = cpu_to_be32(id->agsize -
531 XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
537 struct xfs_mount *mp,
539 struct aghdr_init_data *id)
541 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
545 if (xfs_has_crc(mp)) {
546 agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
547 agfl->agfl_seqno = cpu_to_be32(id->agno);
548 uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
551 agfl_bno = xfs_buf_to_agfl_bno(bp);
552 for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
553 agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
558 struct xfs_mount *mp,
560 struct aghdr_init_data *id)
562 struct xfs_agi *agi = bp->b_addr;
565 agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
566 agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
567 agi->agi_seqno = cpu_to_be32(id->agno);
568 agi->agi_length = cpu_to_be32(id->agsize);
570 agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
571 agi->agi_level = cpu_to_be32(1);
572 agi->agi_freecount = 0;
573 agi->agi_newino = cpu_to_be32(NULLAGINO);
574 agi->agi_dirino = cpu_to_be32(NULLAGINO);
576 uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
577 if (xfs_has_finobt(mp)) {
578 agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
579 agi->agi_free_level = cpu_to_be32(1);
581 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
582 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
583 if (xfs_has_inobtcounts(mp)) {
584 agi->agi_iblocks = cpu_to_be32(1);
585 if (xfs_has_finobt(mp))
586 agi->agi_fblocks = cpu_to_be32(1);
590 typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
591 struct aghdr_init_data *id);
594 struct xfs_mount *mp,
595 struct aghdr_init_data *id,
596 aghdr_init_work_f work,
597 const struct xfs_buf_ops *ops)
602 error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
	if (error)
		return error;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
613 struct xfs_aghdr_grow_data {
616 const struct xfs_buf_ops *ops;
617 aghdr_init_work_f work;
618 const struct xfs_btree_ops *bc_ops;
 * Prepare new AG headers to be written to disk.  We use uncached buffers here,
 * as it is assumed these new AG headers are currently beyond the valid
 * filesystem address space.  Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so they can submit them
 * to disk and wait on them as required.  (See the illustrative caller sketch
 * after this function.)
634 struct xfs_mount *mp,
635 struct aghdr_init_data *id)
638 struct xfs_aghdr_grow_data aghdr_data[] = {
640 .daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
641 .numblks = XFS_FSS_TO_BB(mp, 1),
642 .ops = &xfs_sb_buf_ops,
643 .work = &xfs_sbblock_init,
647 .daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
648 .numblks = XFS_FSS_TO_BB(mp, 1),
649 .ops = &xfs_agf_buf_ops,
650 .work = &xfs_agfblock_init,
654 .daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
655 .numblks = XFS_FSS_TO_BB(mp, 1),
656 .ops = &xfs_agfl_buf_ops,
657 .work = &xfs_agflblock_init,
661 .daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
662 .numblks = XFS_FSS_TO_BB(mp, 1),
663 .ops = &xfs_agi_buf_ops,
664 .work = &xfs_agiblock_init,
667 { /* BNO root block */
668 .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
669 .numblks = BTOBB(mp->m_sb.sb_blocksize),
670 .ops = &xfs_bnobt_buf_ops,
671 .work = &xfs_bnoroot_init,
672 .bc_ops = &xfs_bnobt_ops,
675 { /* CNT root block */
676 .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
677 .numblks = BTOBB(mp->m_sb.sb_blocksize),
678 .ops = &xfs_cntbt_buf_ops,
679 .work = &xfs_bnoroot_init,
680 .bc_ops = &xfs_cntbt_ops,
683 { /* INO root block */
684 .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
685 .numblks = BTOBB(mp->m_sb.sb_blocksize),
686 .ops = &xfs_inobt_buf_ops,
687 .work = &xfs_btroot_init,
688 .bc_ops = &xfs_inobt_ops,
691 { /* FINO root block */
692 .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
693 .numblks = BTOBB(mp->m_sb.sb_blocksize),
694 .ops = &xfs_finobt_buf_ops,
695 .work = &xfs_btroot_init,
696 .bc_ops = &xfs_finobt_ops,
697 .need_init = xfs_has_finobt(mp)
699 { /* RMAP root block */
700 .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
701 .numblks = BTOBB(mp->m_sb.sb_blocksize),
702 .ops = &xfs_rmapbt_buf_ops,
703 .work = &xfs_rmaproot_init,
704 .bc_ops = &xfs_rmapbt_ops,
705 .need_init = xfs_has_rmapbt(mp)
707 { /* REFC root block */
708 .daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
709 .numblks = BTOBB(mp->m_sb.sb_blocksize),
710 .ops = &xfs_refcountbt_buf_ops,
711 .work = &xfs_btroot_init,
712 .bc_ops = &xfs_refcountbt_ops,
713 .need_init = xfs_has_reflink(mp)
715 { /* NULL terminating block */
716 .daddr = XFS_BUF_DADDR_NULL,
719 struct xfs_aghdr_grow_data *dp;
722 /* Account for AG free space in new AG */
723 id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
724 for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
728 id->daddr = dp->daddr;
729 id->numblks = dp->numblks;
730 id->bc_ops = dp->bc_ops;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}
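/*
 * Illustrative caller sketch (an assumption, loosely modelled on the growfs
 * path rather than lifted from it): the caller owns the delwri list, so it
 * submits the queued header buffers itself once this function returns:
 *
 *	struct aghdr_init_data	id = {
 *		.agno	= agno,
 *		.agsize	= agsize,
 *	};
 *	int			error;
 *
 *	INIT_LIST_HEAD(&id.buffer_list);
 *	error = xfs_ag_init_headers(mp, &id);
 *	if (error) {
 *		xfs_buf_delwri_cancel(&id.buffer_list);
 *		return error;
 *	}
 *	error = xfs_buf_delwri_submit(&id.buffer_list);
 */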
740 struct xfs_perag *pag,
741 struct xfs_trans **tpp,
744 struct xfs_mount *mp = pag_mount(pag);
745 struct xfs_alloc_arg args = {
751 .oinfo = XFS_RMAP_OINFO_SKIP_UPDATE,
752 .resv = XFS_AG_RESV_NONE,
755 struct xfs_buf *agibp, *agfbp;
761 ASSERT(pag_agno(pag) == mp->m_sb.sb_agcount - 1);
762 error = xfs_ialloc_read_agi(pag, *tpp, 0, &agibp);
768 error = xfs_alloc_read_agf(pag, *tpp, 0, &agfbp);
773 aglen = be32_to_cpu(agi->agi_length);
774 /* some extra paranoid checks before we shrink the ag */
775 if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length)) {
776 xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF);
777 return -EFSCORRUPTED;
783 * Make sure that the last inode cluster cannot overlap with the new
784 * end of the AG, even if it's sparse.
786 error = xfs_ialloc_check_shrink(pag, *tpp, agibp, aglen - delta);
	 * Disable perag reservations so they don't cause the allocation
	 * request to fail.  We'll re-establish the reservation before we
	 * return.
794 xfs_ag_resv_free(pag);
796 /* internal log shouldn't also show up in the free space btrees */
797 error = xfs_alloc_vextent_exact_bno(&args,
798 xfs_agbno_to_fsb(pag, aglen - delta));
	if (!error && args.agbno == NULLAGBLOCK)
		error = -ENOSPC;
		 * If the extent allocation fails, we need to roll the
		 * transaction anyway to ensure that the AGFL fixup has been
		 * committed.
		 *
		 * We need to hold the AGF across the roll to ensure nothing
		 * can access the AG for allocation until the shrink is fully
		 * cleaned up.  And because resetting the AG block reservation
		 * space needs to lock the AGI, we also have to hold that so
		 * we don't get AGI/AGF lock order inversions in the error
		 * handling path.
814 xfs_trans_bhold(*tpp, agfbp);
815 xfs_trans_bhold(*tpp, agibp);
816 err2 = xfs_trans_roll(tpp);
		if (err2)
			return err2;
		xfs_trans_bjoin(*tpp, agfbp);
820 xfs_trans_bjoin(*tpp, agibp);
	 * If the space was successfully removed from the free space btrees,
	 * we need to confirm that the per-AG reservation still works as
	 * expected.
828 be32_add_cpu(&agi->agi_length, -delta);
829 be32_add_cpu(&agf->agf_length, -delta);
831 err2 = xfs_ag_resv_init(pag, *tpp);
	if (err2) {
		be32_add_cpu(&agi->agi_length, delta);
834 be32_add_cpu(&agf->agf_length, delta);
838 err2 = xfs_free_extent_later(*tpp, args.fsbno, delta, NULL,
839 XFS_AG_RESV_NONE, XFS_FREE_EXTENT_SKIP_DISCARD);
844 * Roll the transaction before trying to re-init the per-ag
845 * reservation. The new transaction is clean so it will cancel
846 * without any side effects.
848 error = xfs_defer_finish(tpp);
856 /* Update perag geometry */
857 pag_group(pag)->xg_block_count -= delta;
	__xfs_agino_range(mp, pag_group(pag)->xg_block_count, &pag->agino_min,
			&pag->agino_max);
861 xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
862 xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
866 err2 = xfs_ag_resv_init(pag, *tpp);
870 xfs_warn(mp, "Error %d reserving per-AG metadata reserve pool.", err2);
871 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 * Extend the AG indicated by @pag by the length passed in.
880 struct xfs_perag *pag,
881 struct xfs_trans *tp,
884 struct xfs_mount *mp = pag_mount(pag);
890 ASSERT(pag_agno(pag) == mp->m_sb.sb_agcount - 1);
892 error = xfs_ialloc_read_agi(pag, tp, 0, &bp);
897 be32_add_cpu(&agi->agi_length, len);
898 xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
903 error = xfs_alloc_read_agf(pag, tp, 0, &bp);
908 be32_add_cpu(&agf->agf_length, len);
909 ASSERT(agf->agf_length == agi->agi_length);
910 xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
913 * Free the new space.
915 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
916 * this doesn't actually exist in the rmap btree.
918 error = xfs_rmap_free(tp, bp, pag, be32_to_cpu(agf->agf_length) - len,
919 len, &XFS_RMAP_OINFO_SKIP_UPDATE);
923 error = xfs_free_extent(tp, pag, be32_to_cpu(agf->agf_length) - len,
924 len, &XFS_RMAP_OINFO_SKIP_UPDATE, XFS_AG_RESV_NONE);
928 /* Update perag geometry */
929 pag_group(pag)->xg_block_count = be32_to_cpu(agf->agf_length);
	__xfs_agino_range(mp, pag_group(pag)->xg_block_count, &pag->agino_min,
			&pag->agino_max);
	return 0;
}
935 /* Retrieve AG geometry. */
938 struct xfs_perag *pag,
939 struct xfs_ag_geometry *ageo)
941 struct xfs_buf *agi_bp;
942 struct xfs_buf *agf_bp;
945 unsigned int freeblks;
948 /* Lock the AG headers. */
949 error = xfs_ialloc_read_agi(pag, NULL, 0, &agi_bp);
952 error = xfs_alloc_read_agf(pag, NULL, 0, &agf_bp);
957 memset(ageo, 0, sizeof(*ageo));
958 ageo->ag_number = pag_agno(pag);
960 agi = agi_bp->b_addr;
961 ageo->ag_icount = be32_to_cpu(agi->agi_count);
962 ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);
964 agf = agf_bp->b_addr;
965 ageo->ag_length = be32_to_cpu(agf->agf_length);
	freeblks = pag->pagf_freeblks +
		   pag->pagf_flcount +
		   pag->pagf_btreeblks -
969 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
970 ageo->ag_freeblks = freeblks;
971 xfs_ag_geom_health(pag, ageo);
973 /* Release resources. */
974 xfs_buf_relse(agf_bp);
976 xfs_buf_relse(agi_bp);