1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_shared.h"
11 #include "xfs_trans_resv.h"
13 #include "xfs_mount.h"
14 #include "xfs_defer.h"
15 #include "xfs_btree.h"
17 #include "xfs_alloc_btree.h"
18 #include "xfs_alloc.h"
19 #include "xfs_extent_busy.h"
20 #include "xfs_errortag.h"
21 #include "xfs_error.h"
22 #include "xfs_trace.h"
23 #include "xfs_trans.h"
24 #include "xfs_buf_item.h"
27 #include "xfs_ag_resv.h"
30 struct kmem_cache *xfs_extfree_item_cache;
32 struct workqueue_struct *xfs_alloc_wq;
34 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
36 #define XFSA_FIXUP_BNO_OK 1
37 #define XFSA_FIXUP_CNT_OK 2
39 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
40 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
41 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
 * Size of the AGFL. For CRC-enabled filesystems we steal a couple of slots in
 * the beginning of the block for a proper header with the location information
 * and CRC.
52 unsigned int size = mp->m_sb.sb_sectsize;
55 size -= sizeof(struct xfs_agfl);
57 return size / sizeof(xfs_agblock_t);
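/*
 * Illustrative example for xfs_agfl_size() (geometry assumed, not fixed): on
 * a v5 filesystem with 512-byte sectors, the 36-byte struct xfs_agfl header
 * leaves (512 - 36) / sizeof(xfs_agblock_t) = 476 / 4 = 119 AGFL slots.
 */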
64 if (xfs_has_rmapbt(mp))
65 return XFS_RMAP_BLOCK(mp) + 1;
66 if (xfs_has_finobt(mp))
67 return XFS_FIBT_BLOCK(mp) + 1;
68 return XFS_IBT_BLOCK(mp) + 1;
75 if (xfs_has_reflink(mp))
76 return xfs_refc_block(mp) + 1;
77 if (xfs_has_rmapbt(mp))
78 return XFS_RMAP_BLOCK(mp) + 1;
79 if (xfs_has_finobt(mp))
80 return XFS_FIBT_BLOCK(mp) + 1;
81 return XFS_IBT_BLOCK(mp) + 1;
85 * The number of blocks per AG that we withhold from xfs_mod_fdblocks to
86 * guarantee that we can refill the AGFL prior to allocating space in a nearly
 * full AG. Although the space described by the free space btrees, the
88 * blocks used by the freesp btrees themselves, and the blocks owned by the
89 * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
90 * free space in the AG drop so low that the free space btrees cannot refill an
91 * empty AGFL up to the minimum level. Rather than grind through empty AGs
92 * until the fs goes down, we subtract this many AG blocks from the incore
93 * fdblocks to ensure user allocation does not overcommit the space the
94 * filesystem needs for the AGFLs. The rmap btree uses a per-AG reservation to
95 * withhold space from xfs_mod_fdblocks, so we do not account for that here.
97 #define XFS_ALLOCBT_AGFL_RESERVE 4
100 * Compute the number of blocks that we set aside to guarantee the ability to
101 * refill the AGFL and handle a full bmap btree split.
103 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
104 * AGF buffer (PV 947395), we place constraints on the relationship among
105 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks. However, these restrictions may result in no actual space
 * being allocated for a delayed extent; for example, a data block in a certain
 * AG is allocated, but no additional block is available for the bmap btree
 * block required when that file's bmap btree splits. The result of this may
110 * lead to an infinite loop when the file gets flushed to disk and all delayed
111 * extents need to be actually allocated. To get around this, we explicitly set
112 * aside a few blocks which will not be reserved in delayed allocation.
114 * For each AG, we need to reserve enough blocks to replenish a totally empty
115 * AGFL and 4 more to handle a potential split of the file's bmap btree.
119 struct xfs_mount *mp)
121 return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
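/*
 * Worked example: a filesystem with 16 AGs sets aside
 * 16 * (XFS_ALLOCBT_AGFL_RESERVE + 4) = 16 * 8 = 128 blocks from the incore
 * free block count.
 */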
125 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG. However, we cannot use all the
127 * blocks in the AG - some are permanently used by metadata. These
128 * blocks are generally:
129 * - the AG superblock, AGF, AGI and AGFL
130 * - the AGF (bno and cnt) and AGI btree root blocks, and optionally
131 * the AGI free inode and rmap btree root blocks.
132 * - blocks on the AGFL according to xfs_alloc_set_aside() limits
133 * - the rmapbt root block
135 * The AG headers are sector sized, so the amount of space they take up is
136 * dependent on filesystem geometry. The others are all single blocks.
139 xfs_alloc_ag_max_usable(
140 struct xfs_mount *mp)
144 blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
145 blocks += XFS_ALLOCBT_AGFL_RESERVE;
146 blocks += 3; /* AGF, AGI btree root blocks */
147 if (xfs_has_finobt(mp))
148 blocks++; /* finobt root block */
149 if (xfs_has_rmapbt(mp))
150 blocks++; /* rmap root block */
151 if (xfs_has_reflink(mp))
152 blocks++; /* refcount root block */
154 return mp->m_sb.sb_agblocks - blocks;
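/*
 * Illustrative example (geometry assumed, not fixed): with 4096-byte blocks
 * and 512-byte sectors, the four sector-sized headers round up to one
 * filesystem block, so a filesystem with finobt, rmapbt and reflink enabled
 * subtracts 1 + XFS_ALLOCBT_AGFL_RESERVE + 3 + 1 + 1 + 1 = 11 blocks per AG.
 */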
158 * Lookup the record equal to [bno, len] in the btree given by cur.
160 STATIC int /* error */
162 struct xfs_btree_cur *cur, /* btree cursor */
163 xfs_agblock_t bno, /* starting block of extent */
164 xfs_extlen_t len, /* length of extent */
165 int *stat) /* success/failure */
169 cur->bc_rec.a.ar_startblock = bno;
170 cur->bc_rec.a.ar_blockcount = len;
171 error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
172 cur->bc_ag.abt.active = (*stat == 1);
177 * Lookup the first record greater than or equal to [bno, len]
178 * in the btree given by cur.
182 struct xfs_btree_cur *cur, /* btree cursor */
183 xfs_agblock_t bno, /* starting block of extent */
184 xfs_extlen_t len, /* length of extent */
185 int *stat) /* success/failure */
189 cur->bc_rec.a.ar_startblock = bno;
190 cur->bc_rec.a.ar_blockcount = len;
191 error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
192 cur->bc_ag.abt.active = (*stat == 1);
197 * Lookup the first record less than or equal to [bno, len]
198 * in the btree given by cur.
202 struct xfs_btree_cur *cur, /* btree cursor */
203 xfs_agblock_t bno, /* starting block of extent */
204 xfs_extlen_t len, /* length of extent */
205 int *stat) /* success/failure */
208 cur->bc_rec.a.ar_startblock = bno;
209 cur->bc_rec.a.ar_blockcount = len;
210 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
211 cur->bc_ag.abt.active = (*stat == 1);
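/*
 * The three lookup helpers above differ only in the search mode passed to
 * xfs_btree_lookup() (XFS_LOOKUP_EQ, XFS_LOOKUP_GE and XFS_LOOKUP_LE); each
 * marks the cursor active when the lookup finds a record.
 */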
216 xfs_alloc_cur_active(
217 struct xfs_btree_cur *cur)
219 return cur && cur->bc_ag.abt.active;
223 * Update the record referred to by cur to the value given
225 * This either works (return 0) or gets an EFSCORRUPTED error.
227 STATIC int /* error */
229 struct xfs_btree_cur *cur, /* btree cursor */
230 xfs_agblock_t bno, /* starting block of extent */
231 xfs_extlen_t len) /* length of extent */
233 union xfs_btree_rec rec;
235 rec.alloc.ar_startblock = cpu_to_be32(bno);
236 rec.alloc.ar_blockcount = cpu_to_be32(len);
237 return xfs_btree_update(cur, &rec);
241 * Get the data from the pointed-to record.
245 struct xfs_btree_cur *cur, /* btree cursor */
246 xfs_agblock_t *bno, /* output: starting block of extent */
247 xfs_extlen_t *len, /* output: length of extent */
248 int *stat) /* output: success/failure */
250 struct xfs_mount *mp = cur->bc_mp;
251 xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
252 union xfs_btree_rec *rec;
255 error = xfs_btree_get_rec(cur, &rec, stat);
256 if (error || !(*stat))
259 *bno = be32_to_cpu(rec->alloc.ar_startblock);
260 *len = be32_to_cpu(rec->alloc.ar_blockcount);
	/* check for valid extent range, including overflow */
	if (!xfs_verify_agbno(mp, agno, *bno))
		goto out_bad_rec;
	if (*bno > *bno + *len)
		goto out_bad_rec;
	if (!xfs_verify_agbno(mp, agno, *bno + *len - 1))
		goto out_bad_rec;
	return 0;

out_bad_rec:
	xfs_warn(mp,
		"%s Freespace BTree record corruption in AG %d detected!",
		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size", agno);
	xfs_warn(mp,
		"start block 0x%x block count 0x%x", *bno, *len);
	return -EFSCORRUPTED;
285 * Compute aligned version of the found extent.
286 * Takes alignment and min length into account.
289 xfs_alloc_compute_aligned(
290 xfs_alloc_arg_t *args, /* allocation argument structure */
291 xfs_agblock_t foundbno, /* starting block in found extent */
292 xfs_extlen_t foundlen, /* length in found extent */
293 xfs_agblock_t *resbno, /* result block number */
294 xfs_extlen_t *reslen, /* result length */
297 xfs_agblock_t bno = foundbno;
298 xfs_extlen_t len = foundlen;
302 /* Trim busy sections out of found extent */
303 busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
306 * If we have a largish extent that happens to start before min_agbno,
307 * see if we can shift it into range...
309 if (bno < args->min_agbno && bno + len > args->min_agbno) {
310 diff = args->min_agbno - bno;
317 if (args->alignment > 1 && len >= args->minlen) {
318 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
320 diff = aligned_bno - bno;
322 *resbno = aligned_bno;
323 *reslen = diff >= len ? 0 : len - diff;
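/*
 * Alignment example (hypothetical values): for a found extent of 20 blocks
 * starting at agbno 10 with args->alignment = 4, aligned_bno = roundup(10, 4)
 * = 12 and diff = 2, so the aligned result is *resbno = 12, *reslen = 18.
 */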
333 * Compute best start block and diff for "near" allocations.
334 * freelen >= wantlen already checked by caller.
336 STATIC xfs_extlen_t /* difference value (absolute) */
337 xfs_alloc_compute_diff(
338 xfs_agblock_t wantbno, /* target starting block */
339 xfs_extlen_t wantlen, /* target length */
340 xfs_extlen_t alignment, /* target alignment */
341 int datatype, /* are we allocating data? */
342 xfs_agblock_t freebno, /* freespace's starting block */
343 xfs_extlen_t freelen, /* freespace's length */
344 xfs_agblock_t *newbnop) /* result: best start block from free */
346 xfs_agblock_t freeend; /* end of freespace extent */
347 xfs_agblock_t newbno1; /* return block number */
348 xfs_agblock_t newbno2; /* other new block number */
349 xfs_extlen_t newlen1=0; /* length with newbno1 */
350 xfs_extlen_t newlen2=0; /* length with newbno2 */
351 xfs_agblock_t wantend; /* end of target extent */
352 bool userdata = datatype & XFS_ALLOC_USERDATA;
354 ASSERT(freelen >= wantlen);
355 freeend = freebno + freelen;
356 wantend = wantbno + wantlen;
358 * We want to allocate from the start of a free extent if it is past
359 * the desired block or if we are allocating user data and the free
360 * extent is before desired block. The second case is there to allow
361 * for contiguous allocation from the remaining free space if the file
362 * grows in the short term.
364 if (freebno >= wantbno || (userdata && freeend < wantend)) {
365 if ((newbno1 = roundup(freebno, alignment)) >= freeend)
366 newbno1 = NULLAGBLOCK;
367 } else if (freeend >= wantend && alignment > 1) {
368 newbno1 = roundup(wantbno, alignment);
369 newbno2 = newbno1 - alignment;
370 if (newbno1 >= freeend)
371 newbno1 = NULLAGBLOCK;
373 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
374 if (newbno2 < freebno)
375 newbno2 = NULLAGBLOCK;
377 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
378 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
379 if (newlen1 < newlen2 ||
380 (newlen1 == newlen2 &&
381 XFS_ABSDIFF(newbno1, wantbno) >
382 XFS_ABSDIFF(newbno2, wantbno)))
384 } else if (newbno2 != NULLAGBLOCK)
386 } else if (freeend >= wantend) {
388 } else if (alignment > 1) {
389 newbno1 = roundup(freeend - wantlen, alignment);
390 if (newbno1 > freeend - wantlen &&
391 newbno1 - alignment >= freebno)
392 newbno1 -= alignment;
393 else if (newbno1 >= freeend)
394 newbno1 = NULLAGBLOCK;
396 newbno1 = freeend - wantlen;
398 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
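/*
 * Locality example (hypothetical values): wantbno = 100, wantlen = 8,
 * alignment = 1 and a free extent of 16 blocks at freebno = 120. Since
 * freebno >= wantbno, the first branch picks newbno1 = roundup(120, 1) = 120
 * and the function returns XFS_ABSDIFF(120, 100) = 20.
 */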
402 * Fix up the length, based on mod and prod.
403 * len should be k * prod + mod for some k.
404 * If len is too small it is returned unchanged.
405 * If len hits maxlen it is left alone.
409 xfs_alloc_arg_t *args) /* allocation argument structure */
414 ASSERT(args->mod < args->prod);
416 ASSERT(rlen >= args->minlen);
417 ASSERT(rlen <= args->maxlen);
418 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
419 (args->mod == 0 && rlen < args->prod))
421 k = rlen % args->prod;
425 rlen = rlen - (k - args->mod);
427 rlen = rlen - args->prod + (args->mod - k);
428 /* casts to (int) catch length underflows */
429 if ((int)rlen < (int)args->minlen)
431 ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
432 ASSERT(rlen % args->prod == args->mod);
433 ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
434 rlen + args->minleft);
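/*
 * Example of the mod/prod trimming above (hypothetical values): with prod = 4
 * and mod = 1, a candidate length of 11 gives k = 11 % 4 = 3 > mod, so the
 * length is trimmed to 11 - (3 - 1) = 9, which satisfies 9 = 2 * prod + mod.
 */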
439 * Update the two btrees, logically removing from freespace the extent
440 * starting at rbno, rlen blocks. The extent is contained within the
441 * actual (current) free extent fbno for flen blocks.
442 * Flags are passed in indicating whether the cursors are set to the
445 STATIC int /* error code */
446 xfs_alloc_fixup_trees(
447 struct xfs_btree_cur *cnt_cur, /* cursor for by-size btree */
448 struct xfs_btree_cur *bno_cur, /* cursor for by-block btree */
449 xfs_agblock_t fbno, /* starting block of free extent */
450 xfs_extlen_t flen, /* length of free extent */
451 xfs_agblock_t rbno, /* starting block of returned extent */
452 xfs_extlen_t rlen, /* length of returned extent */
453 int flags) /* flags, XFSA_FIXUP_... */
455 int error; /* error code */
456 int i; /* operation results */
457 xfs_agblock_t nfbno1; /* first new free startblock */
458 xfs_agblock_t nfbno2; /* second new free startblock */
459 xfs_extlen_t nflen1=0; /* first new free length */
460 xfs_extlen_t nflen2=0; /* second new free length */
461 struct xfs_mount *mp;
466 * Look up the record in the by-size tree if necessary.
468 if (flags & XFSA_FIXUP_CNT_OK) {
470 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
472 if (XFS_IS_CORRUPT(mp,
476 return -EFSCORRUPTED;
479 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
481 if (XFS_IS_CORRUPT(mp, i != 1))
482 return -EFSCORRUPTED;
485 * Look up the record in the by-block tree if necessary.
487 if (flags & XFSA_FIXUP_BNO_OK) {
489 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
491 if (XFS_IS_CORRUPT(mp,
495 return -EFSCORRUPTED;
498 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
500 if (XFS_IS_CORRUPT(mp, i != 1))
501 return -EFSCORRUPTED;
505 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
506 struct xfs_btree_block *bnoblock;
507 struct xfs_btree_block *cntblock;
509 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_levels[0].bp);
510 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_levels[0].bp);
512 if (XFS_IS_CORRUPT(mp,
513 bnoblock->bb_numrecs !=
514 cntblock->bb_numrecs))
515 return -EFSCORRUPTED;
520 * Deal with all four cases: the allocated record is contained
521 * within the freespace record, so we can have new freespace
522 * at either (or both) end, or no freespace remaining.
524 if (rbno == fbno && rlen == flen)
525 nfbno1 = nfbno2 = NULLAGBLOCK;
526 else if (rbno == fbno) {
527 nfbno1 = rbno + rlen;
528 nflen1 = flen - rlen;
529 nfbno2 = NULLAGBLOCK;
530 } else if (rbno + rlen == fbno + flen) {
532 nflen1 = flen - rlen;
533 nfbno2 = NULLAGBLOCK;
536 nflen1 = rbno - fbno;
537 nfbno2 = rbno + rlen;
538 nflen2 = (fbno + flen) - nfbno2;
541 * Delete the entry from the by-size btree.
543 if ((error = xfs_btree_delete(cnt_cur, &i)))
545 if (XFS_IS_CORRUPT(mp, i != 1))
546 return -EFSCORRUPTED;
548 * Add new by-size btree entry(s).
550 if (nfbno1 != NULLAGBLOCK) {
551 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
553 if (XFS_IS_CORRUPT(mp, i != 0))
554 return -EFSCORRUPTED;
555 if ((error = xfs_btree_insert(cnt_cur, &i)))
557 if (XFS_IS_CORRUPT(mp, i != 1))
558 return -EFSCORRUPTED;
560 if (nfbno2 != NULLAGBLOCK) {
561 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
563 if (XFS_IS_CORRUPT(mp, i != 0))
564 return -EFSCORRUPTED;
565 if ((error = xfs_btree_insert(cnt_cur, &i)))
567 if (XFS_IS_CORRUPT(mp, i != 1))
568 return -EFSCORRUPTED;
571 * Fix up the by-block btree entry(s).
573 if (nfbno1 == NULLAGBLOCK) {
575 * No remaining freespace, just delete the by-block tree entry.
577 if ((error = xfs_btree_delete(bno_cur, &i)))
579 if (XFS_IS_CORRUPT(mp, i != 1))
580 return -EFSCORRUPTED;
583 * Update the by-block entry to start later|be shorter.
585 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
588 if (nfbno2 != NULLAGBLOCK) {
590 * 2 resulting free entries, need to add one.
592 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
594 if (XFS_IS_CORRUPT(mp, i != 0))
595 return -EFSCORRUPTED;
596 if ((error = xfs_btree_insert(bno_cur, &i)))
598 if (XFS_IS_CORRUPT(mp, i != 1))
599 return -EFSCORRUPTED;
604 static xfs_failaddr_t
608 struct xfs_mount *mp = bp->b_mount;
609 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
610 __be32 *agfl_bno = xfs_buf_to_agfl_bno(bp);
614 * There is no verification of non-crc AGFLs because mkfs does not
615 * initialise the AGFL to zero or NULL. Hence the only valid part of the
616 * AGFL is what the AGF says is active. We can't get to the AGF, so we
617 * can't verify just those entries are valid.
619 if (!xfs_has_crc(mp))
622 if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
623 return __this_address;
624 if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
625 return __this_address;
627 * during growfs operations, the perag is not fully initialised,
628 * so we can't use it for any useful checking. growfs ensures we can't
629 * use it by using uncached buffers that don't have the perag attached
630 * so we can detect and avoid this problem.
632 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
633 return __this_address;
635 for (i = 0; i < xfs_agfl_size(mp); i++) {
636 if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
637 be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
638 return __this_address;
641 if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
642 return __this_address;
647 xfs_agfl_read_verify(
650 struct xfs_mount *mp = bp->b_mount;
654 * There is no verification of non-crc AGFLs because mkfs does not
655 * initialise the AGFL to zero or NULL. Hence the only valid part of the
656 * AGFL is what the AGF says is active. We can't get to the AGF, so we
657 * can't verify just those entries are valid.
659 if (!xfs_has_crc(mp))
662 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
663 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
665 fa = xfs_agfl_verify(bp);
667 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
672 xfs_agfl_write_verify(
675 struct xfs_mount *mp = bp->b_mount;
676 struct xfs_buf_log_item *bip = bp->b_log_item;
679 /* no verification of non-crc AGFLs */
680 if (!xfs_has_crc(mp))
683 fa = xfs_agfl_verify(bp);
685 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
690 XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
692 xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
695 const struct xfs_buf_ops xfs_agfl_buf_ops = {
697 .magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
698 .verify_read = xfs_agfl_read_verify,
699 .verify_write = xfs_agfl_write_verify,
700 .verify_struct = xfs_agfl_verify,
704 * Read in the allocation group free block array.
708 xfs_mount_t *mp, /* mount point structure */
709 xfs_trans_t *tp, /* transaction pointer */
710 xfs_agnumber_t agno, /* allocation group number */
711 struct xfs_buf **bpp) /* buffer for the ag free block array */
713 struct xfs_buf *bp; /* return value */
716 ASSERT(agno != NULLAGNUMBER);
717 error = xfs_trans_read_buf(
718 mp, tp, mp->m_ddev_targp,
719 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
720 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
723 xfs_buf_set_ref(bp, XFS_AGFL_REF);
729 xfs_alloc_update_counters(
730 struct xfs_trans *tp,
731 struct xfs_buf *agbp,
734 struct xfs_agf *agf = agbp->b_addr;
736 agbp->b_pag->pagf_freeblks += len;
737 be32_add_cpu(&agf->agf_freeblks, len);
739 if (unlikely(be32_to_cpu(agf->agf_freeblks) >
740 be32_to_cpu(agf->agf_length))) {
741 xfs_buf_mark_corrupt(agbp);
742 return -EFSCORRUPTED;
745 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
750 * Block allocation algorithm and data structures.
752 struct xfs_alloc_cur {
753 struct xfs_btree_cur *cnt; /* btree cursors */
754 struct xfs_btree_cur *bnolt;
755 struct xfs_btree_cur *bnogt;
756 xfs_extlen_t cur_len;/* current search length */
757 xfs_agblock_t rec_bno;/* extent startblock */
758 xfs_extlen_t rec_len;/* extent length */
759 xfs_agblock_t bno; /* alloc bno */
760 xfs_extlen_t len; /* alloc len */
761 xfs_extlen_t diff; /* diff from search bno */
762 unsigned int busy_gen;/* busy state */
767 * Set up cursors, etc. in the extent allocation cursor. This function can be
768 * called multiple times to reset an initialized structure without having to
769 * reallocate cursors.
773 struct xfs_alloc_arg *args,
774 struct xfs_alloc_cur *acur)
779 ASSERT(args->alignment == 1 || args->type != XFS_ALLOCTYPE_THIS_BNO);
781 acur->cur_len = args->maxlen;
791 * Perform an initial cntbt lookup to check for availability of maxlen
792 * extents. If this fails, we'll return -ENOSPC to signal the caller to
793 * attempt a small allocation.
796 acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
797 args->agbp, args->pag, XFS_BTNUM_CNT);
798 error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
803 * Allocate the bnobt left and right search cursors.
806 acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
807 args->agbp, args->pag, XFS_BTNUM_BNO);
809 acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
810 args->agbp, args->pag, XFS_BTNUM_BNO);
811 return i == 1 ? 0 : -ENOSPC;
816 struct xfs_alloc_cur *acur,
819 int cur_error = XFS_BTREE_NOERROR;
822 cur_error = XFS_BTREE_ERROR;
825 xfs_btree_del_cursor(acur->cnt, cur_error);
827 xfs_btree_del_cursor(acur->bnolt, cur_error);
829 xfs_btree_del_cursor(acur->bnogt, cur_error);
830 acur->cnt = acur->bnolt = acur->bnogt = NULL;
834 * Check an extent for allocation and track the best available candidate in the
835 * allocation structure. The cursor is deactivated if it has entered an out of
 * range state based on allocation arguments. Optionally return the extent
 * geometry and allocation status if requested by the caller.
841 struct xfs_alloc_arg *args,
842 struct xfs_alloc_cur *acur,
843 struct xfs_btree_cur *cur,
847 xfs_agblock_t bno, bnoa, bnew;
848 xfs_extlen_t len, lena, diff = -1;
850 unsigned busy_gen = 0;
851 bool deactivate = false;
852 bool isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;
856 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
859 if (XFS_IS_CORRUPT(args->mp, i != 1))
860 return -EFSCORRUPTED;
863 * Check minlen and deactivate a cntbt cursor if out of acceptable size
864 * range (i.e., walking backwards looking for a minlen extent).
866 if (len < args->minlen) {
867 deactivate = !isbnobt;
871 busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
875 acur->busy_gen = busy_gen;
876 /* deactivate a bnobt cursor outside of locality range */
877 if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
878 deactivate = isbnobt;
881 if (lena < args->minlen)
884 args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
885 xfs_alloc_fix_len(args);
886 ASSERT(args->len >= args->minlen);
887 if (args->len < acur->len)
891 * We have an aligned record that satisfies minlen and beats or matches
892 * the candidate extent size. Compare locality for near allocation mode.
894 ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
895 diff = xfs_alloc_compute_diff(args->agbno, args->len,
896 args->alignment, args->datatype,
898 if (bnew == NULLAGBLOCK)
902 * Deactivate a bnobt cursor with worse locality than the current best.
904 if (diff > acur->diff) {
905 deactivate = isbnobt;
909 ASSERT(args->len > acur->len ||
910 (args->len == acur->len && diff <= acur->diff));
914 acur->len = args->len;
919 * We're done if we found a perfect allocation. This only deactivates
920 * the current cursor, but this is just an optimization to terminate a
921 * cntbt search that otherwise runs to the edge of the tree.
923 if (acur->diff == 0 && acur->len == args->maxlen)
927 cur->bc_ag.abt.active = false;
928 trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
934 * Complete an allocation of a candidate extent. Remove the extent from both
935 * trees and update the args structure.
938 xfs_alloc_cur_finish(
939 struct xfs_alloc_arg *args,
940 struct xfs_alloc_cur *acur)
942 struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
945 ASSERT(acur->cnt && acur->bnolt);
946 ASSERT(acur->bno >= acur->rec_bno);
947 ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
948 ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));
950 error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
951 acur->rec_len, acur->bno, acur->len, 0);
955 args->agbno = acur->bno;
956 args->len = acur->len;
959 trace_xfs_alloc_cur(args);
964 * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
965 * bno optimized lookup to search for extents with ideal size and locality.
968 xfs_alloc_cntbt_iter(
969 struct xfs_alloc_arg *args,
970 struct xfs_alloc_cur *acur)
972 struct xfs_btree_cur *cur = acur->cnt;
974 xfs_extlen_t len, cur_len;
978 if (!xfs_alloc_cur_active(cur))
981 /* locality optimized lookup */
982 cur_len = acur->cur_len;
983 error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
988 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
992 /* check the current record and update search length from it */
993 error = xfs_alloc_cur_check(args, acur, cur, &i);
996 ASSERT(len >= acur->cur_len);
1000 * We looked up the first record >= [agbno, len] above. The agbno is a
1001 * secondary key and so the current record may lie just before or after
1002 * agbno. If it is past agbno, check the previous record too so long as
1003 * the length matches as it may be closer. Don't check a smaller record
1004 * because that could deactivate our cursor.
1006 if (bno > args->agbno) {
1007 error = xfs_btree_decrement(cur, 0, &i);
1009 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1010 if (!error && i && len == acur->cur_len)
1011 error = xfs_alloc_cur_check(args, acur, cur,
1019 * Increment the search key until we find at least one allocation
1020 * candidate or if the extent we found was larger. Otherwise, double the
1021 * search key to optimize the search. Efficiency is more important here
1022 * than absolute best locality.
1025 if (!acur->len || acur->cur_len >= cur_len)
1028 acur->cur_len = cur_len;
1034 * Deal with the case where only small freespaces remain. Either return the
1035 * contents of the last freespace record, or allocate space from the freelist if
1036 * there is nothing in the tree.
1038 STATIC int /* error */
1039 xfs_alloc_ag_vextent_small(
1040 struct xfs_alloc_arg *args, /* allocation argument structure */
1041 struct xfs_btree_cur *ccur, /* optional by-size cursor */
1042 xfs_agblock_t *fbnop, /* result block number */
1043 xfs_extlen_t *flenp, /* result length */
1044 int *stat) /* status: 0-freelist, 1-normal/none */
1046 struct xfs_agf *agf = args->agbp->b_addr;
1048 xfs_agblock_t fbno = NULLAGBLOCK;
1049 xfs_extlen_t flen = 0;
1053 * If a cntbt cursor is provided, try to allocate the largest record in
1054 * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
1055 * allocation. Make sure to respect minleft even when pulling from the
1059 error = xfs_btree_decrement(ccur, 0, &i);
1063 error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
1066 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1067 error = -EFSCORRUPTED;
1073 if (args->minlen != 1 || args->alignment != 1 ||
1074 args->resv == XFS_AG_RESV_AGFL ||
1075 be32_to_cpu(agf->agf_flcount) <= args->minleft)
1078 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1081 if (fbno == NULLAGBLOCK)
1084 xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
1085 (args->datatype & XFS_ALLOC_NOBUSY));
1087 if (args->datatype & XFS_ALLOC_USERDATA) {
1090 error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
1091 XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
1092 args->mp->m_bsize, 0, &bp);
1095 xfs_trans_binval(args->tp, bp);
1097 *fbnop = args->agbno = fbno;
1098 *flenp = args->len = 1;
1099 if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
1100 error = -EFSCORRUPTED;
1103 args->wasfromfl = 1;
1104 trace_xfs_alloc_small_freelist(args);
1107 * If we're feeding an AGFL block to something that doesn't live in the
1108 * free space, we need to clear out the OWN_AG rmap.
1110 error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
1111 &XFS_RMAP_OINFO_AG);
1120 * Can't do the allocation, give up.
1122 if (flen < args->minlen) {
1123 args->agbno = NULLAGBLOCK;
1124 trace_xfs_alloc_small_notenough(args);
1130 trace_xfs_alloc_small_done(args);
1134 trace_xfs_alloc_small_error(args);
1139 * Allocate a variable extent in the allocation group agno.
1140 * Type and bno are used to determine where in the allocation group the
1141 * extent will start.
1142 * Extent's length (returned in *len) will be between minlen and maxlen,
1143 * and of the form k * prod + mod unless there's nothing that large.
1144 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1146 STATIC int /* error */
1147 xfs_alloc_ag_vextent(
1148 xfs_alloc_arg_t *args) /* argument structure for allocation */
1152 ASSERT(args->minlen > 0);
1153 ASSERT(args->maxlen > 0);
1154 ASSERT(args->minlen <= args->maxlen);
1155 ASSERT(args->mod < args->prod);
1156 ASSERT(args->alignment > 0);
1159 * Branch to correct routine based on the type.
1161 args->wasfromfl = 0;
1162 switch (args->type) {
1163 case XFS_ALLOCTYPE_THIS_AG:
1164 error = xfs_alloc_ag_vextent_size(args);
1166 case XFS_ALLOCTYPE_NEAR_BNO:
1167 error = xfs_alloc_ag_vextent_near(args);
1169 case XFS_ALLOCTYPE_THIS_BNO:
1170 error = xfs_alloc_ag_vextent_exact(args);
1177 if (error || args->agbno == NULLAGBLOCK)
1180 ASSERT(args->len >= args->minlen);
1181 ASSERT(args->len <= args->maxlen);
1182 ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
1183 ASSERT(args->agbno % args->alignment == 0);
1185 /* if not file data, insert new block into the reverse map btree */
1186 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
1187 error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
1188 args->agbno, args->len, &args->oinfo);
1193 if (!args->wasfromfl) {
1194 error = xfs_alloc_update_counters(args->tp, args->agbp,
1195 -((long)(args->len)));
1199 ASSERT(!xfs_extent_busy_search(args->mp, args->pag,
1200 args->agbno, args->len));
1203 xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
1205 XFS_STATS_INC(args->mp, xs_allocx);
1206 XFS_STATS_ADD(args->mp, xs_allocb, args->len);
1211 * Allocate a variable extent at exactly agno/bno.
1212 * Extent's length (returned in *len) will be between minlen and maxlen,
1213 * and of the form k * prod + mod unless there's nothing that large.
1214 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
1216 STATIC int /* error */
1217 xfs_alloc_ag_vextent_exact(
1218 xfs_alloc_arg_t *args) /* allocation argument structure */
1220 struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
1221 struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */
1222 struct xfs_btree_cur *cnt_cur;/* by count btree cursor */
1224 xfs_agblock_t fbno; /* start block of found extent */
1225 xfs_extlen_t flen; /* length of found extent */
1226 xfs_agblock_t tbno; /* start block of busy extent */
1227 xfs_extlen_t tlen; /* length of busy extent */
1228 xfs_agblock_t tend; /* end block of busy extent */
1229 int i; /* success/failure of operation */
1232 ASSERT(args->alignment == 1);
1235 * Allocate/initialize a cursor for the by-number freespace btree.
1237 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1238 args->pag, XFS_BTNUM_BNO);
1241 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
1242 * Look for the closest free block <= bno, it must contain bno
1243 * if any free block does.
1245 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
1252 * Grab the freespace record.
1254 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
1257 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1258 error = -EFSCORRUPTED;
1261 ASSERT(fbno <= args->agbno);
1264 * Check for overlapping busy extents.
1268 xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
1271 * Give up if the start of the extent is busy, or the freespace isn't
1272 * long enough for the minimum request.
1274 if (tbno > args->agbno)
1276 if (tlen < args->minlen)
1279 if (tend < args->agbno + args->minlen)
1283 * End of extent will be smaller of the freespace end and the
1284 * maximal requested end.
1286 * Fix the length according to mod and prod if given.
1288 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
1290 xfs_alloc_fix_len(args);
1291 ASSERT(args->agbno + args->len <= tend);
1294 * We are allocating agbno for args->len
1295 * Allocate/initialize a cursor for the by-size btree.
1297 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1298 args->pag, XFS_BTNUM_CNT);
1299 ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
1300 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
1301 args->len, XFSA_FIXUP_BNO_OK);
1303 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1307 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1308 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1310 args->wasfromfl = 0;
1311 trace_xfs_alloc_exact_done(args);
1315 /* Didn't find it, return null. */
1316 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1317 args->agbno = NULLAGBLOCK;
1318 trace_xfs_alloc_exact_notfound(args);
1322 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1323 trace_xfs_alloc_exact_error(args);
1328 * Search a given number of btree records in a given direction. Check each
1329 * record against the good extent we've already found.
1332 xfs_alloc_walk_iter(
1333 struct xfs_alloc_arg *args,
1334 struct xfs_alloc_cur *acur,
1335 struct xfs_btree_cur *cur,
1337 bool find_one, /* quit on first candidate */
1338 int count, /* rec count (-1 for infinite) */
1347 * Search so long as the cursor is active or we find a better extent.
1348 * The cursor is deactivated if it extends beyond the range of the
1349 * current allocation candidate.
1351 while (xfs_alloc_cur_active(cur) && count) {
1352 error = xfs_alloc_cur_check(args, acur, cur, &i);
1360 if (!xfs_alloc_cur_active(cur))
1364 error = xfs_btree_increment(cur, 0, &i);
1366 error = xfs_btree_decrement(cur, 0, &i);
1370 cur->bc_ag.abt.active = false;
1380 * Search the by-bno and by-size btrees in parallel in search of an extent with
1381 * ideal locality based on the NEAR mode ->agbno locality hint.
1384 xfs_alloc_ag_vextent_locality(
1385 struct xfs_alloc_arg *args,
1386 struct xfs_alloc_cur *acur,
1389 struct xfs_btree_cur *fbcur = NULL;
1394 ASSERT(acur->len == 0);
1395 ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
1399 error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
1402 error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
1405 error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
1410 * Search the bnobt and cntbt in parallel. Search the bnobt left and
1411 * right and lookup the closest extent to the locality hint for each
1412 * extent size key in the cntbt. The entire search terminates
1413 * immediately on a bnobt hit because that means we've found best case
1414 * locality. Otherwise the search continues until the cntbt cursor runs
1415 * off the end of the tree. If no allocation candidate is found at this
1416 * point, give up on locality, walk backwards from the end of the cntbt
1417 * and take the first available extent.
1419 * The parallel tree searches balance each other out to provide fairly
1420 * consistent performance for various situations. The bnobt search can
1421 * have pathological behavior in the worst case scenario of larger
1422 * allocation requests and fragmented free space. On the other hand, the
1423 * bnobt is able to satisfy most smaller allocation requests much more
1424 * quickly than the cntbt. The cntbt search can sift through fragmented
1425 * free space and sets of free extents for larger allocation requests
1426 * more quickly than the bnobt. Since the locality hint is just a hint
1427 * and we don't want to scan the entire bnobt for perfect locality, the
1428 * cntbt search essentially bounds the bnobt search such that we can
1429 * find good enough locality at reasonable performance in most cases.
1431 while (xfs_alloc_cur_active(acur->bnolt) ||
1432 xfs_alloc_cur_active(acur->bnogt) ||
1433 xfs_alloc_cur_active(acur->cnt)) {
1435 trace_xfs_alloc_cur_lookup(args);
1438 * Search the bnobt left and right. In the case of a hit, finish
1439 * the search in the opposite direction and we're done.
1441 error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
1446 trace_xfs_alloc_cur_left(args);
1447 fbcur = acur->bnogt;
1451 error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
1456 trace_xfs_alloc_cur_right(args);
1457 fbcur = acur->bnolt;
1463 * Check the extent with best locality based on the current
1464 * extent size search key and keep track of the best candidate.
1466 error = xfs_alloc_cntbt_iter(args, acur);
1469 if (!xfs_alloc_cur_active(acur->cnt)) {
1470 trace_xfs_alloc_cur_lookup_done(args);
1476 * If we failed to find anything due to busy extents, return empty
1477 * handed so the caller can flush and retry. If no busy extents were
1478 * found, walk backwards from the end of the cntbt as a last resort.
1480 if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
1481 error = xfs_btree_decrement(acur->cnt, 0, &i);
1485 acur->cnt->bc_ag.abt.active = true;
1492 * Search in the opposite direction for a better entry in the case of
1493 * a bnobt hit or walk backwards from the end of the cntbt.
1496 error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
1508 /* Check the last block of the cnt btree for allocations. */
1510 xfs_alloc_ag_vextent_lastblock(
1511 struct xfs_alloc_arg *args,
1512 struct xfs_alloc_cur *acur,
1521 /* Randomly don't execute the first algorithm. */
1522 if (prandom_u32() & 1)
1527 * Start from the entry that lookup found, sequence through all larger
1528 * free blocks. If we're actually pointing at a record smaller than
1529 * maxlen, go to the start of this block, and skip all those smaller
1532 if (*len || args->alignment > 1) {
1533 acur->cnt->bc_levels[0].ptr = 1;
1535 error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
1538 if (XFS_IS_CORRUPT(args->mp, i != 1))
1539 return -EFSCORRUPTED;
1540 if (*len >= args->minlen)
1542 error = xfs_btree_increment(acur->cnt, 0, &i);
1546 ASSERT(*len >= args->minlen);
1551 error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
1556 * It didn't work. We COULD be in a case where there's a good record
1557 * somewhere, so try again.
1562 trace_xfs_alloc_near_first(args);
1568 * Allocate a variable extent near bno in the allocation group agno.
1569 * Extent's length (returned in len) will be between minlen and maxlen,
1570 * and of the form k * prod + mod unless there's nothing that large.
1571 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1574 xfs_alloc_ag_vextent_near(
1575 struct xfs_alloc_arg *args)
1577 struct xfs_alloc_cur acur = {};
1578 int error; /* error code */
1579 int i; /* result code, temporary */
1583 /* handle uninitialized agbno range so caller doesn't have to */
1584 if (!args->min_agbno && !args->max_agbno)
1585 args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1586 ASSERT(args->min_agbno <= args->max_agbno);
1588 /* clamp agbno to the range if it's outside */
1589 if (args->agbno < args->min_agbno)
1590 args->agbno = args->min_agbno;
1591 if (args->agbno > args->max_agbno)
1592 args->agbno = args->max_agbno;
1598 * Set up cursors and see if there are any free extents as big as
1599 * maxlen. If not, pick the last entry in the tree unless the tree is
1602 error = xfs_alloc_cur_setup(args, &acur);
1603 if (error == -ENOSPC) {
1604 error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
1608 if (i == 0 || len == 0) {
1609 trace_xfs_alloc_near_noentry(args);
1619 * If the requested extent is large wrt the freespaces available
1620 * in this a.g., then the cursor will be pointing to a btree entry
1621 * near the right edge of the tree. If it's in the last btree leaf
1622 * block, then we just examine all the entries in that block
1623 * that are big enough, and pick the best one.
1625 if (xfs_btree_islastblock(acur.cnt, 0)) {
1626 bool allocated = false;
1628 error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
1637 * Second algorithm. Combined cntbt and bnobt search to find ideal
1640 error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
1645 * If we couldn't get anything, give up.
1649 trace_xfs_alloc_near_busy(args);
1650 xfs_extent_busy_flush(args->mp, args->pag,
1654 trace_xfs_alloc_size_neither(args);
1655 args->agbno = NULLAGBLOCK;
1660 /* fix up btrees on a successful allocation */
1661 error = xfs_alloc_cur_finish(args, &acur);
1664 xfs_alloc_cur_close(&acur, error);
1669 * Allocate a variable extent anywhere in the allocation group agno.
1670 * Extent's length (returned in len) will be between minlen and maxlen,
1671 * and of the form k * prod + mod unless there's nothing that large.
1672 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1674 STATIC int /* error */
1675 xfs_alloc_ag_vextent_size(
1676 xfs_alloc_arg_t *args) /* allocation argument structure */
1678 struct xfs_agf *agf = args->agbp->b_addr;
1679 struct xfs_btree_cur *bno_cur; /* cursor for bno btree */
1680 struct xfs_btree_cur *cnt_cur; /* cursor for cnt btree */
1681 int error; /* error result */
1682 xfs_agblock_t fbno; /* start of found freespace */
1683 xfs_extlen_t flen; /* length of found freespace */
1684 int i; /* temp status variable */
1685 xfs_agblock_t rbno; /* returned block number */
1686 xfs_extlen_t rlen; /* length of returned extent */
1692 * Allocate and initialize a cursor for the by-size btree.
1694 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1695 args->pag, XFS_BTNUM_CNT);
1699 * Look for an entry >= maxlen+alignment-1 blocks.
1701 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1702 args->maxlen + args->alignment - 1, &i)))
1706 * If none then we have to settle for a smaller extent. In the case that
1707 * there are no large extents, this will return the last entry in the
1708 * tree unless the tree is empty. In the case that there are only busy
1709 * large extents, this will return the largest small extent unless there
1710 * are no smaller extents available.
1713 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1717 if (i == 0 || flen == 0) {
1718 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1719 trace_xfs_alloc_size_noentry(args);
1723 busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1727 * Search for a non-busy extent that is large enough.
1730 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1733 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1734 error = -EFSCORRUPTED;
1738 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1739 &rbno, &rlen, &busy_gen);
1741 if (rlen >= args->maxlen)
1744 error = xfs_btree_increment(cnt_cur, 0, &i);
1749 * Our only valid extents must have been busy.
1750 * Make it unbusy by forcing the log out and
1753 xfs_btree_del_cursor(cnt_cur,
1755 trace_xfs_alloc_size_busy(args);
1756 xfs_extent_busy_flush(args->mp,
1757 args->pag, busy_gen);
1764 * In the first case above, we got the last entry in the
1765 * by-size btree. Now we check to see if the space hits maxlen
1766 * once aligned; if not, we search left for something better.
1767 * This can't happen in the second case above.
1769 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1770 if (XFS_IS_CORRUPT(args->mp,
1773 rbno + rlen > fbno + flen))) {
1774 error = -EFSCORRUPTED;
1777 if (rlen < args->maxlen) {
1778 xfs_agblock_t bestfbno;
1779 xfs_extlen_t bestflen;
1780 xfs_agblock_t bestrbno;
1781 xfs_extlen_t bestrlen;
1788 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1792 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1795 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1796 error = -EFSCORRUPTED;
1799 if (flen < bestrlen)
1801 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1802 &rbno, &rlen, &busy_gen);
1803 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1804 if (XFS_IS_CORRUPT(args->mp,
1807 rbno + rlen > fbno + flen))) {
1808 error = -EFSCORRUPTED;
1811 if (rlen > bestrlen) {
1816 if (rlen == args->maxlen)
1820 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1823 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1824 error = -EFSCORRUPTED;
1832 args->wasfromfl = 0;
1834 * Fix up the length.
1837 if (rlen < args->minlen) {
1839 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1840 trace_xfs_alloc_size_busy(args);
1841 xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
1846 xfs_alloc_fix_len(args);
1849 if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
1850 error = -EFSCORRUPTED;
1854 * Allocate and initialize a cursor for the by-block tree.
1856 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1857 args->pag, XFS_BTNUM_BNO);
1858 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1859 rbno, rlen, XFSA_FIXUP_CNT_OK)))
1861 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1862 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1863 cnt_cur = bno_cur = NULL;
1866 if (XFS_IS_CORRUPT(args->mp,
1867 args->agbno + args->len >
1868 be32_to_cpu(agf->agf_length))) {
1869 error = -EFSCORRUPTED;
1872 trace_xfs_alloc_size_done(args);
1876 trace_xfs_alloc_size_error(args);
1878 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1880 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1884 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1885 trace_xfs_alloc_size_nominleft(args);
1886 args->agbno = NULLAGBLOCK;
1891 * Free the extent starting at agno/bno for length.
1895 struct xfs_trans *tp,
1896 struct xfs_buf *agbp,
1897 xfs_agnumber_t agno,
1900 const struct xfs_owner_info *oinfo,
1901 enum xfs_ag_resv_type type)
1903 struct xfs_mount *mp;
1904 struct xfs_btree_cur *bno_cur;
1905 struct xfs_btree_cur *cnt_cur;
1906 xfs_agblock_t gtbno; /* start of right neighbor */
1907 xfs_extlen_t gtlen; /* length of right neighbor */
1908 xfs_agblock_t ltbno; /* start of left neighbor */
1909 xfs_extlen_t ltlen; /* length of left neighbor */
1910 xfs_agblock_t nbno; /* new starting block of freesp */
1911 xfs_extlen_t nlen; /* new length of freespace */
1912 int haveleft; /* have a left neighbor */
1913 int haveright; /* have a right neighbor */
1916 struct xfs_perag *pag = agbp->b_pag;
1918 bno_cur = cnt_cur = NULL;
1921 if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1922 error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
1928 * Allocate and initialize a cursor for the by-block btree.
1930 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
1932 * Look for a neighboring block on the left (lower block numbers)
1933 * that is contiguous with this space.
1935 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1939 * There is a block to our left.
1941 if ((error = xfs_alloc_get_rec(bno_cur, <bno, <len, &i)))
1943 if (XFS_IS_CORRUPT(mp, i != 1)) {
1944 error = -EFSCORRUPTED;
1948 * It's not contiguous, though.
1950 if (ltbno + ltlen < bno)
1954 * If this failure happens the request to free this
1955 * space was invalid, it's (partly) already free.
1958 if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
1959 error = -EFSCORRUPTED;
1965 * Look for a neighboring block on the right (higher block numbers)
1966 * that is contiguous with this space.
1968 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1972 * There is a block to our right.
1974 if ((error = xfs_alloc_get_rec(bno_cur, >bno, >len, &i)))
1976 if (XFS_IS_CORRUPT(mp, i != 1)) {
1977 error = -EFSCORRUPTED;
1981 * It's not contiguous, though.
1983 if (bno + len < gtbno)
1987 * If this failure happens the request to free this
1988 * space was invalid, it's (partly) already free.
1991 if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
1992 error = -EFSCORRUPTED;
1998 * Now allocate and initialize a cursor for the by-size tree.
2000 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_CNT);
2002 * Have both left and right contiguous neighbors.
2003 * Merge all three into a single free block.
2005 if (haveleft && haveright) {
2007 * Delete the old by-size entry on the left.
2009 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2011 if (XFS_IS_CORRUPT(mp, i != 1)) {
2012 error = -EFSCORRUPTED;
2015 if ((error = xfs_btree_delete(cnt_cur, &i)))
2017 if (XFS_IS_CORRUPT(mp, i != 1)) {
2018 error = -EFSCORRUPTED;
2022 * Delete the old by-size entry on the right.
2024 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2026 if (XFS_IS_CORRUPT(mp, i != 1)) {
2027 error = -EFSCORRUPTED;
2030 if ((error = xfs_btree_delete(cnt_cur, &i)))
2032 if (XFS_IS_CORRUPT(mp, i != 1)) {
2033 error = -EFSCORRUPTED;
2037 * Delete the old by-block entry for the right block.
2039 if ((error = xfs_btree_delete(bno_cur, &i)))
2041 if (XFS_IS_CORRUPT(mp, i != 1)) {
2042 error = -EFSCORRUPTED;
2046 * Move the by-block cursor back to the left neighbor.
2048 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2050 if (XFS_IS_CORRUPT(mp, i != 1)) {
2051 error = -EFSCORRUPTED;
2056 * Check that this is the right record: delete didn't
2057 * mangle the cursor.
2060 xfs_agblock_t xxbno;
2063 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
2066 if (XFS_IS_CORRUPT(mp,
2070 error = -EFSCORRUPTED;
2076 * Update remaining by-block entry to the new, joined block.
2079 nlen = len + ltlen + gtlen;
2080 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2084 * Have only a left contiguous neighbor.
2085 * Merge it together with the new freespace.
2087 else if (haveleft) {
2089 * Delete the old by-size entry on the left.
2091 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2093 if (XFS_IS_CORRUPT(mp, i != 1)) {
2094 error = -EFSCORRUPTED;
2097 if ((error = xfs_btree_delete(cnt_cur, &i)))
2099 if (XFS_IS_CORRUPT(mp, i != 1)) {
2100 error = -EFSCORRUPTED;
2104 * Back up the by-block cursor to the left neighbor, and
2105 * update its length.
2107 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2109 if (XFS_IS_CORRUPT(mp, i != 1)) {
2110 error = -EFSCORRUPTED;
2115 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2119 * Have only a right contiguous neighbor.
2120 * Merge it together with the new freespace.
2122 else if (haveright) {
2124 * Delete the old by-size entry on the right.
2126 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2128 if (XFS_IS_CORRUPT(mp, i != 1)) {
2129 error = -EFSCORRUPTED;
2132 if ((error = xfs_btree_delete(cnt_cur, &i)))
2134 if (XFS_IS_CORRUPT(mp, i != 1)) {
2135 error = -EFSCORRUPTED;
2139 * Update the starting block and length of the right
2140 * neighbor in the by-block tree.
2144 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2148 * No contiguous neighbors.
2149 * Insert the new freespace into the by-block tree.
2154 if ((error = xfs_btree_insert(bno_cur, &i)))
2156 if (XFS_IS_CORRUPT(mp, i != 1)) {
2157 error = -EFSCORRUPTED;
2161 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2164 * In all cases we need to insert the new freespace in the by-size tree.
2166 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
2168 if (XFS_IS_CORRUPT(mp, i != 0)) {
2169 error = -EFSCORRUPTED;
2172 if ((error = xfs_btree_insert(cnt_cur, &i)))
2174 if (XFS_IS_CORRUPT(mp, i != 1)) {
2175 error = -EFSCORRUPTED;
2178 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2182 * Update the freespace totals in the ag and superblock.
2184 error = xfs_alloc_update_counters(tp, agbp, len);
2185 xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len);
2189 XFS_STATS_INC(mp, xs_freex);
2190 XFS_STATS_ADD(mp, xs_freeb, len);
2192 trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
2197 trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
2199 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2201 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2206 * Visible (exported) allocation/free functions.
2207 * Some of these are used just by xfs_alloc_btree.c and this file.
2211 * Compute and fill in value of m_alloc_maxlevels.
2214 xfs_alloc_compute_maxlevels(
2215 xfs_mount_t *mp) /* file system mount structure */
2217 mp->m_alloc_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
2218 (mp->m_sb.sb_agblocks + 1) / 2);
2219 ASSERT(mp->m_alloc_maxlevels <= xfs_allocbt_maxlevels_ondisk());
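/*
 * The (sb_agblocks + 1) / 2 argument above is the worst-case record count for
 * a free space btree: an AG in which every other block is free consists
 * entirely of single-block extents, i.e. roughly half the AG size in records.
 */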
2223 * Find the length of the longest extent in an AG. The 'need' parameter
2224 * specifies how much space we're going to need for the AGFL and the
2225 * 'reserved' parameter tells us how many blocks in this AG are reserved for
2229 xfs_alloc_longest_free_extent(
2230 struct xfs_perag *pag,
2232 xfs_extlen_t reserved)
2234 xfs_extlen_t delta = 0;
2237 * If the AGFL needs a recharge, we'll have to subtract that from the
2240 if (need > pag->pagf_flcount)
2241 delta = need - pag->pagf_flcount;
2244 * If we cannot maintain others' reservations with space from the
2245 * not-longest freesp extents, we'll have to subtract /that/ from
2246 * the longest extent too.
2248 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
2249 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
2252 * If the longest extent is long enough to satisfy all the
2253 * reservations and AGFL rules in place, we can return this extent.
2255 if (pag->pagf_longest > delta)
2256 return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
2257 pag->pagf_longest - delta);
2259 /* Otherwise, let the caller try for 1 block if there's space. */
2260 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
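/*
 * Worked example (hypothetical counters): pagf_longest = 100, need = 6,
 * pagf_flcount = 2 and reserved = 0. The AGFL shortfall gives delta = 4, so
 * the usable longest extent reported is min(m_ag_max_usable, 100 - 4) = 96.
 */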
2264 * Compute the minimum length of the AGFL in the given AG. If @pag is NULL,
2265 * return the largest possible minimum length.
2268 xfs_alloc_min_freelist(
2269 struct xfs_mount *mp,
2270 struct xfs_perag *pag)
2272 /* AG btrees have at least 1 level. */
2273 static const uint8_t fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
2274 const uint8_t *levels = pag ? pag->pagf_levels : fake_levels;
2275 unsigned int min_free;
2277 ASSERT(mp->m_alloc_maxlevels > 0);
2279 /* space needed by-bno freespace btree */
2280 min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
2281 mp->m_alloc_maxlevels);
2282 /* space needed by-size freespace btree */
2283 min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
2284 mp->m_alloc_maxlevels);
2285 /* space needed reverse mapping used space btree */
2286 if (xfs_has_rmapbt(mp))
2287 min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
2288 mp->m_rmap_maxlevels);
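/*
 * Example (assumed btree heights): with 2-level bno and cnt btrees and no
 * rmapbt, the minimum AGFL length is (2 + 1) + (2 + 1) = 6 blocks.
 */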
2294 * Check if the operation we are fixing up the freelist for should go ahead or
2295 * not. If we are freeing blocks, we always allow it, otherwise the allocation
2296 * is dependent on whether the size and shape of free space available will
2297 * permit the requested allocation to take place.
2300 xfs_alloc_space_available(
2301 struct xfs_alloc_arg *args,
2302 xfs_extlen_t min_free,
2305 struct xfs_perag *pag = args->pag;
2306 xfs_extlen_t alloc_len, longest;
2307 xfs_extlen_t reservation; /* blocks that are still reserved */
2309 xfs_extlen_t agflcount;
2311 if (flags & XFS_ALLOC_FLAG_FREEING)
2314 reservation = xfs_ag_resv_needed(pag, args->resv);
2316 /* do we have enough contiguous free space for the allocation? */
2317 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2318 longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2319 if (longest < alloc_len)
2323 * Do we have enough free space remaining for the allocation? Don't
 * account extra agfl blocks because we are about to defer freeing them, making
2325 * making them unavailable until the current transaction commits.
2327 agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2328 available = (int)(pag->pagf_freeblks + agflcount -
2329 reservation - min_free - args->minleft);
2330 if (available < (int)max(args->total, alloc_len))
2334 * Clamp maxlen to the amount of free space available for the actual
2335 * extent allocation.
2337 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2338 args->maxlen = available;
2339 ASSERT(args->maxlen > 0);
2340 ASSERT(args->maxlen >= args->minlen);
2347 xfs_free_agfl_block(
2348 struct xfs_trans *tp,
2349 xfs_agnumber_t agno,
2350 xfs_agblock_t agbno,
2351 struct xfs_buf *agbp,
2352 struct xfs_owner_info *oinfo)
2357 error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
2362 error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
2363 XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
2364 tp->t_mountp->m_bsize, 0, &bp);
2367 xfs_trans_binval(tp, bp);
2373 * Check the agfl fields of the agf for inconsistency or corruption. The purpose
2374 * is to detect an agfl header padding mismatch between current and early v5
2375 * kernels. This problem manifests as a 1-slot size difference between the
2376 * on-disk flcount and the active [first, last] range of a wrapped agfl. This
2377 * may also catch variants of agfl count corruption unrelated to padding. Either
2378 * way, we'll reset the agfl and warn the user.
2380 * Return true if a reset is required before the agfl can be used, false
2384 xfs_agfl_needs_reset(
2385 struct xfs_mount *mp,
2386 struct xfs_agf *agf)
2388 uint32_t f = be32_to_cpu(agf->agf_flfirst);
2389 uint32_t l = be32_to_cpu(agf->agf_fllast);
2390 uint32_t c = be32_to_cpu(agf->agf_flcount);
2391 int agfl_size = xfs_agfl_size(mp);
2394 /* no agfl header on v4 supers */
2395 if (!xfs_has_crc(mp))
2399 * The agf read verifier catches severe corruption of these fields.
2400 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2401 * the verifier allows it.
2403 if (f >= agfl_size || l >= agfl_size)
2409 * Check consistency between the on-disk count and the active range. An
2410 * agfl padding mismatch manifests as an inconsistent flcount.
2415 active = agfl_size - f + l + 1;
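/*
 * Worked example of the wrapped-range arithmetic above (hypothetical
 * numbers): with xfs_agfl_size() == 119 slots, agf_flfirst == 117 and
 * agf_fllast == 2,
 *
 *	active = 119 - 117 + 2 + 1 = 5 slots in use
 *
 * If the on-disk agf_flcount disagrees with this value (e.g. it reads 4
 * due to the v5 header padding mismatch described above), the AGFL needs
 * to be reset before it can be used.
 */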
2423 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2424 * agfl content cannot be trusted. Warn the user that a repair is required to
2425 * recover leaked blocks.
2427 * The purpose of this mechanism is to handle filesystems affected by the agfl
2428 * header padding mismatch problem. A reset keeps the filesystem online with a
2429 * relatively minor free space accounting inconsistency rather than suffering the
2430 * inevitable crash from use of an invalid agfl block.
2434 struct xfs_trans *tp,
2435 struct xfs_buf *agbp,
2436 struct xfs_perag *pag)
2438 struct xfs_mount *mp = tp->t_mountp;
2439 struct xfs_agf *agf = agbp->b_addr;
2441 ASSERT(pag->pagf_agflreset);
2442 trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2445 "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2446 "Please unmount and run xfs_repair.",
2447 pag->pag_agno, pag->pagf_flcount);
2449 agf->agf_flfirst = 0;
2450 agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2451 agf->agf_flcount = 0;
2452 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2455 pag->pagf_flcount = 0;
2456 pag->pagf_agflreset = false;
2460 * Defer an AGFL block free. This is effectively equivalent to
2461 * xfs_free_extent_later() with some special handling particular to AGFL blocks.
2463 * Deferring AGFL frees helps prevent log reservation overruns due to too many
2464 * allocation operations in a transaction. AGFL frees are prone to this problem
2465 * because, for one, they are always freed one at a time. Further, an immediate
2466 * AGFL block free can cause a btree join and require another block free before
2467 * the real allocation can proceed. Deferring the free disconnects freeing up
2468 * the AGFL slot from freeing the block.
2471 xfs_defer_agfl_block(
2472 struct xfs_trans *tp,
2473 xfs_agnumber_t agno,
2474 xfs_fsblock_t agbno,
2475 struct xfs_owner_info *oinfo)
2477 struct xfs_mount *mp = tp->t_mountp;
2478 struct xfs_extent_free_item *new; /* new element */
2480 ASSERT(xfs_extfree_item_cache != NULL);
2481 ASSERT(oinfo != NULL);
2483 new = kmem_cache_zalloc(xfs_extfree_item_cache,
2484 GFP_KERNEL | __GFP_NOFAIL);
2485 new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
2486 new->xefi_blockcount = 1;
2487 new->xefi_owner = oinfo->oi_owner;
2489 trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
2491 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
2495 * Add the extent to the list of extents to be freed at transaction end.
2496 * The list is maintained sorted (by block number).
2499 __xfs_free_extent_later(
2500 struct xfs_trans *tp,
2503 const struct xfs_owner_info *oinfo,
2506 struct xfs_extent_free_item *new; /* new element */
2508 struct xfs_mount *mp = tp->t_mountp;
2509 xfs_agnumber_t agno;
2510 xfs_agblock_t agbno;
2512 ASSERT(bno != NULLFSBLOCK);
2514 ASSERT(len <= MAXEXTLEN);
2515 ASSERT(!isnullstartblock(bno));
2516 agno = XFS_FSB_TO_AGNO(mp, bno);
2517 agbno = XFS_FSB_TO_AGBNO(mp, bno);
2518 ASSERT(agno < mp->m_sb.sb_agcount);
2519 ASSERT(agbno < mp->m_sb.sb_agblocks);
2520 ASSERT(len < mp->m_sb.sb_agblocks);
2521 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
2523 ASSERT(xfs_extfree_item_cache != NULL);
2525 new = kmem_cache_zalloc(xfs_extfree_item_cache,
2526 GFP_KERNEL | __GFP_NOFAIL);
2527 new->xefi_startblock = bno;
2528 new->xefi_blockcount = (xfs_extlen_t)len;
2530 new->xefi_flags |= XFS_EFI_SKIP_DISCARD;
2532 ASSERT(oinfo->oi_offset == 0);
2534 if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2535 new->xefi_flags |= XFS_EFI_ATTR_FORK;
2536 if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2537 new->xefi_flags |= XFS_EFI_BMBT_BLOCK;
2538 new->xefi_owner = oinfo->oi_owner;
2540 new->xefi_owner = XFS_RMAP_OWN_NULL;
2542 trace_xfs_bmap_free_defer(tp->t_mountp,
2543 XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
2544 XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
2545 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
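/*
 * Sketch of a typical caller (illustrative only; @tp, @ip and @del are
 * assumed to be a permanent transaction, the owning inode and the extent
 * being unmapped, and the elided parameters above are assumed to be the
 * usual start block, length and skip-discard arguments in that order):
 *
 *	struct xfs_owner_info	oinfo;
 *
 *	xfs_rmap_ino_owner(&oinfo, ip->i_ino, XFS_DATA_FORK, 0);
 *	__xfs_free_extent_later(tp, del->br_startblock, del->br_blockcount,
 *			&oinfo, false);
 *
 * The extent is not returned to the free space btrees here; it is queued
 * as a deferred free and processed when the deferred operations run.
 */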
2550 * Check if an AGF has a free extent record whose length is equal to args->minlen.
2554 xfs_exact_minlen_extent_available(
2555 struct xfs_alloc_arg *args,
2556 struct xfs_buf *agbp,
2559 struct xfs_btree_cur *cnt_cur;
2564 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
2565 args->pag, XFS_BTNUM_CNT);
2566 error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
2571 error = -EFSCORRUPTED;
2575 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
2579 if (*stat == 1 && flen != args->minlen)
2583 xfs_btree_del_cursor(cnt_cur, error);
2590 * Decide whether to use this allocation group for this allocation.
2591 * If so, fix up the btree freelist's size.
2594 xfs_alloc_fix_freelist(
2595 struct xfs_alloc_arg *args, /* allocation argument structure */
2596 int flags) /* XFS_ALLOC_FLAG_... */
2598 struct xfs_mount *mp = args->mp;
2599 struct xfs_perag *pag = args->pag;
2600 struct xfs_trans *tp = args->tp;
2601 struct xfs_buf *agbp = NULL;
2602 struct xfs_buf *agflbp = NULL;
2603 struct xfs_alloc_arg targs; /* local allocation arguments */
2604 xfs_agblock_t bno; /* freelist block */
2605 xfs_extlen_t need; /* total blocks needed in freelist */
2608 /* deferred ops (AGFL block frees) require permanent transactions */
2609 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2611 if (!pag->pagf_init) {
2612 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2614 /* Couldn't lock the AGF so skip this AG. */
2615 if (error == -EAGAIN)
2622 * If this is a metadata preferred pag and we are user data then try
2623 * somewhere else if we are not being asked to try harder at this point.
2626 if (pag->pagf_metadata && (args->datatype & XFS_ALLOC_USERDATA) &&
2627 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2628 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2629 goto out_agbp_relse;
2632 need = xfs_alloc_min_freelist(mp, pag);
2633 if (!xfs_alloc_space_available(args, need, flags |
2634 XFS_ALLOC_FLAG_CHECK))
2635 goto out_agbp_relse;
2638 * Get the a.g. freespace buffer.
2639 * Can fail if we're not blocking on locks, and it's held.
2642 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2644 /* Couldn't lock the AGF so skip this AG. */
2645 if (error == -EAGAIN)
2651 /* reset a padding mismatched agfl before final free space check */
2652 if (pag->pagf_agflreset)
2653 xfs_agfl_reset(tp, agbp, pag);
2655 /* If there isn't enough total space or a large enough single extent, reject it. */
2656 need = xfs_alloc_min_freelist(mp, pag);
2657 if (!xfs_alloc_space_available(args, need, flags))
2658 goto out_agbp_relse;
2661 if (args->alloc_minlen_only) {
2664 error = xfs_exact_minlen_extent_available(args, agbp, &stat);
2666 goto out_agbp_relse;
2670 * Make the freelist shorter if it's too long.
2672 * Note that from this point onwards, we will always release the agf and
2673 * agfl buffers on error. This handles the case where we error out and
2674 * the buffers are clean or may not have been joined to the transaction
2675 * and hence need to be released manually. If they have been joined to
2676 * the transaction, then xfs_trans_brelse() will handle them
2677 * appropriately based on the recursion count and dirty state of the buffer.
2680 * XXX (dgc): When we have lots of free space, does this buy us
2681 * anything other than extra overhead when we need to put more blocks
2682 * back on the free list? Maybe we should only do this when space is
2683 * getting low or the AGFL is more than half full?
2685 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2686 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2687 * updating the rmapbt. Both flags are used in xfs_repair while we're
2688 * rebuilding the rmapbt, and neither is used by the kernel. They're
2689 * both required to ensure that rmaps are correctly recorded for the
2690 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2691 * repair/rmap.c in xfsprogs for details.
2693 memset(&targs, 0, sizeof(targs));
2694 /* struct copy below */
2695 if (flags & XFS_ALLOC_FLAG_NORMAP)
2696 targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2698 targs.oinfo = XFS_RMAP_OINFO_AG;
2699 while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
2700 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2702 goto out_agbp_relse;
2704 /* defer agfl frees */
2705 xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2711 targs.agno = args->agno;
2712 targs.alignment = targs.minlen = targs.prod = 1;
2713 targs.type = XFS_ALLOCTYPE_THIS_AG;
2715 error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2717 goto out_agbp_relse;
2719 /* Make the freelist longer if it's too short. */
2720 while (pag->pagf_flcount < need) {
2722 targs.maxlen = need - pag->pagf_flcount;
2723 targs.resv = XFS_AG_RESV_AGFL;
2725 /* Allocate as many blocks as possible at once. */
2726 error = xfs_alloc_ag_vextent(&targs);
2728 goto out_agflbp_relse;
2731 * Stop if we run out. Won't happen if callers are obeying
2732 * the restrictions correctly. Can happen for free calls
2733 * on a completely full ag.
2735 if (targs.agbno == NULLAGBLOCK) {
2736 if (flags & XFS_ALLOC_FLAG_FREEING)
2738 goto out_agflbp_relse;
2741 * Put each allocated block on the list.
2743 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2744 error = xfs_alloc_put_freelist(tp, agbp,
2747 goto out_agflbp_relse;
2750 xfs_trans_brelse(tp, agflbp);
2755 xfs_trans_brelse(tp, agflbp);
2758 xfs_trans_brelse(tp, agbp);
2765 * Get a block from the freelist.
2766 * Returns the block number via *bnop, or NULLAGBLOCK if the freelist is empty.
2769 xfs_alloc_get_freelist(
2770 struct xfs_trans *tp,
2771 struct xfs_buf *agbp,
2772 xfs_agblock_t *bnop,
2775 struct xfs_agf *agf = agbp->b_addr;
2776 struct xfs_buf *agflbp;
2781 struct xfs_mount *mp = tp->t_mountp;
2782 struct xfs_perag *pag;
2785 * Freelist is empty, give up.
2787 if (!agf->agf_flcount) {
2788 *bnop = NULLAGBLOCK;
2792 * Read the array of free blocks.
2794 error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2801 * Get the block number and update the data structures.
2803 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
2804 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2805 be32_add_cpu(&agf->agf_flfirst, 1);
2806 xfs_trans_brelse(tp, agflbp);
2807 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
2808 agf->agf_flfirst = 0;
2811 ASSERT(!pag->pagf_agflreset);
2812 be32_add_cpu(&agf->agf_flcount, -1);
2813 pag->pagf_flcount--;
2815 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2817 be32_add_cpu(&agf->agf_btreeblks, 1);
2818 pag->pagf_btreeblks++;
2819 logflags |= XFS_AGF_BTREEBLKS;
2822 xfs_alloc_log_agf(tp, agbp, logflags);
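/*
 * Example of the wrap handling above (hypothetical size): with
 * xfs_agfl_size() == 119 the valid slots are 0..118, so if agf_flfirst
 * was 118 before this call, the increment takes it to 119 == size and it
 * wraps back to slot 0; the next block is then taken from the head of
 * the array again.
 */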
2829 * Log the given fields from the agf structure.
2833 xfs_trans_t *tp, /* transaction pointer */
2834 struct xfs_buf *bp, /* buffer for a.g. freelist header */
2835 int fields) /* mask of fields to be logged (XFS_AGF_...) */
2837 int first; /* first byte offset */
2838 int last; /* last byte offset */
2839 static const short offsets[] = {
2840 offsetof(xfs_agf_t, agf_magicnum),
2841 offsetof(xfs_agf_t, agf_versionnum),
2842 offsetof(xfs_agf_t, agf_seqno),
2843 offsetof(xfs_agf_t, agf_length),
2844 offsetof(xfs_agf_t, agf_roots[0]),
2845 offsetof(xfs_agf_t, agf_levels[0]),
2846 offsetof(xfs_agf_t, agf_flfirst),
2847 offsetof(xfs_agf_t, agf_fllast),
2848 offsetof(xfs_agf_t, agf_flcount),
2849 offsetof(xfs_agf_t, agf_freeblks),
2850 offsetof(xfs_agf_t, agf_longest),
2851 offsetof(xfs_agf_t, agf_btreeblks),
2852 offsetof(xfs_agf_t, agf_uuid),
2853 offsetof(xfs_agf_t, agf_rmap_blocks),
2854 offsetof(xfs_agf_t, agf_refcount_blocks),
2855 offsetof(xfs_agf_t, agf_refcount_root),
2856 offsetof(xfs_agf_t, agf_refcount_level),
2857 /* needed so that we don't log the whole rest of the structure: */
2858 offsetof(xfs_agf_t, agf_spare64),
2862 trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
2864 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
2866 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2867 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
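/*
 * Note on usage (an illustration, relying on xfs_btree_offsets() turning
 * the lowest and highest set bits in @fields into one contiguous byte
 * range): a call such as
 *
 *	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
 *
 * logs every byte from offsetof(agf_flfirst) up to (but not including)
 * offsetof(agf_freeblks), so agf_fllast is logged as well simply because
 * it sits between the two flagged fields. The trailing agf_spare64 entry
 * in the table above exists only to bound the last loggable field.
 */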
2871 * Interface for inode allocation to force the pag data to be initialized.
2874 xfs_alloc_pagf_init(
2875 xfs_mount_t *mp, /* file system mount structure */
2876 xfs_trans_t *tp, /* transaction pointer */
2877 xfs_agnumber_t agno, /* allocation group number */
2878 int flags) /* XFS_ALLOC_FLAGS_... */
2883 error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp);
2885 xfs_trans_brelse(tp, bp);
2890 * Put the block on the freelist for the allocation group.
2893 xfs_alloc_put_freelist(
2894 struct xfs_trans *tp,
2895 struct xfs_buf *agbp,
2896 struct xfs_buf *agflbp,
2900 struct xfs_mount *mp = tp->t_mountp;
2901 struct xfs_agf *agf = agbp->b_addr;
2902 struct xfs_perag *pag;
2909 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2910 be32_to_cpu(agf->agf_seqno), &agflbp)))
2912 be32_add_cpu(&agf->agf_fllast, 1);
2913 if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
2914 agf->agf_fllast = 0;
2917 ASSERT(!pag->pagf_agflreset);
2918 be32_add_cpu(&agf->agf_flcount, 1);
2919 pag->pagf_flcount++;
2921 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2923 be32_add_cpu(&agf->agf_btreeblks, -1);
2924 pag->pagf_btreeblks--;
2925 logflags |= XFS_AGF_BTREEBLKS;
2928 xfs_alloc_log_agf(tp, agbp, logflags);
2930 ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
2932 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
2933 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
2934 *blockp = cpu_to_be32(bno);
2935 startoff = (char *)blockp - (char *)agflbp->b_addr;
2937 xfs_alloc_log_agf(tp, agbp, logflags);
2939 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
2940 xfs_trans_log_buf(tp, agflbp, startoff,
2941 startoff + sizeof(xfs_agblock_t) - 1);
2945 static xfs_failaddr_t
2949 struct xfs_mount *mp = bp->b_mount;
2950 struct xfs_agf *agf = bp->b_addr;
2952 if (xfs_has_crc(mp)) {
2953 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
2954 return __this_address;
2955 if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
2956 return __this_address;
2959 if (!xfs_verify_magic(bp, agf->agf_magicnum))
2960 return __this_address;
2962 if (!(XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2963 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2964 be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
2965 be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
2966 be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
2967 return __this_address;
2969 if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks)
2970 return __this_address;
2972 if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
2973 be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))
2974 return __this_address;
2976 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
2977 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
2978 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) >
2979 mp->m_alloc_maxlevels ||
2980 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) >
2981 mp->m_alloc_maxlevels)
2982 return __this_address;
2984 if (xfs_has_rmapbt(mp) &&
2985 (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
2986 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) >
2987 mp->m_rmap_maxlevels))
2988 return __this_address;
2990 if (xfs_has_rmapbt(mp) &&
2991 be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length))
2992 return __this_address;
2995 * during growfs operations, the perag is not fully initialised,
2996 * so we can't use it for any useful checking. growfs ensures we can't
2997 * use it by using uncached buffers that don't have the perag attached
2998 * so we can detect and avoid this problem.
3000 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
3001 return __this_address;
3003 if (xfs_has_lazysbcount(mp) &&
3004 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
3005 return __this_address;
3007 if (xfs_has_reflink(mp) &&
3008 be32_to_cpu(agf->agf_refcount_blocks) >
3009 be32_to_cpu(agf->agf_length))
3010 return __this_address;
3012 if (xfs_has_reflink(mp) &&
3013 (be32_to_cpu(agf->agf_refcount_level) < 1 ||
3014 be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels))
3015 return __this_address;
3022 xfs_agf_read_verify(
3025 struct xfs_mount *mp = bp->b_mount;
3028 if (xfs_has_crc(mp) &&
3029 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
3030 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
3032 fa = xfs_agf_verify(bp);
3033 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
3034 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3039 xfs_agf_write_verify(
3042 struct xfs_mount *mp = bp->b_mount;
3043 struct xfs_buf_log_item *bip = bp->b_log_item;
3044 struct xfs_agf *agf = bp->b_addr;
3047 fa = xfs_agf_verify(bp);
3049 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3053 if (!xfs_has_crc(mp))
3057 agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
3059 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
3062 const struct xfs_buf_ops xfs_agf_buf_ops = {
3064 .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
3065 .verify_read = xfs_agf_read_verify,
3066 .verify_write = xfs_agf_write_verify,
3067 .verify_struct = xfs_agf_verify,
3071 * Read in the allocation group header (free/alloc section).
3075 struct xfs_mount *mp, /* mount point structure */
3076 struct xfs_trans *tp, /* transaction pointer */
3077 xfs_agnumber_t agno, /* allocation group number */
3078 int flags, /* XFS_BUF_ */
3079 struct xfs_buf **bpp) /* buffer for the ag freelist header */
3083 trace_xfs_read_agf(mp, agno);
3085 ASSERT(agno != NULLAGNUMBER);
3086 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3087 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
3088 XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
3092 ASSERT(!(*bpp)->b_error);
3093 xfs_buf_set_ref(*bpp, XFS_AGF_REF);
3098 * Read in the allocation group header (free/alloc section).
3102 struct xfs_mount *mp, /* mount point structure */
3103 struct xfs_trans *tp, /* transaction pointer */
3104 xfs_agnumber_t agno, /* allocation group number */
3105 int flags, /* XFS_ALLOC_FLAG_... */
3106 struct xfs_buf **bpp) /* buffer for the ag freelist header */
3108 struct xfs_agf *agf; /* ag freelist header */
3109 struct xfs_perag *pag; /* per allocation group data */
3113 trace_xfs_alloc_read_agf(mp, agno);
3115 /* We don't support trylock when freeing. */
3116 ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
3117 (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
3118 ASSERT(agno != NULLAGNUMBER);
3119 error = xfs_read_agf(mp, tp, agno,
3120 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
3124 ASSERT(!(*bpp)->b_error);
3126 agf = (*bpp)->b_addr;
3127 pag = (*bpp)->b_pag;
3128 if (!pag->pagf_init) {
3129 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
3130 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
3131 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
3132 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
3133 pag->pagf_levels[XFS_BTNUM_BNOi] =
3134 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
3135 pag->pagf_levels[XFS_BTNUM_CNTi] =
3136 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
3137 pag->pagf_levels[XFS_BTNUM_RMAPi] =
3138 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
3139 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
3141 pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
3144 * Update the in-core allocbt counter. Filter out the rmapbt
3145 * subset of the btreeblks counter because the rmapbt is managed
3146 * by perag reservation. Subtract one for the rmapbt root block
3147 * because the rmap counter includes it while the btreeblks
3148 * counter only tracks non-root blocks.
3150 allocbt_blks = pag->pagf_btreeblks;
3151 if (xfs_has_rmapbt(mp))
3152 allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
3153 if (allocbt_blks > 0)
3154 atomic64_add(allocbt_blks, &mp->m_allocbt_blks);
3157 else if (!xfs_is_shutdown(mp)) {
3158 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
3159 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
3160 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
3161 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
3162 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
3163 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
3164 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
3165 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
3172 * Allocate an extent (variable-size).
3173 * Depending on the allocation type, we either look in a single allocation
3174 * group or loop over the allocation groups to find the result.
3178 struct xfs_alloc_arg *args) /* allocation argument structure */
3180 xfs_agblock_t agsize; /* allocation group size */
3182 int flags; /* XFS_ALLOC_FLAG_... locking flags */
3183 struct xfs_mount *mp; /* mount structure pointer */
3184 xfs_agnumber_t sagno; /* starting allocation group number */
3185 xfs_alloctype_t type; /* input allocation type */
3187 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
3190 type = args->otype = args->type;
3191 args->agbno = NULLAGBLOCK;
3193 * Just fix this up, for the case where the last a.g. is shorter
3194 * (or there's only one a.g.) and the caller couldn't easily figure
3195 * that out (xfs_bmap_alloc).
3197 agsize = mp->m_sb.sb_agblocks;
3198 if (args->maxlen > agsize)
3199 args->maxlen = agsize;
3200 if (args->alignment == 0)
3201 args->alignment = 1;
3202 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
3203 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
3204 ASSERT(args->minlen <= args->maxlen);
3205 ASSERT(args->minlen <= agsize);
3206 ASSERT(args->mod < args->prod);
3207 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
3208 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
3209 args->minlen > args->maxlen || args->minlen > agsize ||
3210 args->mod >= args->prod) {
3211 args->fsbno = NULLFSBLOCK;
3212 trace_xfs_alloc_vextent_badargs(args);
3217 case XFS_ALLOCTYPE_THIS_AG:
3218 case XFS_ALLOCTYPE_NEAR_BNO:
3219 case XFS_ALLOCTYPE_THIS_BNO:
3221 * These three force us into a single a.g.
3223 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
3224 args->pag = xfs_perag_get(mp, args->agno);
3225 error = xfs_alloc_fix_freelist(args, 0);
3227 trace_xfs_alloc_vextent_nofix(args);
3231 trace_xfs_alloc_vextent_noagbp(args);
3234 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
3235 if ((error = xfs_alloc_ag_vextent(args)))
3238 case XFS_ALLOCTYPE_START_BNO:
3240 * Try near allocation first, then anywhere-in-ag after
3241 * the first a.g. fails.
3243 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
3244 xfs_is_inode32(mp)) {
3245 args->fsbno = XFS_AGB_TO_FSB(mp,
3246 ((mp->m_agfrotor / rotorstep) %
3247 mp->m_sb.sb_agcount), 0);
3250 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
3251 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3253 case XFS_ALLOCTYPE_FIRST_AG:
3255 * Rotate through the allocation groups looking for a winner.
3257 if (type == XFS_ALLOCTYPE_FIRST_AG) {
3259 * Start with allocation group given by bno.
3261 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
3262 args->type = XFS_ALLOCTYPE_THIS_AG;
3267 * Start with the given allocation group.
3269 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
3270 flags = XFS_ALLOC_FLAG_TRYLOCK;
3273 * Loop over allocation groups twice; first time with
3274 * trylock set, second time without.
3277 args->pag = xfs_perag_get(mp, args->agno);
3278 error = xfs_alloc_fix_freelist(args, flags);
3280 trace_xfs_alloc_vextent_nofix(args);
3284 * If we get a buffer back then the allocation will fly.
3287 if ((error = xfs_alloc_ag_vextent(args)))
3292 trace_xfs_alloc_vextent_loopfailed(args);
3295 * Didn't work, figure out the next iteration.
3297 if (args->agno == sagno &&
3298 type == XFS_ALLOCTYPE_START_BNO)
3299 args->type = XFS_ALLOCTYPE_THIS_AG;
3301 * For the first allocation, we can try any AG to get
3302 * space. However, if we have already allocated a
3303 * block, we don't want to try AGs whose number is below
3304 * sagno. Otherwise, we may end up with out-of-order
3305 * locking of the AGF, which might cause deadlock.
3307 if (++(args->agno) == mp->m_sb.sb_agcount) {
3308 if (args->tp->t_firstblock != NULLFSBLOCK)
3314 * Reached the starting a.g., must either be done
3315 * or switch to non-trylock mode.
3317 if (args->agno == sagno) {
3319 args->agbno = NULLAGBLOCK;
3320 trace_xfs_alloc_vextent_allfailed(args);
3325 if (type == XFS_ALLOCTYPE_START_BNO) {
3326 args->agbno = XFS_FSB_TO_AGBNO(mp,
3328 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3331 xfs_perag_put(args->pag);
3334 if (args->agno == sagno)
3335 mp->m_agfrotor = (mp->m_agfrotor + 1) %
3336 (mp->m_sb.sb_agcount * rotorstep);
3338 mp->m_agfrotor = (args->agno * rotorstep + 1) %
3339 (mp->m_sb.sb_agcount * rotorstep);
3346 if (args->agbno == NULLAGBLOCK)
3347 args->fsbno = NULLFSBLOCK;
3349 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
3351 ASSERT(args->len >= args->minlen);
3352 ASSERT(args->len <= args->maxlen);
3353 ASSERT(args->agbno % args->alignment == 0);
3354 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
3359 xfs_perag_put(args->pag);
3362 xfs_perag_put(args->pag);
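/*
 * Minimal sketch of a caller (illustrative; @tp and @target_fsb are
 * assumed to come from the surrounding context, and a real caller would
 * also fill in args.oinfo and args.datatype as appropriate for the new
 * owner):
 *
 *	struct xfs_alloc_arg	args = {
 *		.tp		= tp,
 *		.mp		= tp->t_mountp,
 *		.fsbno		= target_fsb,
 *		.type		= XFS_ALLOCTYPE_NEAR_BNO,
 *		.resv		= XFS_AG_RESV_NONE,
 *		.minlen		= 1,
 *		.maxlen		= 1,
 *		.prod		= 1,
 *		.alignment	= 1,
 *	};
 *	int			error;
 *
 *	error = xfs_alloc_vextent(&args);
 *
 * On success args.fsbno holds the first allocated block and args.len the
 * allocated length; args.fsbno is NULLFSBLOCK if nothing was allocated.
 */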
3366 /* Ensure that the freelist is at full capacity. */
3368 xfs_free_extent_fix_freelist(
3369 struct xfs_trans *tp,
3370 struct xfs_perag *pag,
3371 struct xfs_buf **agbp)
3373 struct xfs_alloc_arg args;
3376 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3378 args.mp = tp->t_mountp;
3379 args.agno = pag->pag_agno;
3383 * validate that the block number is legal - this enables us to detect
3384 * and handle a silent filesystem corruption rather than crashing.
3386 if (args.agno >= args.mp->m_sb.sb_agcount)
3387 return -EFSCORRUPTED;
3389 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3399 * Just break up the extent address and hand off to xfs_free_ag_extent
3400 * after fixing up the freelist.
3404 struct xfs_trans *tp,
3407 const struct xfs_owner_info *oinfo,
3408 enum xfs_ag_resv_type type,
3411 struct xfs_mount *mp = tp->t_mountp;
3412 struct xfs_buf *agbp;
3413 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
3414 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
3415 struct xfs_agf *agf;
3417 unsigned int busy_flags = 0;
3418 struct xfs_perag *pag;
3421 ASSERT(type != XFS_AG_RESV_AGFL);
3423 if (XFS_TEST_ERROR(false, mp,
3424 XFS_ERRTAG_FREE_EXTENT))
3427 pag = xfs_perag_get(mp, agno);
3428 error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
3433 if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
3434 error = -EFSCORRUPTED;
3438 /* validate that the extent size is legal now that we have the agf locked */
3439 if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
3440 error = -EFSCORRUPTED;
3444 error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
3449 busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3450 xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
3455 xfs_trans_brelse(tp, agbp);
3461 struct xfs_alloc_query_range_info {
3462 xfs_alloc_query_range_fn fn;
3466 /* Format btree record and pass to our callback. */
3468 xfs_alloc_query_range_helper(
3469 struct xfs_btree_cur *cur,
3470 const union xfs_btree_rec *rec,
3473 struct xfs_alloc_query_range_info *query = priv;
3474 struct xfs_alloc_rec_incore irec;
3476 irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
3477 irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
3478 return query->fn(cur, &irec, query->priv);
3481 /* Find all free space within a given range of blocks. */
3483 xfs_alloc_query_range(
3484 struct xfs_btree_cur *cur,
3485 const struct xfs_alloc_rec_incore *low_rec,
3486 const struct xfs_alloc_rec_incore *high_rec,
3487 xfs_alloc_query_range_fn fn,
3490 union xfs_btree_irec low_brec;
3491 union xfs_btree_irec high_brec;
3492 struct xfs_alloc_query_range_info query;
3494 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3495 low_brec.a = *low_rec;
3496 high_brec.a = *high_rec;
3499 return xfs_btree_query_range(cur, &low_brec, &high_brec,
3500 xfs_alloc_query_range_helper, &query);
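/*
 * Sketch of a query callback (hypothetical helper, assuming the usual
 * xfs_alloc_query_range_fn signature): a function that simply counts the
 * free extents in the queried range could look like
 *
 *	STATIC int
 *	xfs_count_free_extents_helper(
 *		struct xfs_btree_cur			*cur,
 *		const struct xfs_alloc_rec_incore	*rec,
 *		void					*priv)
 *	{
 *		(*(uint64_t *)priv)++;
 *		return 0;
 *	}
 *
 * and would be passed as @fn to xfs_alloc_query_range() (or
 * xfs_alloc_query_all()) along with a pointer to the counter in @priv.
 */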
3503 /* Find all free space records. */
3505 xfs_alloc_query_all(
3506 struct xfs_btree_cur *cur,
3507 xfs_alloc_query_range_fn fn,
3510 struct xfs_alloc_query_range_info query;
3512 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3515 return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
3518 /* Is there a record covering a given extent? */
3520 xfs_alloc_has_record(
3521 struct xfs_btree_cur *cur,
3526 union xfs_btree_irec low;
3527 union xfs_btree_irec high;
3529 memset(&low, 0, sizeof(low));
3530 low.a.ar_startblock = bno;
3531 memset(&high, 0xFF, sizeof(high));
3532 high.a.ar_startblock = bno + len - 1;
3534 return xfs_btree_has_record(cur, &low, &high, exists);
3538 * Walk all the blocks in the AGFL. The @walk_fn can return any negative
3539 * error code or XFS_ITER_*.
3543 struct xfs_mount *mp,
3544 struct xfs_agf *agf,
3545 struct xfs_buf *agflbp,
3546 xfs_agfl_walk_fn walk_fn,
3553 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3554 i = be32_to_cpu(agf->agf_flfirst);
3556 /* Nothing to walk in an empty AGFL. */
3557 if (agf->agf_flcount == cpu_to_be32(0))
3560 /* Otherwise, walk from first to last, wrapping as needed. */
3562 error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
3565 if (i == be32_to_cpu(agf->agf_fllast))
3567 if (++i == xfs_agfl_size(mp))
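/*
 * Sketch of an AGFL walk callback (hypothetical helper, matching the
 * walk_fn(mp, agbno, priv) call above): a callback that just logs each
 * AGFL block could look like
 *
 *	STATIC int
 *	xfs_agfl_print_block(
 *		struct xfs_mount	*mp,
 *		xfs_agblock_t		agbno,
 *		void			*priv)
 *	{
 *		xfs_notice(mp, "agfl block %u", agbno);
 *		return 0;
 *	}
 *
 * The return convention is the one noted in the comment above: 0 to keep
 * walking, a negative error code or XFS_ITER_* value otherwise.
 */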
3575 xfs_extfree_intent_init_cache(void)
3577 xfs_extfree_item_cache = kmem_cache_create("xfs_extfree_intent",
3578 sizeof(struct xfs_extent_free_item),
3581 return xfs_extfree_item_cache != NULL ? 0 : -ENOMEM;
3585 xfs_extfree_intent_destroy_cache(void)
3587 kmem_cache_destroy(xfs_extfree_item_cache);
3588 xfs_extfree_item_cache = NULL;