// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"

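/*
 * Btree operations for the two free space btrees: the by-block-number
 * tree ("bnobt") and the by-size tree ("cntbt").  Both trees share most
 * of these callbacks; only the key handling and ordering checks differ.
 */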
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

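/*
 * Point the AGF at a new btree root block, adjust the recorded tree
 * height, and keep the in-memory perag copy of the levels in sync.
 */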
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

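/*
 * Btree blocks for the free space trees come from the AG free list
 * (AGFL) so that growing or shrinking these trees does not recurse back
 * into the block allocator they implement.
 */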
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
			      false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}

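/*
 * Return a freed btree block to the AGFL and record it as a busy extent
 * so it cannot be reused before the freeing transaction commits.
 */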
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = xfs_perag_get(cur->bc_mp, seqno);
	pag->pagf_longest = be32_to_cpu(len);
	xfs_perag_put(pag);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}

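/*
 * Minimum and maximum records per block are precomputed at mount time
 * in m_alloc_mnr[] and m_alloc_mxr[], indexed by leaf (0) vs. node (1).
 */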
STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

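/*
 * The high key of a bnobt record is the last block covered by the
 * extent; the cntbt orders purely by length, so its high key carries
 * only the block count.
 */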
STATIC void
xfs_bnobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->alloc.ar_startblock);
	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
	key->alloc.ar_startblock = cpu_to_be32(x);
	key->alloc.ar_blockcount = 0;
}

STATIC void
xfs_cntbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
	key->alloc.ar_startblock = 0;
}

STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

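/*
 * Key comparison helpers.  The bnobt compares by start block only; the
 * cntbt compares by extent length first and breaks ties by start block.
 */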
STATIC int64_t
xfs_bnobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_cntbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	int64_t			diff;

	diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	int64_t			diff;

	diff = be32_to_cpu(k1->alloc.ar_blockcount) -
	       be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
		return diff;

	return be32_to_cpu(k1->alloc.ar_startblock) -
	       be32_to_cpu(k2->alloc.ar_startblock);
}

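/*
 * Structural verifier shared by both free space btree block types; the
 * tree type is inferred from the buffer ops' magic values.
 */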
static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;
	xfs_btnum_t		btnum = XFS_BTNUM_BNOi;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/*
	 * The perag may not be attached during grow operations or fully
	 * initialized from the AGF during log recovery. Therefore we can only
	 * check against maximum tree depth from those contexts.
	 *
	 * Otherwise check against the per-tree limit. Peek at one of the
	 * verifier magic values to determine the type of tree we're verifying
	 * against.
	 */
	level = be16_to_cpu(block->bb_level);
	if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
		btnum = XFS_BTNUM_CNTi;
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[btnum])
			return __this_address;
	} else if (level >= mp->m_ag_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}

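/*
 * Check the CRC first, then run the structural checks; emit a
 * corruption trace event if the buffer ends up with an error.
 */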
static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

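/*
 * Verify the block before it goes to disk and, on v5 filesystems,
 * recompute the CRC in the block header.
 */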
static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bnobt_buf_ops = {
	.name = "xfs_bnobt",
	.magic = { cpu_to_be32(XFS_ABTB_MAGIC),
		   cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

const struct xfs_buf_ops xfs_cntbt_buf_ops = {
	.name = "xfs_cntbt",
	.magic = { cpu_to_be32(XFS_ABTC_MAGIC),
		   cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

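/*
 * Ordering checks used by the generic btree code: bnobt records must be
 * in ascending, non-overlapping start block order, while cntbt entries
 * sort by length and then by start block.
 */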
STATIC int
xfs_bnobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_startblock) <
	       be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int
xfs_bnobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_startblock) +
		be32_to_cpu(r1->alloc.ar_blockcount) <=
		be32_to_cpu(r2->alloc.ar_startblock);
}

STATIC int
xfs_cntbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_blockcount) <
		be32_to_cpu(k2->alloc.ar_blockcount) ||
		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
		 be32_to_cpu(k1->alloc.ar_startblock) <
		 be32_to_cpu(k2->alloc.ar_startblock));
}

STATIC int
xfs_cntbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_blockcount) <
		be32_to_cpu(r2->alloc.ar_blockcount) ||
		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
		 be32_to_cpu(r1->alloc.ar_startblock) <
		 be32_to_cpu(r2->alloc.ar_startblock));
}

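/*
 * Operations tables wired into the generic btree code, one for each
 * free space tree type.
 */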
static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_bnobt_key_diff,
	.buf_ops		= &xfs_bnobt_buf_ops,
	.diff_two_keys		= xfs_bnobt_diff_two_keys,
	.keys_inorder		= xfs_bnobt_keys_inorder,
	.recs_inorder		= xfs_bnobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_cntbt_key_diff,
	.buf_ops		= &xfs_cntbt_buf_ops,
	.diff_two_keys		= xfs_cntbt_diff_two_keys,
	.keys_inorder		= xfs_cntbt_keys_inorder,
	.recs_inorder		= xfs_cntbt_recs_inorder,
};

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		cur->bc_ops = &xfs_cntbt_ops;
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	}

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;
	cur->bc_private.a.priv.abt.active = false;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}