2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_types.h"
24 #include "xfs_trans.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_btree.h"
38 #include "xfs_ialloc.h"
39 #include "xfs_alloc.h"
40 #include "xfs_rtalloc.h"
42 #include "xfs_error.h"
44 #include "xfs_quota.h"
45 #include "xfs_fsops.h"
47 STATIC void xfs_mount_log_sbunit(xfs_mount_t *, __int64_t);
48 STATIC int xfs_uuid_mount(xfs_mount_t *);
49 STATIC void xfs_uuid_unmount(xfs_mount_t *mp);
50 STATIC void xfs_unmountfs_wait(xfs_mount_t *);
54 STATIC void xfs_icsb_destroy_counters(xfs_mount_t *);
55 STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
57 STATIC void xfs_icsb_sync_counters(xfs_mount_t *);
58 STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
60 STATIC int xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
64 #define xfs_icsb_destroy_counters(mp) do { } while (0)
65 #define xfs_icsb_balance_counter(mp, a, b, c) do { } while (0)
66 #define xfs_icsb_sync_counters(mp) do { } while (0)
67 #define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0)
73 short type; /* 0 = integer
74 * 1 = binary / string (no translation)
77 { offsetof(xfs_sb_t, sb_magicnum), 0 },
78 { offsetof(xfs_sb_t, sb_blocksize), 0 },
79 { offsetof(xfs_sb_t, sb_dblocks), 0 },
80 { offsetof(xfs_sb_t, sb_rblocks), 0 },
81 { offsetof(xfs_sb_t, sb_rextents), 0 },
82 { offsetof(xfs_sb_t, sb_uuid), 1 },
83 { offsetof(xfs_sb_t, sb_logstart), 0 },
84 { offsetof(xfs_sb_t, sb_rootino), 0 },
85 { offsetof(xfs_sb_t, sb_rbmino), 0 },
86 { offsetof(xfs_sb_t, sb_rsumino), 0 },
87 { offsetof(xfs_sb_t, sb_rextsize), 0 },
88 { offsetof(xfs_sb_t, sb_agblocks), 0 },
89 { offsetof(xfs_sb_t, sb_agcount), 0 },
90 { offsetof(xfs_sb_t, sb_rbmblocks), 0 },
91 { offsetof(xfs_sb_t, sb_logblocks), 0 },
92 { offsetof(xfs_sb_t, sb_versionnum), 0 },
93 { offsetof(xfs_sb_t, sb_sectsize), 0 },
94 { offsetof(xfs_sb_t, sb_inodesize), 0 },
95 { offsetof(xfs_sb_t, sb_inopblock), 0 },
96 { offsetof(xfs_sb_t, sb_fname[0]), 1 },
97 { offsetof(xfs_sb_t, sb_blocklog), 0 },
98 { offsetof(xfs_sb_t, sb_sectlog), 0 },
99 { offsetof(xfs_sb_t, sb_inodelog), 0 },
100 { offsetof(xfs_sb_t, sb_inopblog), 0 },
101 { offsetof(xfs_sb_t, sb_agblklog), 0 },
102 { offsetof(xfs_sb_t, sb_rextslog), 0 },
103 { offsetof(xfs_sb_t, sb_inprogress), 0 },
104 { offsetof(xfs_sb_t, sb_imax_pct), 0 },
105 { offsetof(xfs_sb_t, sb_icount), 0 },
106 { offsetof(xfs_sb_t, sb_ifree), 0 },
107 { offsetof(xfs_sb_t, sb_fdblocks), 0 },
108 { offsetof(xfs_sb_t, sb_frextents), 0 },
109 { offsetof(xfs_sb_t, sb_uquotino), 0 },
110 { offsetof(xfs_sb_t, sb_gquotino), 0 },
111 { offsetof(xfs_sb_t, sb_qflags), 0 },
112 { offsetof(xfs_sb_t, sb_flags), 0 },
113 { offsetof(xfs_sb_t, sb_shared_vn), 0 },
114 { offsetof(xfs_sb_t, sb_inoalignmt), 0 },
115 { offsetof(xfs_sb_t, sb_unit), 0 },
116 { offsetof(xfs_sb_t, sb_width), 0 },
117 { offsetof(xfs_sb_t, sb_dirblklog), 0 },
118 { offsetof(xfs_sb_t, sb_logsectlog), 0 },
119 { offsetof(xfs_sb_t, sb_logsectsize),0 },
120 { offsetof(xfs_sb_t, sb_logsunit), 0 },
121 { offsetof(xfs_sb_t, sb_features2), 0 },
122 { sizeof(xfs_sb_t), 0 }
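/*
 * Illustrative sketch, not part of the original file: the table above never
 * stores a field width because the width of field f is simply the distance
 * to the next entry's offset; the trailing { sizeof(xfs_sb_t), 0 } sentinel
 * makes that work for the last real field too.  The structure and offsets
 * below are invented for the example.
 */
struct demo_sb {
	unsigned int	magic;		/* bytes  0..3  */
	unsigned int	blocksize;	/* bytes  4..7  */
	unsigned char	uuid[16];	/* bytes  8..23 */
};

static const struct { unsigned long offset; short type; } demo_sb_info[] = {
	{ offsetof(struct demo_sb, magic),	0 },
	{ offsetof(struct demo_sb, blocksize),	0 },
	{ offsetof(struct demo_sb, uuid),	1 },	/* no byte swapping */
	{ sizeof(struct demo_sb),		0 }	/* sentinel */
};

static unsigned long
demo_sb_field_size(int f)
{
	return demo_sb_info[f + 1].offset - demo_sb_info[f].offset;
}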
126 * Return a pointer to an initialized xfs_mount structure.
133 mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP);
135 if (xfs_icsb_init_counters(mp)) {
136 mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
139 AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
140 spinlock_init(&mp->m_sb_lock, "xfs_sb");
141 mutex_init(&mp->m_ilock);
142 initnsema(&mp->m_growlock, 1, "xfs_grow");
144 * Initialize the AIL.
146 xfs_trans_ail_init(mp);
148 atomic_set(&mp->m_active_trans, 0);
154 * Free up the resources associated with a mount structure. Assume that
155 * the structure was initially zeroed, so we can tell which fields got
171 for (agno = 0; agno < mp->m_maxagi; agno++)
172 if (mp->m_perag[agno].pagb_list)
173 kmem_free(mp->m_perag[agno].pagb_list,
174 sizeof(xfs_perag_busy_t) *
176 kmem_free(mp->m_perag,
177 sizeof(xfs_perag_t) * mp->m_sb.sb_agcount);
180 AIL_LOCK_DESTROY(&mp->m_ail_lock);
181 spinlock_destroy(&mp->m_sb_lock);
182 mutex_destroy(&mp->m_ilock);
183 freesema(&mp->m_growlock);
187 if (mp->m_fsname != NULL)
188 kmem_free(mp->m_fsname, mp->m_fsname_len);
189 if (mp->m_rtname != NULL)
190 kmem_free(mp->m_rtname, strlen(mp->m_rtname) + 1);
191 if (mp->m_logname != NULL)
192 kmem_free(mp->m_logname, strlen(mp->m_logname) + 1);
195 struct bhv_vfs *vfsp = XFS_MTOVFS(mp);
197 bhv_remove_all_vfsops(vfsp, 0);
198 VFS_REMOVEBHV(vfsp, &mp->m_bhv);
201 xfs_icsb_destroy_counters(mp);
202 kmem_free(mp, sizeof(xfs_mount_t));
206 * Check size of device based on the (data/realtime) block count.
207 * Note: this check is used by the growfs code as well as mount.
210 xfs_sb_validate_fsb_count(
214 ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
215 ASSERT(sbp->sb_blocklog >= BBSHIFT);
217 #if XFS_BIG_BLKNOS /* Limited by ULONG_MAX of page cache index */
218 if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
220 #else /* Limited by UINT_MAX of sectors */
221 if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
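/*
 * Worked example, not in the original source: with 4k blocks
 * (sb_blocklog = 12) and 4k pages (PAGE_CACHE_SHIFT = 12) the
 * XFS_BIG_BLKNOS test above reduces to "nblocks > ULONG_MAX", i.e. the
 * device may not contain more pages than an unsigned long page cache
 * index can address.  With 1k blocks the shift is 2, so up to
 * 4 * ULONG_MAX blocks are allowed -- the same number of bytes.  The
 * !XFS_BIG_BLKNOS branch instead limits the device to UINT_MAX
 * 512-byte basic blocks.
 */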
228 * Check the validity of the SB found.
231 xfs_mount_validate_sb(
237 * If the log device and data device have the
238 * same device number, the log is internal.
239 * Consequently, the sb_logstart should be non-zero. If
240 * we have a zero sb_logstart in this case, we may be trying to mount
241 * a volume filesystem in a non-volume manner.
243 if (sbp->sb_magicnum != XFS_SB_MAGIC) {
244 xfs_fs_mount_cmn_err(flags, "bad magic number");
245 return XFS_ERROR(EWRONGFS);
248 if (!XFS_SB_GOOD_VERSION(sbp)) {
249 xfs_fs_mount_cmn_err(flags, "bad version");
250 return XFS_ERROR(EWRONGFS);
254 sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
255 xfs_fs_mount_cmn_err(flags,
256 "filesystem is marked as having an external log; "
257 "specify logdev on the\nmount command line.");
258 return XFS_ERROR(EINVAL);
262 sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
263 xfs_fs_mount_cmn_err(flags,
264 "filesystem is marked as having an internal log; "
265 "do not specify logdev on\nthe mount command line.");
266 return XFS_ERROR(EINVAL);
270 * More sanity checking. These were stolen directly from
274 sbp->sb_agcount <= 0 ||
275 sbp->sb_sectsize < XFS_MIN_SECTORSIZE ||
276 sbp->sb_sectsize > XFS_MAX_SECTORSIZE ||
277 sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG ||
278 sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG ||
279 sbp->sb_blocksize < XFS_MIN_BLOCKSIZE ||
280 sbp->sb_blocksize > XFS_MAX_BLOCKSIZE ||
281 sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
282 sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
283 sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
284 sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
285 sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
286 sbp->sb_inodelog > XFS_DINODE_MAX_LOG ||
287 (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
288 (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
289 (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
290 (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) {
291 xfs_fs_mount_cmn_err(flags, "SB sanity check 1 failed");
292 return XFS_ERROR(EFSCORRUPTED);
296 * Sanity check AG count, size fields against data size field
299 sbp->sb_dblocks == 0 ||
301 (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
302 sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
303 sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
304 xfs_fs_mount_cmn_err(flags, "SB sanity check 2 failed");
305 return XFS_ERROR(EFSCORRUPTED);
308 if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
309 xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
310 xfs_fs_mount_cmn_err(flags,
311 "file system too large to be mounted on this system.");
312 return XFS_ERROR(E2BIG);
315 if (unlikely(sbp->sb_inprogress)) {
316 xfs_fs_mount_cmn_err(flags, "file system busy");
317 return XFS_ERROR(EFSCORRUPTED);
321 * Version 1 directory format has never worked on Linux.
323 if (unlikely(!XFS_SB_VERSION_HASDIRV2(sbp))) {
324 xfs_fs_mount_cmn_err(flags,
325 "file system using version 1 directory format");
326 return XFS_ERROR(ENOSYS);
330 * Until this is fixed only page-sized or smaller data blocks work.
332 if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
333 xfs_fs_mount_cmn_err(flags,
334 "file system with blocksize %d bytes",
336 xfs_fs_mount_cmn_err(flags,
337 "only pagesize (%ld) or less will currently work.",
339 return XFS_ERROR(ENOSYS);
346 xfs_initialize_perag(
349 xfs_agnumber_t agcount)
351 xfs_agnumber_t index, max_metadata;
355 xfs_sb_t *sbp = &mp->m_sb;
356 xfs_ino_t max_inum = XFS_MAXINUMBER_32;
358 /* Check to see if the filesystem can overflow 32 bit inodes */
359 agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
360 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
362 /* Clear the mount flag if no inode can overflow 32 bits
363 * on this filesystem, or if specifically requested.
365 if ((vfs->vfs_flag & VFS_32BITINODES) && ino > max_inum) {
366 mp->m_flags |= XFS_MOUNT_32BITINODES;
368 mp->m_flags &= ~XFS_MOUNT_32BITINODES;
371 /* If we can overflow then setup the ag headers accordingly */
372 if (mp->m_flags & XFS_MOUNT_32BITINODES) {
373 /* Calculate how much should be reserved for inodes to
374 * meet the max inode percentage.
376 if (mp->m_maxicount) {
379 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
381 icount += sbp->sb_agblocks - 1;
382 do_div(icount, sbp->sb_agblocks);
383 max_metadata = icount;
385 max_metadata = agcount;
387 for (index = 0; index < agcount; index++) {
388 ino = XFS_AGINO_TO_INO(mp, index, agino);
389 if (ino > max_inum) {
394 /* This ag is preferred for inodes */
395 pag = &mp->m_perag[index];
396 pag->pagi_inodeok = 1;
397 if (index < max_metadata)
398 pag->pagf_metadata = 1;
401 /* Setup default behavior for smaller filesystems */
402 for (index = 0; index < agcount; index++) {
403 pag = &mp->m_perag[index];
404 pag->pagi_inodeok = 1;
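/*
 * Illustrative sketch, not part of the original file: an XFS inode number
 * is the AG number shifted up by the AG-relative inode width and OR'd with
 * the AG-relative inode number, which is what the XFS_AGINO_TO_INO() test
 * above relies on.  The geometry used below (agblklog = 20, inopblog = 4,
 * 300 AGs) is invented for the example.
 */
static inline unsigned long long
demo_agino_to_ino(unsigned int agno, unsigned int agino, int agino_log)
{
	return ((unsigned long long)agno << agino_log) | agino;
}
/*
 * With agino_log = inopblog + agblklog = 24, the highest inode number in
 * AG 299 is demo_agino_to_ino(299, (1 << 24) - 1, 24), roughly 5.0e9.
 * That exceeds XFS_MAXINUMBER_32, so such a filesystem mounted with
 * 32-bit inode semantics sets XFS_MOUNT_32BITINODES and only the lower
 * AGs are marked pagi_inodeok in the loop above.
 */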
413 * data - on disk version of sb
415 * dir - conversion direction: <0 - convert sb to buf
416 * >0 - convert buf to sb
417 * fields - which fields to copy (bitmask)
438 buf_ptr = (xfs_caddr_t)data;
439 mem_ptr = (xfs_caddr_t)sb;
442 f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
443 first = xfs_sb_info[f].offset;
444 size = xfs_sb_info[f + 1].offset - first;
446 ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);
448 if (size == 1 || xfs_sb_info[f].type == 1) {
450 memcpy(mem_ptr + first, buf_ptr + first, size);
452 memcpy(buf_ptr + first, mem_ptr + first, size);
457 INT_XLATE(*(__uint16_t*)(buf_ptr+first),
458 *(__uint16_t*)(mem_ptr+first),
462 INT_XLATE(*(__uint32_t*)(buf_ptr+first),
463 *(__uint32_t*)(mem_ptr+first),
467 INT_XLATE(*(__uint64_t*)(buf_ptr+first),
468 *(__uint64_t*)(mem_ptr+first), dir, ARCH_CONVERT);
475 fields &= ~(1LL << f);
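/*
 * Usage sketch, not from the original file: the sign of "dir" selects the
 * copy direction, which is why xfs_readsb() below pulls the whole on-disk
 * superblock into the in-core copy while xfs_mod_sb() pushes only the
 * dirty fields back into the buffer before logging it:
 *
 *	xfs_xlatesb(XFS_BUF_PTR(bp), &mp->m_sb,  1, XFS_SB_ALL_BITS);
 *	xfs_xlatesb(XFS_BUF_PTR(bp), &mp->m_sb, -1, fields);
 */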
482 * Does the initial read of the superblock.
485 xfs_readsb(xfs_mount_t *mp, int flags)
487 unsigned int sector_size;
488 unsigned int extra_flags;
493 ASSERT(mp->m_sb_bp == NULL);
494 ASSERT(mp->m_ddev_targp != NULL);
497 * Allocate a (locked) buffer to hold the superblock.
498 * This will be kept around at all times to optimize
499 * access to the superblock.
501 sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
502 extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
504 bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
505 BTOBB(sector_size), extra_flags);
506 if (!bp || XFS_BUF_ISERROR(bp)) {
507 xfs_fs_mount_cmn_err(flags, "SB read failed");
508 error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
511 ASSERT(XFS_BUF_ISBUSY(bp));
512 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
515 * Initialize the mount structure from the superblock.
516 * But first do some basic consistency checking.
518 sbp = XFS_BUF_TO_SBP(bp);
519 xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, XFS_SB_ALL_BITS);
521 error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
523 xfs_fs_mount_cmn_err(flags, "SB validate failed");
528 * We must be able to do sector-sized and sector-aligned IO.
530 if (sector_size > mp->m_sb.sb_sectsize) {
531 xfs_fs_mount_cmn_err(flags,
532 "device supports only %u byte sectors (not %u)",
533 sector_size, mp->m_sb.sb_sectsize);
539 * If device sector size is smaller than the superblock size,
540 * re-read the superblock so the buffer is correctly sized.
542 if (sector_size < mp->m_sb.sb_sectsize) {
543 XFS_BUF_UNMANAGE(bp);
545 sector_size = mp->m_sb.sb_sectsize;
546 bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
547 BTOBB(sector_size), extra_flags);
548 if (!bp || XFS_BUF_ISERROR(bp)) {
549 xfs_fs_mount_cmn_err(flags, "SB re-read failed");
550 error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
553 ASSERT(XFS_BUF_ISBUSY(bp));
554 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
557 /* Initialize per-cpu counters */
558 xfs_icsb_reinit_counters(mp);
562 ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
567 XFS_BUF_UNMANAGE(bp);
577 * Mount initialization code establishing various mount
578 * fields from the superblock associated with the given
582 xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
586 mp->m_agfrotor = mp->m_agirotor = 0;
587 spinlock_init(&mp->m_agirotor_lock, "m_agirotor_lock");
588 mp->m_maxagi = mp->m_sb.sb_agcount;
589 mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
590 mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
591 mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
592 mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
593 mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
594 mp->m_litino = sbp->sb_inodesize -
595 ((uint)sizeof(xfs_dinode_core_t) + (uint)sizeof(xfs_agino_t));
596 mp->m_blockmask = sbp->sb_blocksize - 1;
597 mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
598 mp->m_blockwmask = mp->m_blockwsize - 1;
599 INIT_LIST_HEAD(&mp->m_del_inodes);
602 * Setup for attributes, in case they get created.
603 * This value is for inodes getting attributes for the first time,
604 * the per-inode value is for old attribute values.
606 ASSERT(sbp->sb_inodesize >= 256 && sbp->sb_inodesize <= 2048);
607 switch (sbp->sb_inodesize) {
609 mp->m_attroffset = XFS_LITINO(mp) -
610 XFS_BMDR_SPACE_CALC(MINABTPTRS);
615 mp->m_attroffset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
620 ASSERT(mp->m_attroffset < XFS_LITINO(mp));
622 for (i = 0; i < 2; i++) {
623 mp->m_alloc_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
625 mp->m_alloc_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
628 for (i = 0; i < 2; i++) {
629 mp->m_bmap_dmxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
631 mp->m_bmap_dmnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
634 for (i = 0; i < 2; i++) {
635 mp->m_inobt_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
637 mp->m_inobt_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
641 mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
642 mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
644 mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
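/*
 * Worked example, not in the original source, for a common geometry of
 * 4k blocks, 512-byte sectors, 256-byte inodes and 16 AGs:
 *
 *	sb_blocklog = 12  =>  m_blkbit_log = 12 + XFS_NBBYLOG(3) = 15
 *	                      m_blkbb_log  = 12 - BBSHIFT(9)     = 3
 *	sb_sectlog  = 9   =>  m_sectbb_log = 9 - 9               = 0
 *	sb_agcount  = 16  =>  m_agno_log   = highbit32(15) + 1   = 4
 *	inopblog = 4, agblklog = 20  =>  m_agino_log             = 24
 *
 * and with XFS_INODES_PER_CHUNK (64) inodes per allocation chunk,
 * m_ialloc_blks = 64 >> 4 = 4 blocks per inode chunk.
 */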
648 * xfs_initialize_perag_data
650 * Read in each per-ag structure so we can count up the number of
651 * allocated inodes, free inodes and used filesystem blocks as this
652 * information is no longer persistent in the superblock. Once we have
653 * this information, write it into the in-core superblock structure.
656 xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
658 xfs_agnumber_t index;
660 xfs_sb_t *sbp = &mp->m_sb;
664 uint64_t bfreelst = 0;
669 for (index = 0; index < agcount; index++) {
671 * read the agf, then the agi. This gets us
672 * all the information we need and populates the
673 * per-ag structures for us.
675 error = xfs_alloc_pagf_init(mp, NULL, index, 0);
679 error = xfs_ialloc_pagi_init(mp, NULL, index);
682 pag = &mp->m_perag[index];
683 ifree += pag->pagi_freecount;
684 ialloc += pag->pagi_count;
685 bfree += pag->pagf_freeblks;
686 bfreelst += pag->pagf_flcount;
687 btree += pag->pagf_btreeblks;
690 * Overwrite incore superblock counters with just-read data
693 sbp->sb_ifree = ifree;
694 sbp->sb_icount = ialloc;
695 sbp->sb_fdblocks = bfree + bfreelst + btree;
696 XFS_SB_UNLOCK(mp, s);
698 /* Fixup the per-cpu counters as well. */
699 xfs_icsb_reinit_counters(mp);
707 * This function does the following on an initial mount of a file system:
708 * - reads the superblock from disk and initializes the mount struct
709 * - if we're a 32-bit kernel, do a size check on the superblock
710 * so we don't mount terabyte filesystems
711 * - init mount struct realtime fields
712 * - allocate inode hash table for fs
713 * - init directory manager
714 * - perform recovery and init the log manager
723 xfs_sb_t *sbp = &(mp->m_sb);
725 bhv_vnode_t *rvp = NULL;
726 int readio_log, writeio_log;
729 __int64_t update_flags;
730 uint quotamount, quotaflags;
732 int uuid_mounted = 0;
735 if (mp->m_sb_bp == NULL) {
736 if ((error = xfs_readsb(mp, mfsi_flags))) {
740 xfs_mount_common(mp, sbp);
743 * Check if sb_agblocks is aligned at stripe boundary
744 * If sb_agblocks is NOT aligned turn off m_dalign since
745 * allocator alignment is within an ag, therefore ag has
746 * to be aligned at stripe boundary.
749 if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) {
751 * If stripe unit and stripe width are not multiples
752 * of the fs blocksize turn off alignment.
754 if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
755 (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
756 if (mp->m_flags & XFS_MOUNT_RETERR) {
758 "XFS: alignment check 1 failed");
759 error = XFS_ERROR(EINVAL);
762 mp->m_dalign = mp->m_swidth = 0;
765 * Convert the stripe unit and width to FSBs.
767 mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
768 if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
769 if (mp->m_flags & XFS_MOUNT_RETERR) {
770 error = XFS_ERROR(EINVAL);
773 xfs_fs_cmn_err(CE_WARN, mp,
774 "stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)",
775 mp->m_dalign, mp->m_swidth,
780 } else if (mp->m_dalign) {
781 mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
783 if (mp->m_flags & XFS_MOUNT_RETERR) {
784 xfs_fs_cmn_err(CE_WARN, mp,
785 "stripe alignment turned off: sunit(%d) less than bsize(%d)",
788 error = XFS_ERROR(EINVAL);
796 * Update superblock with new values
799 if (XFS_SB_VERSION_HASDALIGN(sbp)) {
800 if (sbp->sb_unit != mp->m_dalign) {
801 sbp->sb_unit = mp->m_dalign;
802 update_flags |= XFS_SB_UNIT;
804 if (sbp->sb_width != mp->m_swidth) {
805 sbp->sb_width = mp->m_swidth;
806 update_flags |= XFS_SB_WIDTH;
809 } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
810 XFS_SB_VERSION_HASDALIGN(&mp->m_sb)) {
811 mp->m_dalign = sbp->sb_unit;
812 mp->m_swidth = sbp->sb_width;
815 xfs_alloc_compute_maxlevels(mp);
816 xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
817 xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
818 xfs_ialloc_compute_maxlevels(mp);
820 if (sbp->sb_imax_pct) {
823 /* Make sure the maximum inode count is a multiple of the
824 * units we allocate inodes in.
827 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
829 do_div(icount, mp->m_ialloc_blks);
830 mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
835 mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);
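/*
 * Worked example, not in the original source: sb_imax_pct is a percentage
 * of sb_dblocks, rounded down to a whole number of inode chunks.  For a
 * 100GiB filesystem of 4k blocks (sb_dblocks = 26214400), imax_pct = 25
 * and m_ialloc_blks = 4:
 *
 *	25% of the blocks        = 6553600 blocks
 *	rounded to whole chunks  = 1638400 chunks = 6553600 blocks
 *	m_maxicount              = 6553600 << inopblog(4) = 104857600 inodes
 */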
838 * XFS uses the uuid from the superblock as the unique
839 * identifier for fsid. We cannot use the uuid from the volume
840 * since a single partition filesystem is identical to a single
841 * partition volume/filesystem.
843 if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
844 (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
845 if (xfs_uuid_mount(mp)) {
846 error = XFS_ERROR(EINVAL);
850 ret64 = uuid_hash64(&sbp->sb_uuid);
851 memcpy(&vfsp->vfs_fsid, &ret64, sizeof(ret64));
855 * Set the default minimum read and write sizes unless
856 * already specified in a mount option.
857 * We use smaller I/O sizes when the file system
858 * is being used for NFS service (wsync mount option).
860 if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
861 if (mp->m_flags & XFS_MOUNT_WSYNC) {
862 readio_log = XFS_WSYNC_READIO_LOG;
863 writeio_log = XFS_WSYNC_WRITEIO_LOG;
865 readio_log = XFS_READIO_LOG_LARGE;
866 writeio_log = XFS_WRITEIO_LOG_LARGE;
869 readio_log = mp->m_readio_log;
870 writeio_log = mp->m_writeio_log;
874 * Set the number of readahead buffers to use based on
875 * physical memory size.
877 if (xfs_physmem <= 4096) /* <= 16MB */
878 mp->m_nreadaheads = XFS_RW_NREADAHEAD_16MB;
879 else if (xfs_physmem <= 8192) /* <= 32MB */
880 mp->m_nreadaheads = XFS_RW_NREADAHEAD_32MB;
882 mp->m_nreadaheads = XFS_RW_NREADAHEAD_K32;
883 if (sbp->sb_blocklog > readio_log) {
884 mp->m_readio_log = sbp->sb_blocklog;
886 mp->m_readio_log = readio_log;
888 mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
889 if (sbp->sb_blocklog > writeio_log) {
890 mp->m_writeio_log = sbp->sb_blocklog;
892 mp->m_writeio_log = writeio_log;
894 mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
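/*
 * Worked example, not in the original source: if the chosen readio_log
 * works out to 16 (64k preferred I/O) on a 4k-block filesystem,
 * m_readio_blocks = 1 << (16 - 12) = 16 blocks; a 512-byte-block
 * filesystem gets 1 << (16 - 9) = 128 blocks, i.e. the same 64k.  When
 * sb_blocklog exceeds the requested log, the clamping above simply makes
 * the preferred I/O size one filesystem block.
 */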
897 * Set the inode cluster size based on the physical memory
898 * size. This may still be overridden by the file system
899 * block size if it is larger than the chosen cluster size.
901 if (xfs_physmem <= btoc(32 * 1024 * 1024)) { /* <= 32 MB */
902 mp->m_inode_cluster_size = XFS_INODE_SMALL_CLUSTER_SIZE;
904 mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
907 * Set whether we're using inode alignment.
909 if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) &&
910 mp->m_sb.sb_inoalignmt >=
911 XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
912 mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
914 mp->m_inoalign_mask = 0;
916 * If we are using stripe alignment, check whether
917 * the stripe unit is a multiple of the inode alignment
919 if (mp->m_dalign && mp->m_inoalign_mask &&
920 !(mp->m_dalign & mp->m_inoalign_mask))
921 mp->m_sinoalign = mp->m_dalign;
925 * Check that the data (and log if separate) are an ok size.
927 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
928 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
929 cmn_err(CE_WARN, "XFS: size check 1 failed");
930 error = XFS_ERROR(E2BIG);
933 error = xfs_read_buf(mp, mp->m_ddev_targp,
934 d - XFS_FSS_TO_BB(mp, 1),
935 XFS_FSS_TO_BB(mp, 1), 0, &bp);
939 cmn_err(CE_WARN, "XFS: size check 2 failed");
940 if (error == ENOSPC) {
941 error = XFS_ERROR(E2BIG);
946 if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) &&
947 mp->m_logdev_targp != mp->m_ddev_targp) {
948 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
949 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
950 cmn_err(CE_WARN, "XFS: size check 3 failed");
951 error = XFS_ERROR(E2BIG);
954 error = xfs_read_buf(mp, mp->m_logdev_targp,
955 d - XFS_FSB_TO_BB(mp, 1),
956 XFS_FSB_TO_BB(mp, 1), 0, &bp);
960 cmn_err(CE_WARN, "XFS: size check 3 failed");
961 if (error == ENOSPC) {
962 error = XFS_ERROR(E2BIG);
969 * Initialize realtime fields in the mount structure
971 if ((error = xfs_rtmount_init(mp))) {
972 cmn_err(CE_WARN, "XFS: RT mount failed");
977 * For client case we are done now
979 if (mfsi_flags & XFS_MFSI_CLIENT) {
984 * Copies the low order bits of the timestamp and the randomly
985 * set "sequence" number out of a UUID.
987 uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
990 * The vfs structure needs to have a file system independent
991 * way of checking for the invariant file system ID. Since it
992 * can't look at mount structures it has a pointer to the data
993 * in the mount structure.
995 * File systems that don't support user level file handles (i.e.
996 * all of them except for XFS) will leave vfs_altfsid as NULL.
998 vfsp->vfs_altfsid = (xfs_fsid_t *)mp->m_fixedfsid;
999 mp->m_dmevmask = 0; /* not persistent; set after each mount */
1004 * Initialize the attribute manager's entries.
1006 mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;
1009 * Initialize the precomputed transaction reservations values.
1014 * Allocate and initialize the inode hash table for this
1021 * Allocate and initialize the per-ag data.
1023 init_rwsem(&mp->m_peraglock);
1025 kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP);
1027 mp->m_maxagi = xfs_initialize_perag(vfsp, mp, sbp->sb_agcount);
1030 * log's mount-time initialization. Perform 1st part recovery if needed
1032 if (likely(sbp->sb_logblocks > 0)) { /* check for volume case */
1033 error = xfs_log_mount(mp, mp->m_logdev_targp,
1034 XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
1035 XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
1037 cmn_err(CE_WARN, "XFS: log mount failed");
1040 } else { /* No log has been defined */
1041 cmn_err(CE_WARN, "XFS: no log defined");
1042 XFS_ERROR_REPORT("xfs_mountfs_int(1)", XFS_ERRLEVEL_LOW, mp);
1043 error = XFS_ERROR(EFSCORRUPTED);
1048 * Now the log is mounted, we know if it was an unclean shutdown or
1049 * not. If it was, then the first phase of recovery has completed and we
1050 * have consistent AG blocks on disk. We have not recovered EFIs yet,
1051 * but they are recovered transactionally in the second recovery phase
1054 * Hence we can safely re-initialise incore superblock counters from
1055 * the per-ag data. These may not be correct if the filesystem was not
1056 * cleanly unmounted, so we need to wait for recovery to finish before
1059 * If the filesystem was cleanly unmounted, then we can trust the
1060 * values in the superblock to be correct and we don't need to do
1063 * If we are currently making the filesystem, the initialisation will
1064 * fail as the perag data is in an undefined state.
1067 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
1068 !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
1069 !mp->m_sb.sb_inprogress) {
1070 error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
1076 * Get and sanity-check the root inode.
1077 * Save the pointer to it in the mount structure.
1079 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
1081 cmn_err(CE_WARN, "XFS: failed to read root inode");
1085 ASSERT(rip != NULL);
1086 rvp = XFS_ITOV(rip);
1088 if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
1089 cmn_err(CE_WARN, "XFS: corrupted root inode");
1090 cmn_err(CE_WARN, "Device %s - root %llu is not a directory",
1091 XFS_BUFTARG_NAME(mp->m_ddev_targp),
1092 (unsigned long long)rip->i_ino);
1093 xfs_iunlock(rip, XFS_ILOCK_EXCL);
1094 XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
1096 error = XFS_ERROR(EFSCORRUPTED);
1099 mp->m_rootip = rip; /* save it */
1101 xfs_iunlock(rip, XFS_ILOCK_EXCL);
1104 * Initialize realtime inode pointers in the mount structure
1106 if ((error = xfs_rtmount_inodes(mp))) {
1108 * Free up the root inode.
1110 cmn_err(CE_WARN, "XFS: failed to read RT inodes");
1115 * If fs is not mounted readonly, then update the superblock
1116 * unit and width changes.
1118 if (update_flags && !(vfsp->vfs_flag & VFS_RDONLY))
1119 xfs_mount_log_sbunit(mp, update_flags);
1122 * Initialise the XFS quota management subsystem for this mount
1124 if ((error = XFS_QM_INIT(mp, "amount, "aflags)))
1128 * Finish recovering the file system. This part needed to be
1129 * delayed until after the root and real-time bitmap inodes
1130 * were consistently read in.
1132 error = xfs_log_mount_finish(mp, mfsi_flags);
1134 cmn_err(CE_WARN, "XFS: log mount finish failed");
1140 * Complete the quota initialisation, post-log-replay component.
1142 if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags)))
1149 * Free up the root inode.
1153 xfs_log_unmount_dealloc(mp);
1157 for (agno = 0; agno < sbp->sb_agcount; agno++)
1158 if (mp->m_perag[agno].pagb_list)
1159 kmem_free(mp->m_perag[agno].pagb_list,
1160 sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS);
1161 kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t));
1166 xfs_uuid_unmount(mp);
1174 * This flushes out the inodes, dquots and the superblock, unmounts the
1175 * log and makes sure that incore structures are freed.
1178 xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
1180 struct bhv_vfs *vfsp = XFS_MTOVFS(mp);
1181 #if defined(DEBUG) || defined(INDUCE_IO_ERROR)
1187 XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
1190 * Flush out the log synchronously so that we know for sure
1191 * that nothing is pinned. This is important because bflush()
1192 * will skip pinned buffers.
1194 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
1196 xfs_binval(mp->m_ddev_targp);
1197 if (mp->m_rtdev_targp) {
1198 xfs_binval(mp->m_rtdev_targp);
1201 xfs_log_sbcount(mp, 1);
1202 xfs_unmountfs_writesb(mp);
1203 xfs_unmountfs_wait(mp); /* wait for async bufs */
1204 xfs_log_unmount(mp); /* Done! No more fs ops. */
1209 * All inodes from this mount point should be freed.
1211 ASSERT(mp->m_inodes == NULL);
1213 xfs_unmountfs_close(mp, cr);
1214 if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
1215 xfs_uuid_unmount(mp);
1217 #if defined(DEBUG) || defined(INDUCE_IO_ERROR)
1219 * clear all error tags on this filesystem
1221 memcpy(&fsid, &vfsp->vfs_fsid, sizeof(int64_t));
1222 xfs_errortag_clearall_umount(fsid, mp->m_fsname, 0);
1225 xfs_mount_free(mp, 1);
1230 xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
1232 if (mp->m_logdev_targp != mp->m_ddev_targp)
1233 xfs_free_buftarg(mp->m_logdev_targp, 1);
1234 if (mp->m_rtdev_targp)
1235 xfs_free_buftarg(mp->m_rtdev_targp, 1);
1236 xfs_free_buftarg(mp->m_ddev_targp, 0);
1240 xfs_unmountfs_wait(xfs_mount_t *mp)
1242 if (mp->m_logdev_targp != mp->m_ddev_targp)
1243 xfs_wait_buftarg(mp->m_logdev_targp);
1244 if (mp->m_rtdev_targp)
1245 xfs_wait_buftarg(mp->m_rtdev_targp);
1246 xfs_wait_buftarg(mp->m_ddev_targp);
1250 xfs_fs_writable(xfs_mount_t *mp)
1252 bhv_vfs_t *vfsp = XFS_MTOVFS(mp);
1254 return !(vfs_test_for_freeze(vfsp) || XFS_FORCED_SHUTDOWN(mp) ||
1255 (vfsp->vfs_flag & VFS_RDONLY));
1261 * Called either periodically to keep the on disk superblock values
1262 * roughly up to date or from unmount to make sure the values are
1263 * correct on a clean unmount.
1265 * Note this code can be called during the process of freezing, so
1266 * we may need to use the transaction allocator which does not
1267 * block when the transaction subsystem is in its frozen state.
1277 if (!xfs_fs_writable(mp))
1280 xfs_icsb_sync_counters(mp);
1283 * we don't need to do this if we are updating the superblock
1284 * counters on every modification.
1286 if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1289 tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT);
1290 error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
1291 XFS_DEFAULT_LOG_COUNT);
1293 xfs_trans_cancel(tp, 0);
1297 xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
1299 xfs_trans_set_sync(tp);
1300 xfs_trans_commit(tp, 0);
1306 xfs_unmountfs_writesb(xfs_mount_t *mp)
1313 * skip superblock write if fs is read-only, or
1314 * if we are doing a forced umount.
1316 if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY ||
1317 XFS_FORCED_SHUTDOWN(mp))) {
1319 sbp = xfs_getsb(mp, 0);
1320 sb = XFS_BUF_TO_SBP(sbp);
1323 * mark shared-readonly if desired
1325 if (mp->m_mk_sharedro) {
1326 if (!(sb->sb_flags & XFS_SBF_READONLY))
1327 sb->sb_flags |= XFS_SBF_READONLY;
1328 if (!XFS_SB_VERSION_HASSHARED(sb))
1329 XFS_SB_VERSION_ADDSHARED(sb);
1330 xfs_fs_cmn_err(CE_NOTE, mp,
1331 "Unmounting, marking shared read-only");
1334 XFS_BUF_UNDONE(sbp);
1335 XFS_BUF_UNREAD(sbp);
1336 XFS_BUF_UNDELAYWRITE(sbp);
1338 XFS_BUF_UNASYNC(sbp);
1339 ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
1340 xfsbdstrat(mp, sbp);
1341 /* Nevermind errors we might get here. */
1342 error = xfs_iowait(sbp);
1344 xfs_ioerror_alert("xfs_unmountfs_writesb",
1345 mp, sbp, XFS_BUF_ADDR(sbp));
1346 if (error && mp->m_mk_sharedro)
1347 xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting. Filesystem may not be marked shared readonly");
1354 * xfs_mod_sb() can be used to copy arbitrary changes to the
1355 * in-core superblock into the superblock buffer to be logged.
1356 * It does not provide the higher level of locking that is
1357 * needed to protect the in-core superblock from concurrent
1361 xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
1374 bp = xfs_trans_getsb(tp, mp, 0);
1375 sbp = XFS_BUF_TO_SBP(bp);
1376 first = sizeof(xfs_sb_t);
1379 /* translate/copy */
1381 xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), -1, fields);
1383 /* find modified range */
1385 f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
1386 ASSERT((1LL << f) & XFS_SB_MOD_BITS);
1387 first = xfs_sb_info[f].offset;
1389 f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
1390 ASSERT((1LL << f) & XFS_SB_MOD_BITS);
1391 last = xfs_sb_info[f + 1].offset - 1;
1393 xfs_trans_log_buf(tp, bp, first, last);
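/*
 * Worked example, not in the original source: for the counter update done
 * by xfs_log_sbcount() above, fields is
 * XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS.  xfs_lowbit64() selects
 * sb_icount and xfs_highbit64() selects sb_fdblocks, so
 *
 *	first = offsetof(xfs_sb_t, sb_icount)
 *	last  = offsetof(xfs_sb_t, sb_frextents) - 1
 *
 * and the three adjacent 64-bit counters are logged as one contiguous
 * 24-byte range of the superblock buffer.
 */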
1398 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
1399 * a delta to a specified field in the in-core superblock. Simply
1400 * switch on the field indicated and apply the delta to that field.
1401 * Fields are not allowed to dip below zero, so if the delta would
1402 * do this, do not apply it and return EINVAL.
1404 * The SB_LOCK must be held when this routine is called.
1407 xfs_mod_incore_sb_unlocked(
1409 xfs_sb_field_t field,
1413 int scounter; /* short counter for 32 bit fields */
1414 long long lcounter; /* long counter for 64 bit fields */
1415 long long res_used, rem;
1418 * With the in-core superblock spin lock held, switch
1419 * on the indicated field. Apply the delta to the
1420 * proper field. If the field's value would dip below
1421 * 0, then do not apply the delta and return EINVAL.
1424 case XFS_SBS_ICOUNT:
1425 lcounter = (long long)mp->m_sb.sb_icount;
1429 return XFS_ERROR(EINVAL);
1431 mp->m_sb.sb_icount = lcounter;
1434 lcounter = (long long)mp->m_sb.sb_ifree;
1438 return XFS_ERROR(EINVAL);
1440 mp->m_sb.sb_ifree = lcounter;
1442 case XFS_SBS_FDBLOCKS:
1443 lcounter = (long long)
1444 mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1445 res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
1447 if (delta > 0) { /* Putting blocks back */
1448 if (res_used > delta) {
1449 mp->m_resblks_avail += delta;
1451 rem = delta - res_used;
1452 mp->m_resblks_avail = mp->m_resblks;
1455 } else { /* Taking blocks away */
1460 * If we're out of blocks, use any available reserved blocks if
1466 lcounter = (long long)mp->m_resblks_avail + delta;
1468 return XFS_ERROR(ENOSPC);
1470 mp->m_resblks_avail = lcounter;
1472 } else { /* not reserved */
1473 return XFS_ERROR(ENOSPC);
1478 mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
1480 case XFS_SBS_FREXTENTS:
1481 lcounter = (long long)mp->m_sb.sb_frextents;
1484 return XFS_ERROR(ENOSPC);
1486 mp->m_sb.sb_frextents = lcounter;
1488 case XFS_SBS_DBLOCKS:
1489 lcounter = (long long)mp->m_sb.sb_dblocks;
1493 return XFS_ERROR(EINVAL);
1495 mp->m_sb.sb_dblocks = lcounter;
1497 case XFS_SBS_AGCOUNT:
1498 scounter = mp->m_sb.sb_agcount;
1502 return XFS_ERROR(EINVAL);
1504 mp->m_sb.sb_agcount = scounter;
1506 case XFS_SBS_IMAX_PCT:
1507 scounter = mp->m_sb.sb_imax_pct;
1511 return XFS_ERROR(EINVAL);
1513 mp->m_sb.sb_imax_pct = scounter;
1515 case XFS_SBS_REXTSIZE:
1516 scounter = mp->m_sb.sb_rextsize;
1520 return XFS_ERROR(EINVAL);
1522 mp->m_sb.sb_rextsize = scounter;
1524 case XFS_SBS_RBMBLOCKS:
1525 scounter = mp->m_sb.sb_rbmblocks;
1529 return XFS_ERROR(EINVAL);
1531 mp->m_sb.sb_rbmblocks = scounter;
1533 case XFS_SBS_RBLOCKS:
1534 lcounter = (long long)mp->m_sb.sb_rblocks;
1538 return XFS_ERROR(EINVAL);
1540 mp->m_sb.sb_rblocks = lcounter;
1542 case XFS_SBS_REXTENTS:
1543 lcounter = (long long)mp->m_sb.sb_rextents;
1547 return XFS_ERROR(EINVAL);
1549 mp->m_sb.sb_rextents = lcounter;
1551 case XFS_SBS_REXTSLOG:
1552 scounter = mp->m_sb.sb_rextslog;
1556 return XFS_ERROR(EINVAL);
1558 mp->m_sb.sb_rextslog = scounter;
1562 return XFS_ERROR(EINVAL);
1567 * xfs_mod_incore_sb() is used to change a field in the in-core
1568 * superblock structure by the specified delta. This modification
1569 * is protected by the SB_LOCK. Just use the xfs_mod_incore_sb_unlocked()
1570 * routine to do the work.
1575 xfs_sb_field_t field,
1582 /* check for per-cpu counters */
1584 #ifdef HAVE_PERCPU_SB
1585 case XFS_SBS_ICOUNT:
1587 case XFS_SBS_FDBLOCKS:
1588 if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
1589 status = xfs_icsb_modify_counters(mp, field,
1596 s = XFS_SB_LOCK(mp);
1597 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1598 XFS_SB_UNLOCK(mp, s);
1606 * xfs_mod_incore_sb_batch() is used to change more than one field
1607 * in the in-core superblock structure at a time. This modification
1608 * is protected by a lock internal to this module. The fields and
1609 * changes to those fields are specified in the array of xfs_mod_sb
1610 * structures passed in.
1612 * Either all of the specified deltas will be applied or none of
1613 * them will. If any modified field dips below 0, then all modifications
1614 * will be backed out and EINVAL will be returned.
1617 xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
1624 * Loop through the array of mod structures and apply each
1625 * individually. If any fail, then back out all those
1626 * which have already been applied. Do all of this within
1627 * the scope of the SB_LOCK so that all of the changes will
1630 s = XFS_SB_LOCK(mp);
1632 for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
1634 * Apply the delta at index n. If it fails, break
1635 * from the loop so we'll fall into the undo loop
1638 switch (msbp->msb_field) {
1639 #ifdef HAVE_PERCPU_SB
1640 case XFS_SBS_ICOUNT:
1642 case XFS_SBS_FDBLOCKS:
1643 if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
1644 XFS_SB_UNLOCK(mp, s);
1645 status = xfs_icsb_modify_counters(mp,
1647 msbp->msb_delta, rsvd);
1648 s = XFS_SB_LOCK(mp);
1654 status = xfs_mod_incore_sb_unlocked(mp,
1656 msbp->msb_delta, rsvd);
1666 * If we didn't complete the loop above, then back out
1667 * any changes made to the superblock. If you add code
1668 * between the loop above and here, make sure that you
1669 * preserve the value of status. Loop back until
1670 * we step below the beginning of the array. Make sure
1671 * we don't touch anything back there.
1675 while (msbp >= msb) {
1676 switch (msbp->msb_field) {
1677 #ifdef HAVE_PERCPU_SB
1678 case XFS_SBS_ICOUNT:
1680 case XFS_SBS_FDBLOCKS:
1681 if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
1682 XFS_SB_UNLOCK(mp, s);
1683 status = xfs_icsb_modify_counters(mp,
1687 s = XFS_SB_LOCK(mp);
1693 status = xfs_mod_incore_sb_unlocked(mp,
1699 ASSERT(status == 0);
1703 XFS_SB_UNLOCK(mp, s);
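/*
 * Usage sketch, not from the original file: callers (e.g. the transaction
 * commit path) build an array of xfs_mod_sb_t entries so that either every
 * delta is applied or none are.  The deltas and geometry below are made up
 * for the example (a 64-inode chunk occupying 4 blocks).
 */
static int
demo_apply_sb_deltas(xfs_mount_t *mp)
{
	xfs_mod_sb_t	msb[2];

	msb[0].msb_field = XFS_SBS_ICOUNT;
	msb[0].msb_delta = 64;			/* new inode chunk	*/
	msb[1].msb_field = XFS_SBS_FDBLOCKS;
	msb[1].msb_delta = -4;			/* blocks it consumed	*/

	/* rsvd == 0: do not dip into the reserved block pool */
	return xfs_mod_incore_sb_batch(mp, msb, 2, 0);
}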
1708 * xfs_getsb() is called to obtain the buffer for the superblock.
1709 * The buffer is returned locked and read in from disk.
1710 * The buffer should be released with a call to xfs_brelse().
1712 * If the flags parameter is BUF_TRYLOCK, then we'll only return
1713 * the superblock buffer if it can be locked without sleeping.
1714 * If it can't then we'll return NULL.
1723 ASSERT(mp->m_sb_bp != NULL);
1725 if (flags & XFS_BUF_TRYLOCK) {
1726 if (!XFS_BUF_CPSEMA(bp)) {
1730 XFS_BUF_PSEMA(bp, PRIBIO);
1733 ASSERT(XFS_BUF_ISDONE(bp));
1738 * Used to free the superblock along various error paths.
1747 * Use xfs_getsb() so that the buffer will be locked
1748 * when we call xfs_buf_relse().
1750 bp = xfs_getsb(mp, 0);
1751 XFS_BUF_UNMANAGE(bp);
1757 * See if the UUID is unique among mounted XFS filesystems.
1758 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
1764 if (uuid_is_nil(&mp->m_sb.sb_uuid)) {
1766 "XFS: Filesystem %s has nil UUID - can't mount",
1770 if (!uuid_table_insert(&mp->m_sb.sb_uuid)) {
1772 "XFS: Filesystem %s has duplicate UUID - can't mount",
1780 * Remove filesystem from the UUID table.
1786 uuid_table_remove(&mp->m_sb.sb_uuid);
1790 * Used to log changes to the superblock unit and width fields which could
1791 * be altered by the mount options. Only the first superblock is updated.
1794 xfs_mount_log_sbunit(
1800 ASSERT(fields & (XFS_SB_UNIT|XFS_SB_WIDTH|XFS_SB_UUID));
1802 tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
1803 if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
1804 XFS_DEFAULT_LOG_COUNT)) {
1805 xfs_trans_cancel(tp, 0);
1808 xfs_mod_sb(tp, fields);
1809 xfs_trans_commit(tp, 0);
1813 #ifdef HAVE_PERCPU_SB
1815 * Per-cpu incore superblock counters
1817 * Simple concept, difficult implementation
1819 * Basically, replace the incore superblock counters with a distributed per cpu
1820 * counter for contended fields (e.g. free block count).
1822 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
1823 * hence needs to be accurately read when we are running low on space. Hence
1824 * there is a method to enable and disable the per-cpu counters based on how
1825 * much "stuff" is available in them.
1827 * Basically, a counter is enabled if there is enough free resource to justify
1828 * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
1829 * ENOSPC), then we disable the counters to synchronise all callers and
1830 * re-distribute the available resources.
1832 * If, once we redistributed the available resources, we still get a failure,
1833 * we disable the per-cpu counter and go through the slow path.
1835 * The slow path is the current xfs_mod_incore_sb() function. This means that
1836 * when we disable a per-cpu counter, we need to drain its resources back to
1837 * the global superblock. We do this after disabling the counter to prevent
1838 * more threads from queueing up on the counter.
1840 * Essentially, this means that we still need a lock in the fast path to enable
1841 * synchronisation between the global counters and the per-cpu counters. This
1842 * is not a problem because the lock will be local to a CPU almost all the time
1843 * and have little contention except when we get to ENOSPC conditions.
1845 * Basically, this lock becomes a barrier that enables us to lock out the fast
1846 * path while we do things like enabling and disabling counters and
1847 * synchronising the counters.
1851 * 1. XFS_SB_LOCK() before picking up per-cpu locks
1852 * 2. per-cpu locks always picked up via for_each_online_cpu() order
1853 * 3. accurate counter sync requires XFS_SB_LOCK + per cpu locks
1854 * 4. modifying per-cpu counters requires holding per-cpu lock
1855 * 5. modifying global counters requires holding XFS_SB_LOCK
1856 * 6. enabling or disabling a counter requires holding the XFS_SB_LOCK
1857 * and _none_ of the per-cpu locks.
1859 * Disabled counters are only ever re-enabled by a balance operation
1860 * that results in more free resources per CPU than a given threshold.
1861 * To ensure counters don't remain disabled, they are rebalanced when
1862 * the global resource goes above a higher threshold (i.e. some hysteresis
1863 * is present to prevent thrashing).
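/*
 * Minimal stand-alone sketch of the scheme described above; this is not XFS
 * code and every name is invented.  Locking and the re-enable/rebalance step
 * (xfs_icsb_balance_counter() in the real code) are omitted: a slot that
 * would go negative simply disables the counter, drains all slots into the
 * global value and falls back to the serialised slow path.
 */
#define DEMO_NCPUS	4

struct demo_counter {
	long	slot[DEMO_NCPUS];	/* per-cpu shares		*/
	long	global;			/* authoritative when disabled	*/
	int	enabled;
};

static int
demo_mod_counter(struct demo_counter *c, int cpu, long delta)
{
	if (c->enabled) {
		if (c->slot[cpu] + delta >= 0) {	/* fast path	*/
			c->slot[cpu] += delta;
			return 0;
		}
		c->enabled = 0;				/* local ENOSPC	*/
		for (cpu = 0; cpu < DEMO_NCPUS; cpu++) {
			c->global += c->slot[cpu];	/* drain	*/
			c->slot[cpu] = 0;
		}
	}
	if (c->global + delta < 0)			/* slow path	*/
		return -1;				/* ENOSPC	*/
	c->global += delta;
	return 0;
}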
1866 #ifdef CONFIG_HOTPLUG_CPU
1868 * hot-plug CPU notifier support.
1870 * We need a notifier per filesystem as we need to be able to identify
1871 * the filesystem to balance the counters out. This is achieved by
1872 * having a notifier block embedded in the xfs_mount_t and doing pointer
1873 * magic to get the mount pointer from the notifier block address.
1876 xfs_icsb_cpu_notify(
1877 struct notifier_block *nfb,
1878 unsigned long action,
1881 xfs_icsb_cnts_t *cntp;
1885 mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
1886 cntp = (xfs_icsb_cnts_t *)
1887 per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
1889 case CPU_UP_PREPARE:
1890 case CPU_UP_PREPARE_FROZEN:
1891 /* Easy Case - initialize the area and locks, and
1892 * then rebalance when online does everything else for us. */
1893 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1896 case CPU_ONLINE_FROZEN:
1898 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
1899 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
1900 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
1901 xfs_icsb_unlock(mp);
1904 case CPU_DEAD_FROZEN:
1905 /* Disable all the counters, then fold the dead cpu's
1906 * count into the total on the global superblock and
1907 * re-enable the counters. */
1909 s = XFS_SB_LOCK(mp);
1910 xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
1911 xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
1912 xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
1914 mp->m_sb.sb_icount += cntp->icsb_icount;
1915 mp->m_sb.sb_ifree += cntp->icsb_ifree;
1916 mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
1918 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1920 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT,
1921 XFS_ICSB_SB_LOCKED, 0);
1922 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE,
1923 XFS_ICSB_SB_LOCKED, 0);
1924 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
1925 XFS_ICSB_SB_LOCKED, 0);
1926 XFS_SB_UNLOCK(mp, s);
1927 xfs_icsb_unlock(mp);
1933 #endif /* CONFIG_HOTPLUG_CPU */
1936 xfs_icsb_init_counters(
1939 xfs_icsb_cnts_t *cntp;
1942 mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
1943 if (mp->m_sb_cnts == NULL)
1946 #ifdef CONFIG_HOTPLUG_CPU
1947 mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
1948 mp->m_icsb_notifier.priority = 0;
1949 register_hotcpu_notifier(&mp->m_icsb_notifier);
1950 #endif /* CONFIG_HOTPLUG_CPU */
1952 for_each_online_cpu(i) {
1953 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1954 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1957 mutex_init(&mp->m_icsb_mutex);
1960 * start with all counters disabled so that the
1961 * initial balance kicks us off correctly
1963 mp->m_icsb_counters = -1;
1968 xfs_icsb_reinit_counters(
1973 * start with all counters disabled so that the
1974 * initial balance kicks us off correctly
1976 mp->m_icsb_counters = -1;
1977 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
1978 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
1979 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
1980 xfs_icsb_unlock(mp);
1984 xfs_icsb_destroy_counters(
1987 if (mp->m_sb_cnts) {
1988 unregister_hotcpu_notifier(&mp->m_icsb_notifier);
1989 free_percpu(mp->m_sb_cnts);
1991 mutex_destroy(&mp->m_icsb_mutex);
1996 xfs_icsb_cnts_t *icsbp)
1998 while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
2004 xfs_icsb_unlock_cntr(
2005 xfs_icsb_cnts_t *icsbp)
2007 clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
2012 xfs_icsb_lock_all_counters(
2015 xfs_icsb_cnts_t *cntp;
2018 for_each_online_cpu(i) {
2019 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2020 xfs_icsb_lock_cntr(cntp);
2025 xfs_icsb_unlock_all_counters(
2028 xfs_icsb_cnts_t *cntp;
2031 for_each_online_cpu(i) {
2032 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2033 xfs_icsb_unlock_cntr(cntp);
2040 xfs_icsb_cnts_t *cnt,
2043 xfs_icsb_cnts_t *cntp;
2046 memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
2048 if (!(flags & XFS_ICSB_LAZY_COUNT))
2049 xfs_icsb_lock_all_counters(mp);
2051 for_each_online_cpu(i) {
2052 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2053 cnt->icsb_icount += cntp->icsb_icount;
2054 cnt->icsb_ifree += cntp->icsb_ifree;
2055 cnt->icsb_fdblocks += cntp->icsb_fdblocks;
2058 if (!(flags & XFS_ICSB_LAZY_COUNT))
2059 xfs_icsb_unlock_all_counters(mp);
2063 xfs_icsb_counter_disabled(
2065 xfs_sb_field_t field)
2067 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
2068 return test_bit(field, &mp->m_icsb_counters);
2072 xfs_icsb_disable_counter(
2074 xfs_sb_field_t field)
2076 xfs_icsb_cnts_t cnt;
2078 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
2081 * If we are already disabled, then there is nothing to do
2082 * here. We check before locking all the counters to avoid
2083 * the expensive lock operation when being called in the
2084 * slow path and the counter is already disabled. This is
2085 * safe because the only time we set or clear this state is under
2088 if (xfs_icsb_counter_disabled(mp, field))
2091 xfs_icsb_lock_all_counters(mp);
2092 if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
2093 /* drain back to superblock */
2095 xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT);
2097 case XFS_SBS_ICOUNT:
2098 mp->m_sb.sb_icount = cnt.icsb_icount;
2101 mp->m_sb.sb_ifree = cnt.icsb_ifree;
2103 case XFS_SBS_FDBLOCKS:
2104 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
2111 xfs_icsb_unlock_all_counters(mp);
2117 xfs_icsb_enable_counter(
2119 xfs_sb_field_t field,
2123 xfs_icsb_cnts_t *cntp;
2126 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
2128 xfs_icsb_lock_all_counters(mp);
2129 for_each_online_cpu(i) {
2130 cntp = per_cpu_ptr(mp->m_sb_cnts, i);
2132 case XFS_SBS_ICOUNT:
2133 cntp->icsb_icount = count + resid;
2136 cntp->icsb_ifree = count + resid;
2138 case XFS_SBS_FDBLOCKS:
2139 cntp->icsb_fdblocks = count + resid;
2147 clear_bit(field, &mp->m_icsb_counters);
2148 xfs_icsb_unlock_all_counters(mp);
2152 xfs_icsb_sync_counters_flags(
2156 xfs_icsb_cnts_t cnt;
2159 /* Pass 1: lock all counters */
2160 if ((flags & XFS_ICSB_SB_LOCKED) == 0)
2161 s = XFS_SB_LOCK(mp);
2163 xfs_icsb_count(mp, &cnt, flags);
2165 /* Step 3: update mp->m_sb fields */
2166 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
2167 mp->m_sb.sb_icount = cnt.icsb_icount;
2168 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
2169 mp->m_sb.sb_ifree = cnt.icsb_ifree;
2170 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
2171 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
2173 if ((flags & XFS_ICSB_SB_LOCKED) == 0)
2174 XFS_SB_UNLOCK(mp, s);
2178 * Accurate update of per-cpu counters to incore superblock
2181 xfs_icsb_sync_counters(
2184 xfs_icsb_sync_counters_flags(mp, 0);
2188 * Balance and enable/disable counters as necessary.
2190 * Thresholds for re-enabling counters are somewhat magic. Inode counts are
2191 * chosen to be the same number as a single on-disk allocation chunk per CPU, and
2192 * free blocks is something far enough from zero that we aren't going to thrash when we
2193 * get near ENOSPC. We also need to supply a minimum we require per cpu to
2194 * prevent looping endlessly when xfs_alloc_space asks for more than will
2195 * be distributed to a single CPU but each CPU has enough blocks to be
2198 * Note that we can be called when counters are already disabled.
2199 * xfs_icsb_disable_counter() optimises the counter locking in this case to
2200 * prevent locking every per-cpu counter needlessly.
2203 #define XFS_ICSB_INO_CNTR_REENABLE (uint64_t)64
2204 #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
2205 (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
2207 xfs_icsb_balance_counter(
2209 xfs_sb_field_t field,
2213 uint64_t count, resid;
2214 int weight = num_online_cpus();
2216 uint64_t min = (uint64_t)min_per_cpu;
2218 if (!(flags & XFS_ICSB_SB_LOCKED))
2219 s = XFS_SB_LOCK(mp);
2221 /* disable counter and sync counter */
2222 xfs_icsb_disable_counter(mp, field);
2224 /* update counters - first CPU gets residual */
2226 case XFS_SBS_ICOUNT:
2227 count = mp->m_sb.sb_icount;
2228 resid = do_div(count, weight);
2229 if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
2233 count = mp->m_sb.sb_ifree;
2234 resid = do_div(count, weight);
2235 if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
2238 case XFS_SBS_FDBLOCKS:
2239 count = mp->m_sb.sb_fdblocks;
2240 resid = do_div(count, weight);
2241 if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
2246 count = resid = 0; /* quiet, gcc */
2250 xfs_icsb_enable_counter(mp, field, count, resid);
2252 if (!(flags & XFS_ICSB_SB_LOCKED))
2253 XFS_SB_UNLOCK(mp, s);
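/*
 * Worked example, not in the original source: with 4 online CPUs and
 * mp->m_sb.sb_icount = 1000002,
 *
 *	count = 1000002;  resid = do_div(count, 4)  =>  count = 250000, resid = 2
 *
 * 250000 >= XFS_ICSB_INO_CNTR_REENABLE (64), so the counter is re-enabled
 * and every CPU slot receives 250000, the first online CPU also taking the
 * residual 2.  With sb_ifree = 100 instead, each CPU's share would be only
 * 25 < 64, so that counter stays disabled and is handled through the global
 * superblock slow path until more inodes are freed.
 */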
2257 xfs_icsb_modify_counters(
2259 xfs_sb_field_t field,
2263 xfs_icsb_cnts_t *icsbp;
2264 long long lcounter; /* long counter for 64 bit fields */
2265 int cpu, ret = 0, s;
2270 icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
2273 * if the counter is disabled, go to slow path
2275 if (unlikely(xfs_icsb_counter_disabled(mp, field)))
2277 xfs_icsb_lock_cntr(icsbp);
2278 if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
2279 xfs_icsb_unlock_cntr(icsbp);
2284 case XFS_SBS_ICOUNT:
2285 lcounter = icsbp->icsb_icount;
2287 if (unlikely(lcounter < 0))
2288 goto balance_counter;
2289 icsbp->icsb_icount = lcounter;
2293 lcounter = icsbp->icsb_ifree;
2295 if (unlikely(lcounter < 0))
2296 goto balance_counter;
2297 icsbp->icsb_ifree = lcounter;
2300 case XFS_SBS_FDBLOCKS:
2301 BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
2303 lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
2305 if (unlikely(lcounter < 0))
2306 goto balance_counter;
2307 icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
2313 xfs_icsb_unlock_cntr(icsbp);
2321 * serialise with a mutex so we don't burn lots of cpu on
2322 * the superblock lock. We still need to hold the superblock
2323 * lock, however, when we modify the global structures.
2328 * Now running atomically.
2330 * If the counter is enabled, someone has beaten us to rebalancing.
2331 * Drop the lock and try again in the fast path....
2333 if (!(xfs_icsb_counter_disabled(mp, field))) {
2334 xfs_icsb_unlock(mp);
2339 * The counter is currently disabled. Because we are
2340 * running atomically here, we know a rebalance cannot
2341 * be in progress. Hence we can go straight to operating
2342 * on the global superblock. We do not call xfs_mod_incore_sb()
2343 * here even though we need to get the SB_LOCK. Doing so
2344 * will cause us to re-enter this function and deadlock.
2345 * Hence we get the SB_LOCK ourselves and then call
2346 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
2347 * directly on the global counters.
2349 s = XFS_SB_LOCK(mp);
2350 ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
2351 XFS_SB_UNLOCK(mp, s);
2354 * Now that we've modified the global superblock, we
2355 * may be able to re-enable the distributed counters
2356 * (e.g. lots of space just got freed). After that
2360 xfs_icsb_balance_counter(mp, field, 0, 0);
2361 xfs_icsb_unlock(mp);
2365 xfs_icsb_unlock_cntr(icsbp);
2369 * We may have multiple threads here if multiple per-cpu
2370 * counters run dry at the same time. This will mean we can
2371 * do more balances than strictly necessary but it is not
2372 * the common slowpath case.
2377 * running atomically.
2379 * This will leave the counter in the correct state for future
2380 * accesses. After the rebalance, we simply try again and our retry
2381 * will either succeed through the fast path or slow path without
2382 * another balance operation being required.
2384 xfs_icsb_balance_counter(mp, field, 0, delta);
2385 xfs_icsb_unlock(mp);