/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
#include "xfs_trace.h"
/*
 * Define xfs inode iolock lockdep classes. We need to ensure that all active
 * inodes are considered the same for lockdep purposes, including inodes that
 * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
 * guarantee the locks are considered the same when there are multiple lock
 * initialisation sites. Also, define a reclaimable inode class so it is
 * obvious in lockdep reports which class the report is against.
 */
static struct lock_class_key xfs_iolock_active;
struct lock_class_key xfs_iolock_reclaimable;
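
/*
 * Illustrative note (not part of the original file): the teardown path is
 * expected to switch a reclaimable inode's iolock into the reclaimable
 * class with something like:
 *
 *	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
 *			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
 *
 * while xfs_inode_alloc() and xfs_iget_cache_hit() below move the lock
 * back into xfs_iolock_active when the inode is (re)activated.
 */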
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_active, "xfs_iolock_active");
	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	/* prevent anyone from using this yet */
	VFS_I(ip)->i_state = I_NEW;

	return ip;
}
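
/*
 * Free the inode once the RCU grace period has expired. The rcu_head is
 * overlaid on the (then unused) i_dentry list head of the VFS inode, so
 * that list must be reinitialised before the memory is returned to the
 * inode zone.
 */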
STATIC void
__xfs_inode_free(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of((void *)head,
					       struct inode, i_dentry);
	struct xfs_inode	*ip = XFS_I(inode);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_zone_free(xfs_inode_zone, ip);
}
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	call_rcu((struct rcu_head *)&VFS_I(ip)->i_dentry, __xfs_inode_free);
}
/*
 * Check the validity of the inode we just found in the cache.
 */
STATIC int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	spin_lock(&ip->i_flags_lock);
	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 * wait_on_inode to wait for these flags to be cleared
	 * instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}
	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);
		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble. Try to re-add it to the reclaim list.
			 */
			read_lock(&pag->pag_ici_lock);
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~XFS_INEW;
			ip->i_flags |= XFS_IRECLAIMABLE;
			__xfs_inode_set_reclaim_tag(pag, ip);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		write_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
		lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
				&xfs_iolock_active, "xfs_iolock_active");

		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	return error;
}
STATIC int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}
	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			goto out_preload_end;
	}
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
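
/*
 * Example call (illustrative sketch, not from the original file): a caller
 * outside a transaction that wants the inode returned with the inode lock
 * held exclusively might do:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	... use the inode ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */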
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* reject inode numbers outside existing AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not been read in yet.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
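
/*
 * Example pairing (illustrative sketch, not from the original file): read
 * from the extent list under whichever lock mode xfs_ilock_map_shared()
 * decided was necessary:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent list ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */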
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
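
/*
 * Example (illustrative sketch, not from the original file): a caller that
 * needs to change both file data and inode metadata takes the two locks in
 * the required order, IO lock first, and drops them together:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify data and metadata ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */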
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	return 1;

out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}
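
/*
 * Example (illustrative sketch, not from the original file): the trylock
 * pattern used by callers that must not block:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return EAGAIN;
 *	... do the work ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */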
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
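
/*
 * Example (illustrative sketch, not from the original file): take the
 * inode lock exclusively for a short setup phase, then demote it so other
 * readers can proceed during the long-running part:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... exclusive setup ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... work that only needs the lock shared ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */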
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}