// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_ag.h"
#include "xfs_trace.h"

/*
 * Use a static key here to reduce the overhead of xfs_defer_drain_rele().
 * If the compiler supports jump labels, the static branch will be replaced
 * by a nop sled when there are no xfs_defer_drain_wait() callers.  Online
 * fsck is currently the only caller, so this is a reasonable tradeoff.
 *
 * Note: Patching the kernel code requires taking the cpu hotplug lock.  Other
 * parts of the kernel allocate memory with that lock held, which means that
 * XFS callers cannot hold any locks that might be used by memory reclaim or
 * writeback when calling the static_branch_{inc,dec} functions.
 */
static DEFINE_STATIC_KEY_FALSE(xfs_drain_waiter_gate);

void
xfs_drain_wait_disable(void)
{
        static_branch_dec(&xfs_drain_waiter_gate);
}

void
xfs_drain_wait_enable(void)
{
        static_branch_inc(&xfs_drain_waiter_gate);
}
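
/*
 * Usage sketch (illustrative only, not part of this file): a waiter such as
 * online fsck is expected to bracket its scan with the enable/disable
 * helpers above, so that xfs_defer_drain_rele() only checks the waitqueue
 * while a waiter can actually exist:
 *
 *        xfs_drain_wait_enable();
 *        ... scan the filesystem, calling xfs_group_intent_drain() ...
 *        xfs_drain_wait_disable();
 */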

void
xfs_defer_drain_init(
        struct xfs_defer_drain  *dr)
{
        atomic_set(&dr->dr_count, 0);
        init_waitqueue_head(&dr->dr_waiters);
}

void
xfs_defer_drain_free(struct xfs_defer_drain     *dr)
{
        ASSERT(atomic_read(&dr->dr_count) == 0);
}
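
/*
 * Lifecycle sketch (illustrative only; the structure and field names are
 * made up): a drain is embedded in a longer-lived object and initialized
 * before any intents can be declared against it.  Freeing it only asserts
 * that every xfs_defer_drain_grab() was balanced by an xfs_defer_drain_rele():
 *
 *        struct example_obj {
 *                struct xfs_defer_drain  drain;
 *        };
 *
 *        xfs_defer_drain_init(&obj->drain);
 *        ... declare and finish intents ...
 *        xfs_defer_drain_free(&obj->drain);
 */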

/* Increase the pending intent count. */
static inline void xfs_defer_drain_grab(struct xfs_defer_drain *dr)
{
        atomic_inc(&dr->dr_count);
}

static inline bool has_waiters(struct wait_queue_head *wq_head)
{
        /*
         * This memory barrier is paired with the one in set_current_state on
         * the waiting side.
         */
        smp_mb__after_atomic();
        return waitqueue_active(wq_head);
}

/* Decrease the pending intent count, and wake any waiters, if appropriate. */
static inline void xfs_defer_drain_rele(struct xfs_defer_drain *dr)
{
        if (atomic_dec_and_test(&dr->dr_count) &&
            static_branch_unlikely(&xfs_drain_waiter_gate) &&
            has_waiters(&dr->dr_waiters))
                wake_up(&dr->dr_waiters);
}

/* Are there intents pending? */
static inline bool xfs_defer_drain_busy(struct xfs_defer_drain *dr)
{
        return atomic_read(&dr->dr_count) > 0;
}

/*
 * Wait for the pending intent count for a drain to hit zero.
 *
 * Callers must not hold any locks that would prevent intents from being
 * finished.
 */
static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr)
{
        return wait_event_killable(dr->dr_waiters, !xfs_defer_drain_busy(dr));
}

/*
 * Get a passive reference to the group that contains an fsbno and declare an
 * intent to update its metadata.
 *
 * Other threads that need exclusive access can decide to back off if they see
 * declared intentions.
 */
struct xfs_group *
xfs_group_intent_get(
        struct xfs_mount        *mp,
        xfs_fsblock_t           fsbno,
        enum xfs_group_type     type)
{
        struct xfs_group        *xg;

        xg = xfs_group_get_by_fsb(mp, fsbno, type);
        if (!xg)
                return NULL;
        trace_xfs_group_intent_hold(xg, __return_address);
        xfs_defer_drain_grab(&xg->xg_intents_drain);
        return xg;
}

/*
 * Release our intent to update this group's metadata, and then release our
 * passive ref to it.
 */
void
xfs_group_intent_put(
        struct xfs_group        *xg)
{
        trace_xfs_group_intent_rele(xg, __return_address);
        xfs_defer_drain_rele(&xg->xg_intents_drain);
        xfs_group_put(xg);
}
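
/*
 * Usage sketch (illustrative only): a thread that logs a deferred intent
 * against a group holds the intent reference for the life of the intent
 * item, roughly:
 *
 *        xg = xfs_group_intent_get(mp, fsbno, XG_TYPE_AG);
 *        ... log the intent item, then finish the deferred work ...
 *        xfs_group_intent_put(xg);
 *
 * Callers must handle a NULL return from xfs_group_intent_get() (group not
 * found); that handling is omitted here.
 */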

/*
 * Wait for the intent update count for this group to hit zero.
 * Callers must not hold any of the group's header buffers.
 */
int
xfs_group_intent_drain(
        struct xfs_group        *xg)
{
        trace_xfs_group_wait_intents(xg, __return_address);
        return xfs_defer_drain_wait(&xg->xg_intents_drain);
}

/*
 * Has anyone declared an intent to update this group?
 */
bool
xfs_group_intent_busy(
        struct xfs_group        *xg)
{
        return xfs_defer_drain_busy(&xg->xg_intents_drain);
}
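
/*
 * Waiter-side sketch (illustrative only): a scanner that needs a quiesced
 * group can poll xfs_group_intent_busy() and back off, or sleep in
 * xfs_group_intent_drain(), which returns -ERESTARTSYS if the wait is
 * interrupted by a fatal signal:
 *
 *        if (xfs_group_intent_busy(xg)) {
 *                error = xfs_group_intent_drain(xg);
 *                if (error)
 *                        return error;
 *        }
 *
 * xfs_drain_wait_enable() must have been called first (see above), or
 * xfs_defer_drain_rele() will skip the wakeup check entirely.
 */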