/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * fsnotify inode mark locking, lifetime and refcounting
 *
 * REFCNT:
 * The group->refcnt and mark->refcnt tell how many "things" in the kernel
 * currently are referencing the objects. Both kinds of objects typically will
 * live inside the kernel with a refcnt of 2, one for its creation and one for
 * the reference a group and a mark hold to each other.
 * If you are holding the appropriate locks, you can take a reference and the
 * object itself is guaranteed to survive until the reference is dropped.
 *
 * LOCKING:
 * There are 3 locks involved with fsnotify inode marks and they MUST be taken
 * in order as follows:
 *
 * group->mark_mutex
 * mark->lock
 * inode->i_lock
 *
 * group->mark_mutex protects the marks_list anchored inside a given group and
 * each mark is hooked via the g_list. It also protects the group's private
 * data (i.e. group limits).
 *
 * mark->lock protects the mark's attributes like its masks and flags.
 * Furthermore it protects access to a reference of the group that the mark
 * is assigned to as well as access to a reference of the inode/vfsmount
 * that is being watched by the mark.
 *
 * inode->i_lock protects the i_fsnotify_marks list anchored inside a
 * given inode and each mark is hooked via the i_list. (and sorta the
 * free_i_list)
 *
 *
 * LIFETIME:
 * Inode marks survive between when they are added to an inode and when their
 * refcnt==0.
 *
 * The inode mark can be cleared for a number of different reasons including:
 * - The inode is unlinked for the last time. (fsnotify_inode_remove)
 * - The inode is being evicted from cache. (fsnotify_inode_delete)
 * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
 * - Something explicitly requests that it be removed. (fsnotify_destroy_mark)
 * - The fsnotify_group associated with the mark is going away and all such marks
 *   need to be cleaned up. (fsnotify_clear_marks_by_group)
 *
 * Worst case we are given an inode and need to clean up all the marks on that
 * inode. We take i_lock and walk the i_fsnotify_marks safely. For each
 * mark on the list we take a reference (so the mark can't disappear under us).
 * We remove that mark from the inode's list of marks and we add this mark to a
 * private list anchored on the stack using i_free_list; we walk i_free_list
 * and before we destroy the mark we make sure that we don't race with a
 * concurrent destroy_group by getting a ref to the mark's group and taking the
 * group's mutex.
 *
 * Very similarly for freeing by group, except we use free_g_list.
 *
 * This has the very interesting property of being able to run concurrently with
 * any (or all) other directions.
 */
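
/*
 * A minimal usage sketch of the API in this file, as a hypothetical backend
 * might drive it (example_free_mark and example_watch_inode are illustrative
 * names, not functions that exist in this tree):
 *
 *	static void example_free_mark(struct fsnotify_mark *mark)
 *	{
 *		kfree(mark);
 *	}
 *
 *	static int example_watch_inode(struct fsnotify_group *group,
 *				       struct inode *inode, __u32 mask)
 *	{
 *		struct fsnotify_mark *mark;
 *		int ret;
 *
 *		mark = kzalloc(sizeof(*mark), GFP_KERNEL);
 *		if (!mark)
 *			return -ENOMEM;
 *		fsnotify_init_mark(mark, example_free_mark);
 *		mark->mask = mask;
 *		ret = fsnotify_add_mark(mark, group, inode, NULL, 0);
 *		if (ret)
 *			fsnotify_put_mark(mark);
 *		return ret;
 *	}
 *
 * After a successful add the mark holds the refcnt of 2 described above (one
 * for the creator, one taken for the group/object lists). Removing the watch
 * later is fsnotify_destroy_mark(mark, group) followed by
 * fsnotify_put_mark(mark) to drop the creation reference.
 */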

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

struct srcu_struct fsnotify_mark_srcu;
static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);
static DECLARE_WAIT_QUEUE_HEAD(destroy_waitq);

void fsnotify_get_mark(struct fsnotify_mark *mark)
{
        atomic_inc(&mark->refcnt);
}

void fsnotify_put_mark(struct fsnotify_mark *mark)
{
        if (atomic_dec_and_test(&mark->refcnt)) {
                if (mark->group)
                        fsnotify_put_group(mark->group);
                mark->free_mark(mark);
        }
}

/*
 * Any time a mark is getting freed we end up here.
 * The caller had better be holding a reference to this mark so we don't actually
 * do the final put under the mark->lock
 */
void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
                                  struct fsnotify_group *group)
{
        struct inode *inode = NULL;

        BUG_ON(!mutex_is_locked(&group->mark_mutex));

        spin_lock(&mark->lock);

        /* something else already called this function on this mark */
        if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
                spin_unlock(&mark->lock);
                return;
        }

        mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;

        if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
                inode = mark->i.inode;
                fsnotify_destroy_inode_mark(mark);
        } else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT)
                fsnotify_destroy_vfsmount_mark(mark);
        else
                BUG();

        list_del_init(&mark->g_list);

        spin_unlock(&mark->lock);

        if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
                iput(inode);
        /* release lock temporarily */
        mutex_unlock(&group->mark_mutex);

        spin_lock(&destroy_lock);
        list_add(&mark->destroy_list, &destroy_list);
        spin_unlock(&destroy_lock);
        wake_up(&destroy_waitq);
        /*
         * We don't necessarily have a ref on mark from caller so the above destroy
         * may have actually freed it, unless this group provides a 'freeing_mark'
         * function which must be holding a reference.
         */

        /*
         * Some groups like to know that marks are being freed. This is a
         * callback to the group function to let it know that this mark
         * is being freed.
         */
        if (group->ops->freeing_mark)
                group->ops->freeing_mark(mark, group);

        /*
         * __fsnotify_update_child_dentry_flags(inode);
         *
         * I really want to call that, but we can't, we have no idea if the inode
         * still exists the second we drop the mark->lock.
         *
         * The next time an event arrives at this inode from one of its children
         * __fsnotify_parent will see that the inode doesn't care about its
         * children and will update all of these flags then. So really this
         * is just a lazy update (and could be a perf win...)
         */

        atomic_dec(&group->num_marks);

        mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
}

void fsnotify_destroy_mark(struct fsnotify_mark *mark,
                           struct fsnotify_group *group)
{
        mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
        fsnotify_destroy_mark_locked(mark, group);
        mutex_unlock(&group->mark_mutex);
}
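
/*
 * Typical caller pattern when removing a mark the caller does not already
 * hold its own reference to (compare fsnotify_clear_marks_by_group_flags()
 * below): pin the mark first so it cannot be freed underneath us, destroy
 * it, then drop the pin:
 *
 *	fsnotify_get_mark(mark);
 *	fsnotify_destroy_mark(mark, group);
 *	fsnotify_put_mark(mark);
 */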

void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
        assert_spin_locked(&mark->lock);

        mark->mask = mask;

        if (mark->flags & FSNOTIFY_MARK_FLAG_INODE)
                fsnotify_set_inode_mark_mask_locked(mark, mask);
}

void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
        assert_spin_locked(&mark->lock);

        mark->ignored_mask = mask;
}

/*
 * Sorting function for lists of fsnotify marks.
 *
 * Fanotify supports different notification classes (reflected as priority of
 * notification group). Events shall be passed to notification groups in
 * decreasing priority order. To achieve this, marks in notification lists for
 * inodes and vfsmounts are sorted so that priorities of corresponding groups
 * are descending.
 *
 * Furthermore correct handling of the ignore mask requires processing inode
 * and vfsmount marks of each group together. Using the group address as
 * further sort criterion provides a unique sorting order and thus we can
 * merge inode and vfsmount lists of marks in linear time and find groups
 * present in both lists.
 *
 * A return value of 1 signifies that b has priority over a.
 * A return value of 0 signifies that the two marks have to be handled together.
 * A return value of -1 signifies that a has priority over b.
 */
int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
{
        if (a == b)
                return 0;
        if (!a)
                return 1;
        if (!b)
                return -1;
        if (a->priority < b->priority)
                return 1;
        if (a->priority > b->priority)
                return -1;
        if (a < b)
                return 1;
        return -1;
}
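
/*
 * Sketch of the linear merge this ordering enables (simplified and
 * illustrative; example_merge() and the handle_*() helpers are hypothetical,
 * the real consumer walks the inode and vfsmount mark lists when delivering
 * an event). Both arrays are sorted by the rule above and NULL terminated:
 *
 *	static void example_merge(struct fsnotify_group **inode_grp,
 *				  struct fsnotify_group **vfsmount_grp)
 *	{
 *		while (*inode_grp || *vfsmount_grp) {
 *			int cmp = fsnotify_compare_groups(*inode_grp,
 *							  *vfsmount_grp);
 *
 *			if (cmp <= 0)
 *				handle_inode_group(*inode_grp++);
 *			if (cmp >= 0)
 *				handle_vfsmount_group(*vfsmount_grp++);
 *		}
 *	}
 *
 * cmp == 0 means both marks belong to the same group and get handled in the
 * same iteration, which is what correct ignore mask handling relies on.
 */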

/*
 * Attach an initialized mark to a given group and fs object.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which group.
 */
int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
                             struct fsnotify_group *group, struct inode *inode,
                             struct vfsmount *mnt, int allow_dups)
{
        int ret = 0;

        BUG_ON(inode && mnt);
        BUG_ON(!inode && !mnt);
        BUG_ON(!mutex_is_locked(&group->mark_mutex));

        /*
         * LOCKING ORDER!!!!
         * group->mark_mutex
         * mark->lock
         * inode->i_lock
         */
        spin_lock(&mark->lock);
        mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;

        fsnotify_get_group(group);
        mark->group = group;
        list_add(&mark->g_list, &group->marks_list);
        atomic_inc(&group->num_marks);
        fsnotify_get_mark(mark); /* for i_list and g_list */

        if (inode) {
                ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
                if (ret)
                        goto err;
        } else if (mnt) {
                ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
                if (ret)
                        goto err;
        } else {
                BUG();
        }

        /* this will pin the object if appropriate */
        fsnotify_set_mark_mask_locked(mark, mark->mask);
        spin_unlock(&mark->lock);

        if (inode)
                __fsnotify_update_child_dentry_flags(inode);

        return ret;
err:
        mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
        list_del_init(&mark->g_list);
        fsnotify_put_group(group);
        mark->group = NULL;
        atomic_dec(&group->num_marks);

        spin_unlock(&mark->lock);

        spin_lock(&destroy_lock);
        list_add(&mark->destroy_list, &destroy_list);
        spin_unlock(&destroy_lock);
        wake_up(&destroy_waitq);

        return ret;
}

int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
                      struct inode *inode, struct vfsmount *mnt, int allow_dups)
{
        int ret;
        mutex_lock(&group->mark_mutex);
        ret = fsnotify_add_mark_locked(mark, group, inode, mnt, allow_dups);
        mutex_unlock(&group->mark_mutex);
        return ret;
}

/*
 * clear any marks in a group in which mark->flags & flags is true
 */
void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
                                         unsigned int flags)
{
        struct fsnotify_mark *lmark, *mark;

        mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
        list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
                if (mark->flags & flags) {
                        fsnotify_get_mark(mark);
                        fsnotify_destroy_mark_locked(mark, group);
                        fsnotify_put_mark(mark);
                }
        }
        mutex_unlock(&group->mark_mutex);
}

/*
 * Given a group, destroy all of the marks associated with that group.
 */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
{
        fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
}

void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
{
        assert_spin_locked(&old->lock);
        new->i.inode = old->i.inode;
        new->m.mnt = old->m.mnt;
        if (old->group)
                fsnotify_get_group(old->group);
        new->group = old->group;
        new->mask = old->mask;
        new->free_mark = old->free_mark;
}

/*
 * Nothing fancy, just initialize lists and locks and counters.
 */
void fsnotify_init_mark(struct fsnotify_mark *mark,
                        void (*free_mark)(struct fsnotify_mark *mark))
{
        memset(mark, 0, sizeof(*mark));
        spin_lock_init(&mark->lock);
        atomic_set(&mark->refcnt, 1);
        mark->free_mark = free_mark;
}
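
/*
 * The mark is usually embedded in a larger, backend specific object and
 * free_mark() frees that containing object once the last reference is
 * dropped. Illustrative sketch (struct example_mark and example_mark_free
 * are hypothetical, not types/functions in this tree):
 *
 *	struct example_mark {
 *		struct fsnotify_mark fsn_mark;
 *		int wd;
 *	};
 *
 *	static void example_mark_free(struct fsnotify_mark *fsn_mark)
 *	{
 *		kfree(container_of(fsn_mark, struct example_mark, fsn_mark));
 *	}
 *
 * The backend then calls fsnotify_init_mark(&xmark->fsn_mark,
 * example_mark_free) on a freshly allocated struct example_mark (xmark).
 */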

static int fsnotify_mark_destroy(void *ignored)
{
        struct fsnotify_mark *mark, *next;
        struct list_head private_destroy_list;

        for (;;) {
                spin_lock(&destroy_lock);
                /* exchange the list head */
                list_replace_init(&destroy_list, &private_destroy_list);
                spin_unlock(&destroy_lock);

                synchronize_srcu(&fsnotify_mark_srcu);

                list_for_each_entry_safe(mark, next, &private_destroy_list, destroy_list) {
                        list_del_init(&mark->destroy_list);
                        fsnotify_put_mark(mark);
                }

                wait_event_interruptible(destroy_waitq, !list_empty(&destroy_list));
        }

        return 0;
}
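
/*
 * The synchronize_srcu() above pairs with the SRCU read side used while
 * delivering events, roughly (simplified; the real reader lives in
 * fs/notify/fsnotify.c and walks the inode/vfsmount mark lists between
 * these two calls):
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&fsnotify_mark_srcu);
 *	... walk marks and call into their groups; a mark seen here may
 *	    already be unhooked, but it cannot be freed until the grace
 *	    period has elapsed ...
 *	srcu_read_unlock(&fsnotify_mark_srcu, idx);
 */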

static int __init fsnotify_mark_init(void)
{
        struct task_struct *thread;

        thread = kthread_run(fsnotify_mark_destroy, NULL,
                             "fsnotify_mark");
        if (IS_ERR(thread))
                panic("unable to start fsnotify mark destruction thread.");

        return 0;
}
device_initcall(fsnotify_mark_init);