/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * fsnotify inode mark locking, lifetime and refcnting
 *
 * REFCNT:
 * The group->refcnt and mark->refcnt tell how many "things" in the kernel
 * currently are referencing the objects. Both kinds of objects typically will
 * live inside the kernel with a refcnt of 2, one for its creation and one for
 * the reference a group and a mark hold to each other.
 * If you are holding the appropriate locks, you can take a reference and the
 * object itself is guaranteed to survive until the reference is dropped.
 *
 * LOCKING:
 * There are 3 locks involved with fsnotify inode marks and they MUST be taken
 * in order as follows:
 *
 * group->mark_mutex
 * mark->lock
 * mark->connector->lock
 *
 * group->mark_mutex protects the marks_list anchored inside a given group and
 * each mark is hooked via the g_list. It also protects the group's private
 * data (i.e. group limits).
 *
 * mark->lock protects the mark's attributes like its masks and flags.
 * Furthermore it protects the access to a reference of the group that the mark
 * is assigned to as well as the access to a reference of the inode/vfsmount
 * that is being watched by the mark.
 *
 * mark->connector->lock protects the list of marks anchored inside an
 * inode / vfsmount and each mark is hooked via the i_list.
 *
 * A list of notification marks relating to inode / mnt is contained in
 * fsnotify_mark_connector. That structure is alive as long as there are any
 * marks in the list and is also protected by fsnotify_mark_srcu.
 *
 * LIFETIME:
 * Inode marks survive between when they are added to an inode and when their
 * refcnt==0. Marks are also protected by fsnotify_mark_srcu.
 *
 * The inode mark can be cleared for a number of different reasons including:
 * - The inode is unlinked for the last time. (fsnotify_inode_remove)
 * - The inode is being evicted from cache. (fsnotify_inode_delete)
 * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
 * - Something explicitly requests that it be removed. (fsnotify_destroy_mark)
 * - The fsnotify_group associated with the mark is going away and all such marks
 *   need to be cleaned up. (fsnotify_clear_marks_by_group)
 *
 * This has the very interesting property of being able to run concurrently with
 * any (or all) of the other removal paths.
 */
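
/*
 * Illustrative sketch of the locking order described above. This is not part
 * of the original file; example_lock_order(), example_group and example_mark
 * are hypothetical names, and the block is compiled out.
 */
#if 0
static void example_lock_order(struct fsnotify_group *example_group,
			       struct fsnotify_mark *example_mark)
{
	/* 1) group->mark_mutex serializes changes to the group's mark list */
	mutex_lock(&example_group->mark_mutex);
	/* 2) mark->lock protects the mark's masks, flags and group pointer */
	spin_lock(&example_mark->lock);
	/* 3) mark->connector->lock protects the per-object list of marks */
	spin_lock(&example_mark->connector->lock);

	/* ... work on the mark and its connector ... */

	spin_unlock(&example_mark->connector->lock);
	spin_unlock(&example_mark->lock);
	mutex_unlock(&example_group->mark_mutex);
}
#endif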

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#define FSNOTIFY_REAPER_DELAY	(1)	/* 1 jiffy */

struct srcu_struct fsnotify_mark_srcu;
struct kmem_cache *fsnotify_mark_connector_cachep;

static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);
static struct fsnotify_mark_connector *connector_destroy_list;

static void fsnotify_mark_destroy_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy_workfn);

static void fsnotify_connector_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);

void fsnotify_get_mark(struct fsnotify_mark *mark)
{
	atomic_inc(&mark->refcnt);
}

void fsnotify_put_mark(struct fsnotify_mark *mark)
{
	if (atomic_dec_and_test(&mark->refcnt)) {
		if (mark->group)
			fsnotify_put_group(mark->group);
		mark->free_mark(mark);
	}
}

static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
	u32 new_mask = 0;
	struct fsnotify_mark *mark;

	assert_spin_locked(&conn->lock);
	hlist_for_each_entry(mark, &conn->list, obj_list)
		new_mask |= mark->mask;

	if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE)
		conn->inode->i_fsnotify_mask = new_mask;
	else if (conn->flags & FSNOTIFY_OBJ_TYPE_VFSMOUNT)
		real_mount(conn->mnt)->mnt_fsnotify_mask = new_mask;
}

/*
 * Calculate mask of events for a list of marks. The caller must make sure
 * connector cannot disappear under us (usually by holding a mark->lock or
 * mark->group->mark_mutex for a mark on this list).
 */
void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
	if (!conn)
		return;

	spin_lock(&conn->lock);
	__fsnotify_recalc_mask(conn);
	spin_unlock(&conn->lock);
	if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE)
		__fsnotify_update_child_dentry_flags(conn->inode);
}

/* Free all connectors queued for freeing once SRCU period ends */
static void fsnotify_connector_destroy_workfn(struct work_struct *work)
{
	struct fsnotify_mark_connector *conn, *free;

	spin_lock(&destroy_lock);
	conn = connector_destroy_list;
	connector_destroy_list = NULL;
	spin_unlock(&destroy_lock);

	synchronize_srcu(&fsnotify_mark_srcu);
	while (conn) {
		free = conn;
		conn = conn->destroy_next;
		kmem_cache_free(fsnotify_mark_connector_cachep, free);
	}
}

static struct inode *fsnotify_detach_connector_from_object(
					struct fsnotify_mark_connector *conn)
{
	struct inode *inode = NULL;

	if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE) {
		inode = conn->inode;
		rcu_assign_pointer(inode->i_fsnotify_marks, NULL);
		inode->i_fsnotify_mask = 0;
		conn->inode = NULL;
		conn->flags &= ~FSNOTIFY_OBJ_TYPE_INODE;
	} else if (conn->flags & FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
		rcu_assign_pointer(real_mount(conn->mnt)->mnt_fsnotify_marks,
				   NULL);
		real_mount(conn->mnt)->mnt_fsnotify_mask = 0;
		conn->mnt = NULL;
		conn->flags &= ~FSNOTIFY_OBJ_TYPE_VFSMOUNT;
	}

	return inode;
}

static struct inode *fsnotify_detach_from_object(struct fsnotify_mark *mark)
{
	struct fsnotify_mark_connector *conn;
	struct inode *inode = NULL;
	bool free_conn = false;

	conn = mark->connector;
	spin_lock(&conn->lock);
	hlist_del_init_rcu(&mark->obj_list);
	if (hlist_empty(&conn->list)) {
		inode = fsnotify_detach_connector_from_object(conn);
		free_conn = true;
	} else {
		__fsnotify_recalc_mask(conn);
	}
	mark->connector = NULL;
	spin_unlock(&conn->lock);

	if (free_conn) {
		spin_lock(&destroy_lock);
		conn->destroy_next = connector_destroy_list;
		connector_destroy_list = conn;
		spin_unlock(&destroy_lock);
		queue_work(system_unbound_wq, &connector_reaper_work);
	}

	return inode;
}

/*
 * Remove mark from inode / vfsmount list, group list, drop inode reference
 * if we got one.
 *
 * Must be called with group->mark_mutex held.
 */
void fsnotify_detach_mark(struct fsnotify_mark *mark)
{
	struct inode *inode = NULL;
	struct fsnotify_group *group = mark->group;

	BUG_ON(!mutex_is_locked(&group->mark_mutex));

	spin_lock(&mark->lock);

	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&mark->lock);
		return;
	}

	mark->flags &= ~FSNOTIFY_MARK_FLAG_ATTACHED;

	inode = fsnotify_detach_from_object(mark);

	/*
	 * Note that we didn't update flags telling whether the inode cares
	 * about what's happening with its children. We update these flags
	 * from __fsnotify_parent() lazily when the next event happens on one
	 * of our children.
	 */

	list_del_init(&mark->g_list);

	spin_unlock(&mark->lock);

	if (inode)
		iput(inode);

	atomic_dec(&group->num_marks);
}

/*
 * Prepare mark for freeing and add it to the list of marks prepared for
 * freeing. The actual freeing must happen after the SRCU period ends and the
 * caller is responsible for this.
 *
 * The function returns true if the mark was added to the list of marks for
 * freeing. It returns false if someone else has already called
 * __fsnotify_free_mark() for the mark.
 */
static bool __fsnotify_free_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group = mark->group;

	spin_lock(&mark->lock);
	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
		spin_unlock(&mark->lock);
		return false;
	}
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	spin_unlock(&mark->lock);

	/*
	 * Some groups like to know that marks are being freed. This is a
	 * callback to the group function to let it know that this mark
	 * is being freed.
	 */
	if (group->ops->freeing_mark)
		group->ops->freeing_mark(mark, group);

	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);

	return true;
}

/*
 * Free fsnotify mark. The freeing actually happens from a workqueue which
 * first waits for the SRCU period to end. Caller must have a reference to the
 * mark or be protected by fsnotify_mark_srcu.
 */
void fsnotify_free_mark(struct fsnotify_mark *mark)
{
	if (__fsnotify_free_mark(mark)) {
		queue_delayed_work(system_unbound_wq, &reaper_work,
				   FSNOTIFY_REAPER_DELAY);
	}
}

void fsnotify_destroy_mark(struct fsnotify_mark *mark,
			   struct fsnotify_group *group)
{
	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
	fsnotify_detach_mark(mark);
	mutex_unlock(&group->mark_mutex);
	fsnotify_free_mark(mark);
}

void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->mask = mask;
}

void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->ignored_mask = mask;
}

/*
 * Sorting function for lists of fsnotify marks.
 *
 * Fanotify supports different notification classes (reflected as priority of
 * notification group). Events shall be passed to notification groups in
 * decreasing priority order. To achieve this marks in notification lists for
 * inodes and vfsmounts are sorted so that priorities of corresponding groups
 * are descending.
 *
 * Furthermore correct handling of the ignore mask requires processing inode
 * and vfsmount marks of each group together. Using the group address as
 * further sort criterion provides a unique sorting order and thus we can
 * merge inode and vfsmount lists of marks in linear time and find groups
 * present in both lists.
 *
 * A return value of 1 signifies that b has priority over a.
 * A return value of 0 signifies that the two marks have to be handled together.
 * A return value of -1 signifies that a has priority over b.
 */
int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
{
	if (a == b)
		return 0;
	if (!a)
		return 1;
	if (!b)
		return -1;
	if (a->priority < b->priority)
		return 1;
	if (a->priority > b->priority)
		return -1;
	if (a < b)
		return 1;
	return -1;
}
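
/*
 * Illustrative sketch of the ordering the comparison above produces. This is
 * not part of the original file; example_compare_groups(), hi_prio and
 * lo_prio are hypothetical, the block is compiled out, and hi_prio->priority
 * is assumed to be greater than lo_prio->priority.
 */
#if 0
static void example_compare_groups(struct fsnotify_group *hi_prio,
				   struct fsnotify_group *lo_prio)
{
	/* Same group: the two marks have to be handled together. */
	WARN_ON(fsnotify_compare_groups(hi_prio, hi_prio) != 0);
	/* -1: a (the higher priority group) sorts before b. */
	WARN_ON(fsnotify_compare_groups(hi_prio, lo_prio) != -1);
	/* 1: b (the higher priority group) sorts before a. */
	WARN_ON(fsnotify_compare_groups(lo_prio, hi_prio) != 1);
}
#endif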

static int fsnotify_attach_connector_to_object(
				struct fsnotify_mark_connector __rcu **connp,
				struct inode *inode,
				struct vfsmount *mnt)
{
	struct fsnotify_mark_connector *conn;

	conn = kmem_cache_alloc(fsnotify_mark_connector_cachep, GFP_KERNEL);
	if (!conn)
		return -ENOMEM;
	spin_lock_init(&conn->lock);
	INIT_HLIST_HEAD(&conn->list);
	if (inode) {
		conn->flags = FSNOTIFY_OBJ_TYPE_INODE;
		conn->inode = igrab(inode);
	} else {
		conn->flags = FSNOTIFY_OBJ_TYPE_VFSMOUNT;
		conn->mnt = mnt;
	}
	/*
	 * cmpxchg() provides the barrier so that readers of *connp can see
	 * only a fully initialized structure.
	 */
	if (cmpxchg(connp, NULL, conn)) {
		/* Someone else created the list structure for us */
		if (inode)
			iput(inode);
		kmem_cache_free(fsnotify_mark_connector_cachep, conn);
	}

	return 0;
}

/*
 * Get the mark connector, make sure it is alive and return with its lock held.
 * This is for users that get the connector pointer from an inode or mount.
 * Users that hold a reference to a mark on the list may directly lock
 * connector->lock as they are sure the list cannot go away under them.
 */
static struct fsnotify_mark_connector *fsnotify_grab_connector(
				struct fsnotify_mark_connector __rcu **connp)
{
	struct fsnotify_mark_connector *conn;
	int idx;

	idx = srcu_read_lock(&fsnotify_mark_srcu);
	conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
	if (!conn)
		goto out;
	spin_lock(&conn->lock);
	if (!(conn->flags & (FSNOTIFY_OBJ_TYPE_INODE |
			     FSNOTIFY_OBJ_TYPE_VFSMOUNT))) {
		spin_unlock(&conn->lock);
		srcu_read_unlock(&fsnotify_mark_srcu, idx);
		return NULL;
	}
out:
	srcu_read_unlock(&fsnotify_mark_srcu, idx);
	return conn;
}

/*
 * Add mark into proper place in given list of marks. These marks may be used
 * for the fsnotify backend to determine which event types should be delivered
 * to which group and for which inodes. These marks are ordered according to
 * priority, highest number first, and then by the group's location in memory.
 */
static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
				  struct inode *inode, struct vfsmount *mnt,
				  int allow_dups)
{
	struct fsnotify_mark *lmark, *last = NULL;
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark_connector __rcu **connp;
	int cmp;
	int err = 0;

	if (WARN_ON(!inode && !mnt))
		return -EINVAL;
	if (inode)
		connp = &inode->i_fsnotify_marks;
	else
		connp = &real_mount(mnt)->mnt_fsnotify_marks;
restart:
	spin_lock(&mark->lock);
	conn = fsnotify_grab_connector(connp);
	if (!conn) {
		spin_unlock(&mark->lock);
		err = fsnotify_attach_connector_to_object(connp, inode, mnt);
		if (err)
			return err;
		goto restart;
	}

	/* is mark the first mark? */
	if (hlist_empty(&conn->list)) {
		hlist_add_head_rcu(&mark->obj_list, &conn->list);
		goto added;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, &conn->list, obj_list) {
		last = lmark;

		if ((lmark->group == mark->group) && !allow_dups) {
			err = -EEXIST;
			goto out_err;
		}

		cmp = fsnotify_compare_groups(lmark->group, mark->group);
		if (cmp >= 0) {
			hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list);
			goto added;
		}
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
added:
	mark->connector = conn;
out_err:
	spin_unlock(&conn->lock);
	spin_unlock(&mark->lock);
	return err;
}

/*
 * Attach an initialized mark to a given group and fs object.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which group.
 */
int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
			     struct fsnotify_group *group, struct inode *inode,
			     struct vfsmount *mnt, int allow_dups)
{
	int ret = 0;

	BUG_ON(inode && mnt);
	BUG_ON(!inode && !mnt);
	BUG_ON(!mutex_is_locked(&group->mark_mutex));

	/*
	 * LOCKING ORDER!!!!
	 * group->mark_mutex
	 * mark->lock
	 * mark->connector->lock
	 */
	spin_lock(&mark->lock);
	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED;

	fsnotify_get_group(group);
	mark->group = group;
	list_add(&mark->g_list, &group->marks_list);
	atomic_inc(&group->num_marks);
	fsnotify_get_mark(mark); /* for i_list and g_list */
	spin_unlock(&mark->lock);

	ret = fsnotify_add_mark_list(mark, inode, mnt, allow_dups);
	if (ret)
		goto err;

	if (mark->mask)
		fsnotify_recalc_mask(mark->connector);

	return ret;
err:
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	list_del_init(&mark->g_list);
	fsnotify_put_group(group);
	mark->group = NULL;
	atomic_dec(&group->num_marks);

	spin_unlock(&mark->lock);

	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);
	queue_delayed_work(system_unbound_wq, &reaper_work,
			   FSNOTIFY_REAPER_DELAY);

	return ret;
}

int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
		      struct inode *inode, struct vfsmount *mnt, int allow_dups)
{
	int ret;

	mutex_lock(&group->mark_mutex);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, allow_dups);
	mutex_unlock(&group->mark_mutex);
	return ret;
}
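
/*
 * Illustrative sketch of the usual backend call sequence for attaching and
 * removing an inode mark. This is not part of the original file;
 * example_free_mark(), example_watch_inode() and example_unwatch() are
 * hypothetical names and the block is compiled out.
 */
#if 0
static void example_free_mark(struct fsnotify_mark *mark)
{
	kfree(mark);
}

static int example_watch_inode(struct fsnotify_group *group,
			       struct inode *inode,
			       struct fsnotify_mark *mark)
{
	int ret;

	fsnotify_init_mark(mark, example_free_mark);	/* refcnt == 1 */
	mark->mask = FS_MODIFY | FS_DELETE;
	/* Takes group->mark_mutex internally; 0 disallows duplicate marks. */
	ret = fsnotify_add_mark(mark, group, inode, NULL, 0);
	if (ret)
		fsnotify_put_mark(mark);
	return ret;
}

static void example_unwatch(struct fsnotify_mark *mark,
			    struct fsnotify_group *group)
{
	/* Detach now; the mark is freed once the SRCU grace period ends. */
	fsnotify_destroy_mark(mark, group);
	fsnotify_put_mark(mark);	/* drop the creator's reference */
}
#endif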

/*
 * Given a list of marks, find the mark associated with the given group. If
 * found, take a reference to that mark and return it, else return NULL.
 */
struct fsnotify_mark *fsnotify_find_mark(
				struct fsnotify_mark_connector __rcu **connp,
				struct fsnotify_group *group)
{
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark *mark;

	conn = fsnotify_grab_connector(connp);
	if (!conn)
		return NULL;

	hlist_for_each_entry(mark, &conn->list, obj_list) {
		if (mark->group == group) {
			fsnotify_get_mark(mark);
			spin_unlock(&conn->lock);
			return mark;
		}
	}
	spin_unlock(&conn->lock);
	return NULL;
}
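
/*
 * Illustrative sketch of looking up a group's existing mark on an inode via
 * fsnotify_find_mark(). This is not part of the original file;
 * example_group_watches_inode() is a hypothetical helper and the block is
 * compiled out.
 */
#if 0
static bool example_group_watches_inode(struct fsnotify_group *group,
					struct inode *inode)
{
	struct fsnotify_mark *mark;

	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!mark)
		return false;
	fsnotify_put_mark(mark);	/* drop the reference the lookup took */
	return true;
}
#endif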

/*
 * Clear any marks in a group whose mark->connector->flags & flags is true.
 */
void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
					 unsigned int flags)
{
	struct fsnotify_mark *lmark, *mark;
	LIST_HEAD(to_free);

	/*
	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
	 * fsnotify_clear_marks_by_inode() can come and free marks, even ones
	 * on our to_free list, so we have to hold mark_mutex even when
	 * accessing that list. And freeing a mark requires us to drop
	 * mark_mutex. So we can reliably free only the first mark in the
	 * list. That's why we first move the marks to free to the to_free
	 * list in one go and then free the marks in that list one by one.
	 */
	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
		if (mark->connector->flags & flags)
			list_move(&mark->g_list, &to_free);
	}
	mutex_unlock(&group->mark_mutex);

	while (1) {
		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
		if (list_empty(&to_free)) {
			mutex_unlock(&group->mark_mutex);
			break;
		}
		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
		fsnotify_get_mark(mark);
		fsnotify_detach_mark(mark);
		mutex_unlock(&group->mark_mutex);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
	}
}

/*
 * Given a group, prepare for freeing all the marks associated with that group.
 * The marks are attached to the list of marks prepared for destruction, the
 * caller is responsible for freeing marks in that list after SRCU period has
 * ended.
 */
void fsnotify_detach_group_marks(struct fsnotify_group *group)
{
	struct fsnotify_mark *mark;

	while (1) {
		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
		if (list_empty(&group->marks_list)) {
			mutex_unlock(&group->mark_mutex);
			break;
		}
		mark = list_first_entry(&group->marks_list,
					struct fsnotify_mark, g_list);
		fsnotify_get_mark(mark);
		fsnotify_detach_mark(mark);
		mutex_unlock(&group->mark_mutex);
		__fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
	}
}

/* Destroy all marks attached to inode / vfsmount */
void fsnotify_destroy_marks(struct fsnotify_mark_connector __rcu **connp)
{
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark *mark;

	while ((conn = fsnotify_grab_connector(connp))) {
		/*
		 * We have to be careful since we can race with e.g.
		 * fsnotify_clear_marks_by_group() and once we drop the list
		 * lock, the mark can get removed from the obj_list and
		 * destroyed. But we are holding a mark reference so the mark
		 * cannot be freed, and calling fsnotify_destroy_mark() more
		 * than once is fine.
		 */
		mark = hlist_entry(conn->list.first, struct fsnotify_mark,
				   obj_list);
		fsnotify_get_mark(mark);
		spin_unlock(&conn->lock);
		fsnotify_destroy_mark(mark, mark->group);
		fsnotify_put_mark(mark);
	}
}

/*
 * Nothing fancy, just initialize lists and locks and counters.
 */
void fsnotify_init_mark(struct fsnotify_mark *mark,
			void (*free_mark)(struct fsnotify_mark *mark))
{
	memset(mark, 0, sizeof(*mark));
	spin_lock_init(&mark->lock);
	atomic_set(&mark->refcnt, 1);
	mark->free_mark = free_mark;
}

/*
 * Destroy all marks in destroy_list, waiting for the SRCU period to finish
 * before actually freeing the marks.
 */
void fsnotify_mark_destroy_list(void)
{
	struct fsnotify_mark *mark, *next;
	struct list_head private_destroy_list;

	spin_lock(&destroy_lock);
	/* exchange the list head */
	list_replace_init(&destroy_list, &private_destroy_list);
	spin_unlock(&destroy_lock);

	synchronize_srcu(&fsnotify_mark_srcu);

	list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
		list_del_init(&mark->g_list);
		fsnotify_put_mark(mark);
	}
}

static void fsnotify_mark_destroy_workfn(struct work_struct *work)
{
	fsnotify_mark_destroy_list();
}