// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <[email protected]>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount, however this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree, this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - mnn->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - mnn->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on mmu_notifier_mm->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case
 * of no mni monitoring the VA.
 */
static bool mn_itree_is_invalidating(struct mmu_notifier_mm *mmn_mm)
{
	lockdep_assert_held(&mmn_mm->lock);
	return mmn_mm->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_mm *mmn_mm,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&mmn_mm->lock);
	mmn_mm->active_invalidate_ranges++;
	node = interval_tree_iter_first(&mmn_mm->itree, range->start,
					range->end - 1);
	if (node) {
		mmn_mm->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = mmn_mm->invalidate_seq;
	spin_unlock(&mmn_mm->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *mni,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&mni->interval_tree, range->start,
				       range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_mm *mmn_mm)
{
	struct mmu_interval_notifier *mni;
	struct hlist_node *next;

	spin_lock(&mmn_mm->lock);
	if (--mmn_mm->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(mmn_mm)) {
		spin_unlock(&mmn_mm->lock);
		return;
	}

	/* Make invalidate_seq even */
	mmn_mm->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(mni, next, &mmn_mm->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&mni->interval_tree.rb))
			interval_tree_insert(&mni->interval_tree,
					     &mmn_mm->itree);
		else
			interval_tree_remove(&mni->interval_tree,
					     &mmn_mm->itree);
		hlist_del(&mni->deferred_item);
	}
	spin_unlock(&mmn_mm->lock);

	wake_up_all(&mmn_mm->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @mni: The range to use
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under mni. If
 * the mm invokes invalidation during the critical section then
 * mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * require a blocking context. The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni)
{
	struct mmu_notifier_mm *mmn_mm = mni->mm->mmu_notifier_mm;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the mni has a different seq value under the user_lock than we
	 * started with then it has collided.
	 *
	 * If the mni currently has the same seq value as the mmn_mm seq, then
	 * it is currently between invalidate_start/end and is colliding.
	 *
	 * The locking looks broadly like this:
	 *   mn_tree_invalidate_start():         mmu_interval_read_begin():
	 *                                        spin_lock
	 *                                         seq = READ_ONCE(mni->invalidate_seq);
	 *                                         seq == mmn_mm->invalidate_seq
	 *                                        spin_unlock
	 *    spin_lock
	 *     seq = ++mmn_mm->invalidate_seq
	 *    spin_unlock
	 *     op->invalidate_range():
	 *       user_lock
	 *        mmu_interval_set_seq()
	 *         mni->invalidate_seq = seq
	 *       user_unlock
	 *
	 *                     [Required: mmu_interval_read_retry() == true]
	 *
	 *   mn_itree_inv_end():
	 *    spin_lock
	 *     seq = ++mmn_mm->invalidate_seq
	 *    spin_unlock
	 *
	 *                                       user_lock
	 *                                        mmu_interval_read_retry():
	 *                                         mni->invalidate_seq != seq
	 *                                       user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&mmn_mm->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(mni->invalidate_seq);
	is_invalidating = seq == mmn_mm->invalidate_seq;
	spin_unlock(&mmn_mm->lock);

	/*
	 * mni->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * mmn_mm->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(mmn_mm->wq,
			   READ_ONCE(mmn_mm->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point, avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);

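/*
 * Illustrative sketch (not part of this file): the collision-retry pattern a
 * hypothetical driver would build on top of mmu_interval_read_begin() /
 * mmu_interval_read_retry(). The names my_object, my_notifier, my_driver_lock,
 * my_build_sptes() and my_commit_sptes() are assumptions for illustration
 * only; my_driver_lock plays the role of the 'user_lock' described above.
 */
#if 0
static void my_populate_range(struct my_object *obj)
{
	unsigned long seq;

	do {
		seq = mmu_interval_read_begin(&obj->my_notifier);

		/* Sleepable work: fault in pages, build the new SPTEs. */
		my_build_sptes(obj);

		mutex_lock(&obj->my_driver_lock);
		if (!mmu_interval_read_retry(&obj->my_notifier, seq)) {
			/* No invalidation collided, publish the SPTEs. */
			my_commit_sptes(obj);
			mutex_unlock(&obj->my_driver_lock);
			return;
		}
		/* Collided with an invalidation, discard the work and retry. */
		mutex_unlock(&obj->my_driver_lock);
	} while (true);
}
#endif
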
static void mn_itree_release(struct mmu_notifier_mm *mmn_mm,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *mni;
	unsigned long cur_seq;
	bool ret;

	for (mni = mn_itree_inv_start_range(mmn_mm, &range, &cur_seq); mni;
	     mni = mn_itree_inv_next(mni, &range)) {
		ret = mni->ops->invalidate(mni, &range, cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(mmn_mm);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_mm *mmn_mm,
			     struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mmn_mm->lock);
	while (unlikely(!hlist_empty(&mmn_mm->list))) {
		mn = hlist_entry(mmn_mm->list.first, struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mmn_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_mm *mmn_mm = mm->mmu_notifier_mm;

	if (mmn_mm->has_itree)
		mn_itree_release(mmn_mm, mm);

	if (!hlist_empty(&mmn_mm->list))
		mn_hlist_release(mmn_mm, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_mm *mmn_mm,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *mni;
	unsigned long cur_seq;

	for (mni = mn_itree_inv_start_range(mmn_mm, range, &cur_seq); mni;
	     mni = mn_itree_inv_next(mni, range)) {
		bool ret;

		ret = mni->ops->invalidate(mni, range, cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(mmn_mm);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(struct mmu_notifier_mm *mmn_mm,
					   struct mmu_notifier_range *range)
{
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist) {
		if (mn->ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = mn->ops->invalidate_range_start(mn, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					mn->ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ? "non-" : "");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_mm *mmn_mm = range->mm->mmu_notifier_mm;
	int ret;

	if (mmn_mm->has_itree) {
		ret = mn_itree_invalidate(mmn_mm, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&mmn_mm->list))
		return mn_hlist_invalidate_range_start(mmn_mm, range);
	return 0;
}

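/*
 * Illustrative sketch (not part of this file): how a core-mm style caller
 * pairs the start/end notifications around a PTE teardown, and why a
 * non-blocking caller that gets -EAGAIN must not call the end notification.
 * The helper my_unmap_ptes() is an assumption for illustration only; the
 * range_init/start/end wrappers are the ones from <linux/mmu_notifier.h>.
 */
#if 0
static void my_zap_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
				vma->vm_mm, start, end);
	mmu_notifier_invalidate_range_start(&range);
	my_unmap_ptes(vma, start, end);
	mmu_notifier_invalidate_range_end(&range);
}

static bool my_try_zap_range_nonblock(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
				vma->vm_mm, start, end);
	/* Returns -EAGAIN if any notifier would have had to block. */
	if (mmu_notifier_invalidate_range_start_nonblock(&range))
		return false;	/* must not call invalidate_range_end() */
	my_unmap_ptes(vma, start, end);
	mmu_notifier_invalidate_range_end(&range);
	return true;
}
#endif
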
static void mn_hlist_invalidate_end(struct mmu_notifier_mm *mmn_mm,
				    struct mmu_notifier_range *range,
				    bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist) {
		/*
		 * Call invalidate_range here too to avoid the need for the
		 * subsystem to register an invalidate_range_end call-back
		 * when there is invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end()
		 * or invalidate_range(), so this will be no additional
		 * overhead (besides the pointer check).
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, ie. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is safe to
		 * do when we know that a call to invalidate_range() already
		 * happened under the page table lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, range->mm,
						  range->start,
						  range->end);
		if (mn->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			mn->ops->invalidate_range_end(mn, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_mm *mmn_mm = range->mm->mmu_notifier_mm;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mmn_mm->has_itree)
		mn_itree_inv_end(mmn_mm);

	if (!hlist_empty(&mmn_mm->list))
		mn_hlist_invalidate_end(mmn_mm, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_sem in
 * write mode. A NULL mn signals the notifier is being registered for itree
 * mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct mmu_notifier_mm *mmu_notifier_mm = NULL;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
		fs_reclaim_release(GFP_KERNEL);
	}

	if (!mm->mmu_notifier_mm) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->mmu_notifier_mm can't change while we hold
		 * the write side of the mmap_sem.
		 */
		mmu_notifier_mm =
			kzalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
		if (!mmu_notifier_mm)
			return -ENOMEM;

		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
		mmu_notifier_mm->invalidate_seq = 2;
		mmu_notifier_mm->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&mmu_notifier_mm->wq);
		INIT_HLIST_HEAD(&mmu_notifier_mm->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 *
	 * release semantics on the initialization of the mmu_notifier_mm's
	 * contents are provided for unlocked readers. acquire can only be
	 * used while holding the mmgrab or mmget, and is safe because once
	 * created the mmu_notifier_mm is not freed until the mm is
	 * destroyed. As above, users holding the mmap_sem or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (mmu_notifier_mm)
		smp_store_release(&mm->mmu_notifier_mm, mmu_notifier_mm);

	if (mn) {
		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
		mmgrab(mm);
		mn->mm = mm;
		mn->users = 1;

		spin_lock(&mm->mmu_notifier_mm->lock);
		hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	} else
		mm->mmu_notifier_mm->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(mmu_notifier_mm);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @mn: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 *
 * While the caller holds a get on the mmu_notifier the mn->mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = __mmu_notifier_register(mn, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

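/*
 * Illustrative sketch (not part of this file): a minimal classic (hlist
 * based) notifier registered against current->mm. struct my_mirror,
 * my_invalidate_start() and my_mirror_attach() are assumptions for
 * illustration only.
 */
#if 0
static int my_invalidate_start(struct mmu_notifier *mn,
			       const struct mmu_notifier_range *range)
{
	/* Tear down any secondary TLB/SPTEs covering [start, end). */
	return 0;
}

static const struct mmu_notifier_ops my_mirror_ops = {
	.invalidate_range_start = my_invalidate_start,
};

struct my_mirror {
	struct mmu_notifier mn;
};

static int my_mirror_attach(struct my_mirror *mirror)
{
	mirror->mn.ops = &my_mirror_ops;
	/* current->mm holds an mm_users reference, as required above. */
	return mmu_notifier_register(&mirror->mn, current->mm);
}
#endif
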
static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *mn;

	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops != ops)
			continue;

		if (likely(mn->users != UINT_MAX))
			mn->users++;
		else
			mn = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->mmu_notifier_mm->lock);
		return mn;
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_sem.
 *
 * While the caller holds a get on the mmu_notifier the mm pointer will remain
 * valid, and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);

	if (mm->mmu_notifier_mm) {
		mn = find_get_mmu_notifier(mm, ops);
		if (mn)
			return mn;
	}

	mn = ops->alloc_notifier(mm);
	if (IS_ERR(mn))
		return mn;
	mn->ops = ops;
	ret = __mmu_notifier_register(mn, mm);
	if (ret)
		goto out_free;
	return mn;
out_free:
	mn->ops->free_notifier(mn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *mn = container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = mn->mm;

	mn->ops->free_notifier(mn);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @mn: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(), it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will be run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *mn)
{
	struct mm_struct *mm = mn->mm;

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (WARN_ON(!mn->users) || --mn->users)
		goto out_unlock;
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	call_srcu(&srcu, &mn->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->mmu_notifier_mm->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);

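/*
 * Illustrative sketch (not part of this file): the get/put lifecycle for a
 * module that wants a single notifier per mm. my_notifier_alloc(),
 * my_notifier_free(), my_open() and my_driver_exit() are assumptions for
 * illustration only; mmu_notifier_get() is the header wrapper that takes
 * mmap_sem and calls mmu_notifier_get_locked().
 */
#if 0
static struct mmu_notifier *my_notifier_alloc(struct mm_struct *mm)
{
	struct mmu_notifier *mn = kzalloc(sizeof(*mn), GFP_KERNEL);

	return mn ? mn : ERR_PTR(-ENOMEM);
}

static void my_notifier_free(struct mmu_notifier *mn)
{
	kfree(mn);
}

static const struct mmu_notifier_ops my_get_put_ops = {
	.alloc_notifier = my_notifier_alloc,
	.free_notifier = my_notifier_free,
};

static int my_open(void)
{
	struct mmu_notifier *mn;

	mn = mmu_notifier_get(&my_get_put_ops, current->mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);
	/* ... use mn, then drop the reference obtained by the get: */
	mmu_notifier_put(mn);
	return 0;
}

static void __exit my_driver_exit(void)
{
	/* Flush the async free_notifier work queued by mmu_notifier_put(). */
	mmu_notifier_synchronize();
}
#endif
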
static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *mni, struct mm_struct *mm,
	struct mmu_notifier_mm *mmn_mm, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	mni->mm = mm;
	mni->ops = ops;
	RB_CLEAR_NODE(&mni->interval_tree.rb);
	mni->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1, &mni->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_count) <= 0))
		return -EINVAL;

	/* pairs with mmdrop in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility for live lock, instead defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the mni->invalidate_seq should be
	 * odd, see mmu_interval_read_begin()
	 */
	spin_lock(&mmn_mm->lock);
	if (mmn_mm->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(mmn_mm))
			hlist_add_head(&mni->deferred_item,
				       &mmn_mm->deferred_list);
		else {
			mmn_mm->invalidate_seq |= 1;
			interval_tree_insert(&mni->interval_tree,
					     &mmn_mm->itree);
		}
		mni->invalidate_seq = mmn_mm->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(mmn_mm));
		/*
		 * The starting seq for a mni not under invalidation should be
		 * odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		mni->invalidate_seq = mmn_mm->invalidate_seq - 1;
		interval_tree_insert(&mni->interval_tree, &mmn_mm->itree);
	}
	spin_unlock(&mmn_mm->lock);
	return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @mni: Interval notifier to register
 * @mm: mm_struct to attach to
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the range_notifier may not be present in the interval tree yet.
 * The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_mm *mmn_mm;
	int ret;

	might_lock(&mm->mmap_sem);

	mmn_mm = smp_load_acquire(&mm->mmu_notifier_mm);
	if (!mmn_mm || !mmn_mm->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		mmn_mm = mm->mmu_notifier_mm;
	}
	return __mmu_interval_notifier_insert(mni, mm, mmn_mm, start, length,
					      ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);

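/*
 * Illustrative sketch (not part of this file): an interval notifier user.
 * The invalidate callback must update the notifier's seq via
 * mmu_interval_set_seq() under the same 'user_lock' that
 * mmu_interval_read_retry() is called under, and may only return false for
 * non-blockable ranges. struct my_range, my_driver_lock, my_zap_sptes() and
 * my_range_attach() are assumptions for illustration only.
 */
#if 0
struct my_range {
	struct mmu_interval_notifier notifier;
	struct mutex my_driver_lock;
};

static bool my_invalidate(struct mmu_interval_notifier *mni,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_range *r = container_of(mni, struct my_range, notifier);

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&r->my_driver_lock);
	else if (!mutex_trylock(&r->my_driver_lock))
		return false;	/* causes -EAGAIN for the non-blocking caller */

	mmu_interval_set_seq(mni, cur_seq);
	my_zap_sptes(r, range->start, range->end);
	mutex_unlock(&r->my_driver_lock);
	return true;
}

static const struct mmu_interval_notifier_ops my_interval_ops = {
	.invalidate = my_invalidate,
};

static int my_range_attach(struct my_range *r, unsigned long start,
			   unsigned long length)
{
	return mmu_interval_notifier_insert(&r->notifier, current->mm,
					    start, length, &my_interval_ops);
}
#endif
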
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *mni, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_mm *mmn_mm;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);

	mmn_mm = mm->mmu_notifier_mm;
	if (!mmn_mm || !mmn_mm->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		mmn_mm = mm->mmu_notifier_mm;
	}
	return __mmu_interval_notifier_insert(mni, mm, mmn_mm, start, length,
					      ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @mni: Interval notifier to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 *
 * Once this returns ops callbacks are no longer running on other CPUs and
 * will not be called in future.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni)
{
	struct mm_struct *mm = mni->mm;
	struct mmu_notifier_mm *mmn_mm = mm->mmu_notifier_mm;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&mmn_mm->lock);
	if (mn_itree_is_invalidating(mmn_mm)) {
		/*
		 * remove is being called after insert put this on the
		 * deferred list, but before the deferred list was processed.
		 */
		if (RB_EMPTY_NODE(&mni->interval_tree.rb)) {
			hlist_del(&mni->deferred_item);
		} else {
			hlist_add_head(&mni->deferred_item,
				       &mmn_mm->deferred_list);
			seq = mmn_mm->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&mni->interval_tree.rb));
		interval_tree_remove(&mni->interval_tree, &mmn_mm->itree);
	}
	spin_unlock(&mmn_mm->lock);

	/*
	 * The possible sleep on progress in the invalidation requires that
	 * the caller not hold any locks held by invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(mmn_mm->wq,
			   READ_ONCE(mmn_mm->invalidate_seq) != seq);

	/* pairs with mmgrab in mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before using the caller must ensure that all of its mmu_notifiers have been
 * fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;
	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);