// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <[email protected]>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount, however this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree; this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case of
 * no mmu_interval_notifier monitoring the VA.
 */
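/*
 * A short worked example of the protocol above, starting from the idle value
 * of 2 that __mmu_notifier_register() assigns:
 *
 *   idle                                      invalidate_seq == 2 (even)
 *   range A starts, overlaps a subscription:  seq |= 1 -> invalidate_seq == 3
 *   range B starts concurrently:              invalidate_seq stays 3 (odd),
 *                                             only active_invalidate_ranges
 *                                             is incremented
 *   range A ends:                             still invalidating, nothing done
 *   range B ends (the last one):              seq++ -> invalidate_seq == 4,
 *                                             deferred itree updates applied,
 *                                             waiters woken in mn_itree_inv_end()
 */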
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * require a blocking context. The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has a different seq value under the user_lock
	 * than we started with then it has collided.
	 *
	 * If the subscription currently has the same seq value as the
	 * subscriptions seq, then it is currently between
	 * invalidate_start/end and is colliding.
	 *
	 * The locking looks broadly like this:
	 *     mn_tree_invalidate_start():          mmu_interval_read_begin():
	 *                                          spin_lock
	 *                                           seq = READ_ONCE(interval_sub->invalidate_seq);
	 *                                           seq == subs->invalidate_seq
	 *                                          spin_unlock
	 *      spin_lock
	 *       seq = ++subscriptions->invalidate_seq
	 *      spin_unlock
	 *     op->invalidate_range():
	 *       user_lock
	 *        mmu_interval_set_seq()
	 *         interval_sub->invalidate_seq = seq
	 *       user_unlock
	 *
	 *                          [Required: mmu_interval_read_retry() == true]
	 *
	 *   mn_itree_inv_end():
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *
	 *                                          user_lock
	 *                                           mmu_interval_read_retry():
	 *                                            interval_sub->invalidate_seq != seq
	 *                                          user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * interval_sub->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point, avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);

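/*
 * A minimal sketch of the read side this pairs with (hypothetical driver
 * code, not part of this file; 'drv', its 'user_lock' mutex and the
 * drv_read_ptes()/drv_program_sptes() helpers are placeholder names):
 *
 *	again:
 *		seq = mmu_interval_read_begin(&drv->interval_sub);
 *
 *		drv_read_ptes(drv);	// may sleep, e.g. hmm_range_fault()
 *
 *		mutex_lock(&drv->user_lock);
 *		if (mmu_interval_read_retry(&drv->interval_sub, seq)) {
 *			mutex_unlock(&drv->user_lock);
 *			goto again;	// an invalidation collided
 *		}
 *		drv_program_sptes(drv);	// no collision since read_begin
 *		mutex_unlock(&drv->user_lock);
 *
 * The matching ops->invalidate() callback must take the same 'user_lock' and
 * call mmu_interval_set_seq() with the cur_seq it was given.
 */
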
static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The notifier_subscriptions can't go away from under us because
	 * one mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

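/*
 * A sketch of what a registered callback might do with the range it is given
 * (hypothetical driver code, not part of this file; struct drv_mmu and
 * drv_test_and_clear_young() are placeholders for whatever accessed-bit
 * tracking the secondary MMU has):
 *
 *	static int drv_clear_flush_young(struct mmu_notifier *subscription,
 *					 struct mm_struct *mm,
 *					 unsigned long start, unsigned long end)
 *	{
 *		struct drv_mmu *drv = container_of(subscription,
 *						   struct drv_mmu, notifier);
 *
 *		// Clear the secondary-MMU accessed bits for [start, end),
 *		// flush its TLB, and return non-zero if any were set.
 *		return drv_test_and_clear_young(drv, start, end);
 *	}
 */
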
int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->change_pte)
			subscription->ops->change_pte(subscription, mm, address,
						      pte);
	}
	srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" :
						"");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				/*
				 * We call all the notifiers on any EAGAIN,
				 * there is no way for a notifier to know if
				 * its start method failed, thus a start that
				 * does EAGAIN can't also do end.
				 */
				WARN_ON(ops->invalidate_range_end);
				ret = _ret;
			}
		}
	}

	if (ret) {
		/*
		 * Must be non-blocking to get here. If there are multiple
		 * notifiers and one or more failed start, any that succeeded
		 * start are expecting their end to be called. Do so now.
		 */
		hlist_for_each_entry_rcu(subscription, &subscriptions->list,
					 hlist, srcu_read_lock_held(&srcu)) {
			if (!subscription->ops->invalidate_range_end)
				continue;

			subscription->ops->invalidate_range_end(subscription,
								range);
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

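/*
 * A sketch of how a subscriber honours the blockable/-EAGAIN contract
 * enforced above (hypothetical driver code, not part of this file; struct
 * drv_mmu and drv_zap_sptes() are placeholders):
 *
 *	static int drv_invalidate_range_start(struct mmu_notifier *subscription,
 *					const struct mmu_notifier_range *range)
 *	{
 *		struct drv_mmu *drv = container_of(subscription,
 *						   struct drv_mmu, notifier);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&drv->lock);
 *		else if (!mutex_trylock(&drv->lock))
 *			return -EAGAIN;	// only valid for non-blockable ranges
 *
 *		drv_zap_sptes(drv, range->start, range->end);
 *		mutex_unlock(&drv->lock);
 *		return 0;
 *	}
 *
 * Note the WARN_ON above: a notifier whose start method can fail must not
 * also register an invalidate_range_end() callback.
 */
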
int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range, bool only_end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		/*
		 * Call invalidate_range here too to avoid the need for the
		 * subsystem of having to register an invalidate_range_end
		 * call-back when there is invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this will be no additional overhead
		 * (besides the pointer check).
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, i.e. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is safe
		 * when a call to invalidate_range() already happened under
		 * the page table lock.
		 */
		if (!only_end && subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription,
							    range->mm,
							    range->start,
							    range->end);
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription, mm,
							    start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
 * write mode. A NULL mn signals the notifier is being registered for itree
 * mode.
 */
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	mmap_assert_write_locked(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_lock.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 *
	 * release semantics on the initialization of the
	 * mmu_notifier_subscriptions's contents are provided for unlocked
	 * readers. acquire can only be used while holding the mmgrab or
	 * mmget, and is safe because once created the
	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
	 * As above, users holding the mmap_lock or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (subscription) {
		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_lock nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must be always called to
 * unregister the notifier.
 *
 * While the caller has a mmu_notifier get, the subscription->mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = __mmu_notifier_register(subscription, mm);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

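/*
 * A minimal registration sketch (hypothetical driver code, not part of this
 * file; struct drv_mmu and the drv_* callbacks are placeholders):
 *
 *	static const struct mmu_notifier_ops drv_mn_ops = {
 *		.release		= drv_release,
 *		.invalidate_range_start	= drv_invalidate_range_start,
 *	};
 *
 *	static int drv_attach(struct drv_mmu *drv)
 *	{
 *		drv->notifier.ops = &drv_mn_ops;
 *		// current->mm holds the required mm_users pin for us
 *		return mmu_notifier_register(&drv->notifier, current->mm);
 *	}
 *
 * Teardown is mmu_notifier_unregister(&drv->notifier, mm), which lets go of
 * the mm_count reference taken in __mmu_notifier_register().
 */
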
static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
 *
 * While the caller has a mmu_notifier get the mm pointer will remain valid,
 * and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	mmap_assert_write_locked(mm);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

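/*
 * A sketch of the get/put flow (hypothetical driver code, not part of this
 * file; struct drv_mmu embeds the struct mmu_notifier and drv_mn_ops provides
 * alloc_notifier()/free_notifier()):
 *
 *	struct mmu_notifier *subscription;
 *
 *	mmap_write_lock(current->mm);
 *	subscription = mmu_notifier_get_locked(&drv_mn_ops, current->mm);
 *	mmap_write_unlock(current->mm);
 *	if (IS_ERR(subscription))
 *		return PTR_ERR(subscription);
 *	drv = container_of(subscription, struct drv_mmu, notifier);
 *
 *	// ... use the notifier; each extra get is paired with a put ...
 *
 *	mmu_notifier_put(&drv->notifier);
 *
 * The mmu_notifier_get() wrapper in linux/mmu_notifier.h does the same but
 * takes the mmap_lock itself.
 */
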
/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(), it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will be run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);

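/*
 * Because the final put frees through SRCU, a module using the get/put API
 * must flush that asynchronous work before unloading, e.g. (hypothetical
 * module code, not part of this file):
 *
 *	static void __exit drv_exit(void)
 *	{
 *		// all notifiers have already been dropped with mmu_notifier_put()
 *		mmu_notifier_synchronize();
 *	}
 *	module_exit(drv_exit);
 */
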
static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
		return -EINVAL;

	/* pairs with mmdrop in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility for live lock, instead defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the interval_sub->invalidate_seq should be
	 * odd, see mmu_interval_read_begin()
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @mm: mm_struct to attach to
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the range_notifier may not be present in the interval tree yet.
 * The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	might_lock(&mm->mmap_lock);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);

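/*
 * A sketch of an interval subscription user (hypothetical driver code, not
 * part of this file; struct drv_bo, its 'user_lock' mutex and drv_zap_sptes()
 * are placeholders):
 *
 *	static bool drv_invalidate(struct mmu_interval_notifier *interval_sub,
 *				   const struct mmu_notifier_range *range,
 *				   unsigned long cur_seq)
 *	{
 *		struct drv_bo *bo = container_of(interval_sub,
 *						 struct drv_bo, interval_sub);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&bo->user_lock);
 *		else if (!mutex_trylock(&bo->user_lock))
 *			return false;
 *		mmu_interval_set_seq(interval_sub, cur_seq);
 *		drv_zap_sptes(bo, range->start, range->end);
 *		mutex_unlock(&bo->user_lock);
 *		return true;
 *	}
 *
 *	static const struct mmu_interval_notifier_ops drv_interval_ops = {
 *		.invalidate = drv_invalidate,
 *	};
 *
 *	ret = mmu_interval_notifier_insert(&bo->interval_sub, current->mm,
 *					   start, length, &drv_interval_ops);
 *
 * The SPTEs themselves are then set up with the mmu_interval_read_begin() /
 * mmu_interval_read_retry() loop sketched earlier in this file.
 */
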
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	mmap_assert_write_locked(mm);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

static bool
mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
			  unsigned long seq)
{
	bool ret;

	spin_lock(&subscriptions->lock);
	ret = subscriptions->invalidate_seq != seq;
	spin_unlock(&subscriptions->lock);
	return ret;
}

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 *
 * Once this returns ops callbacks are no longer running on other CPUs and
 * will not be called in future.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
	struct mm_struct *mm = interval_sub->mm;
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&subscriptions->lock);
	if (mn_itree_is_invalidating(subscriptions)) {
		/*
		 * remove is being called after insert put this on the
		 * deferred list, but before the deferred list was processed.
		 */
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
			hlist_del(&interval_sub->deferred_item);
		} else {
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
			seq = subscriptions->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
		interval_tree_remove(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);

	/*
	 * The possible sleep on progress in the invalidation requires the
	 * caller not hold any locks held by invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(subscriptions->wq,
			   mmu_interval_seq_released(subscriptions, seq));

	/* pairs with mmgrab in mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before using the caller must ensure that all of its mmu_notifiers have been
 * fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;
	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);