/*
 * linux/mm/mmu_notifier.c
 *
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright (C) 2008 SGI
 *	Christoph Lameter <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * This function can't run concurrently with mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to RCU, and it serializes
 * against the other mmu notifiers with RCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so
		 * mmu_notifier_unregister will do nothing other than
		 * wait for ->release to finish and then return.
		 */
		hlist_del_init_rcu(&mn->hlist);
		/*
		 * RCU here will block mmu_notifier_unregister until
		 * ->release returns.
		 */
		rcu_read_lock();
		spin_unlock(&mm->mmu_notifier_mm->lock);
		/*
		 * If ->release runs before mmu_notifier_unregister it
		 * must be handled, as it's the only way for the driver
		 * to flush all existing sptes and stop the driver
		 * from establishing any more sptes before all the
		 * pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		rcu_read_unlock();
		spin_lock(&mm->mmu_notifier_mm->lock);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * synchronize_rcu here prevents mmu_notifier_release from
	 * returning to exit_mmap (which would proceed to free all the
	 * pages in the mm) until the ->release method returns, in case
	 * it was invoked by mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one
	 * mm_count reference is held by exit_mmap.
	 */
	synchronize_rcu();
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young
 * can unmap the address and return 1 or 0 depending on whether the
 * mapping previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	rcu_read_unlock();

	return young;
}
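
/*
 * Illustrative sketch (not built here): how a driver whose secondary
 * MMU lacks a hardware young/accessed bit might implement
 * ->clear_flush_young, per the comment above. The "foo" structure and
 * the foo_zap_spte() helper are hypothetical names, not part of this
 * API; a real driver would substitute its own spte bookkeeping.
 */
#if 0
static int foo_clear_flush_young(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long address)
{
	struct foo *foo = container_of(mn, struct foo, mn);

	/*
	 * With no accessed bit to test and clear, approximate "young"
	 * by zapping the secondary mapping: if one existed, the page
	 * was reachable and is reported as referenced; the mapping
	 * will fault back in on the next access.
	 */
	return foo_zap_spte(foo, address) ? 1 : 0;	/* hypothetical */
}
#endif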

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	rcu_read_unlock();
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	rcu_read_unlock();
}

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	rcu_read_unlock();
}
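
/*
 * Illustrative sketch (not built here): the calling convention the VM
 * follows around the two range hooks above. Range invalidations always
 * arrive as a start/end pair bracketing the page table manipulation,
 * so a driver can stop establishing new sptes at _start and resume at
 * _end. mmu_notifier_invalidate_range_start/end are the inline
 * wrappers from <linux/mmu_notifier.h> that call the __-prefixed
 * functions above when notifiers are registered; zap_primary_ptes() is
 * a hypothetical stand-in for the actual pte manipulation.
 */
#if 0
static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	mmu_notifier_invalidate_range_start(mm, start, end);
	zap_primary_ptes(mm, start, end);	/* hypothetical */
	mmu_notifier_invalidate_range_end(mm, start, end);
}
#endif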

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_cleanup;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_cleanup:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	/* kfree() does nothing if mmu_notifier_mm is NULL */
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM-related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs, to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
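
/*
 * Illustrative sketch (not built here): a minimal registration from a
 * hypothetical driver "foo", following the rules in the comment above.
 * The foo type and the foo_release/foo_zap_all_sptes names are
 * assumptions for illustration only.
 */
#if 0
static void foo_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct foo *foo = container_of(mn, struct foo, mn);

	/* Last chance to flush all sptes for this mm. */
	foo_zap_all_sptes(foo);	/* hypothetical */
}

static const struct mmu_notifier_ops foo_ops = {
	.release	= foo_release,
};

static int foo_attach(struct foo *foo)
{
	/* Caller must hold an mm_users pin; current->mm provides one. */
	foo->mn.ops = &foo_ops;
	return mmu_notifier_register(&foo->mn, current->mm);
}
#endif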

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);
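
/*
 * Illustrative sketch (not built here): using the __-prefixed variant
 * when the caller already holds mmap_sem for writing, e.g. while doing
 * other setup under the same lock. foo is a hypothetical name.
 */
#if 0
	down_write(&mm->mmap_sem);
	/* ... other setup that requires mmap_sem held for writing ... */
	ret = __mmu_notifier_register(&foo->mn, mm);
	up_write(&mm->mmap_sem);
#endif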

/* This is called after the last mmu_notifier_unregister() has returned. */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with RCU and against mmu_notifier_unregister
 * with the unregister lock + RCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we
 * guaranteed that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (!hlist_unhashed(&mn->hlist)) {
		hlist_del_rcu(&mn->hlist);

		/*
		 * RCU here will force exit_mmap to wait for ->release
		 * to finish before freeing the pages.
		 */
		rcu_read_lock();
		spin_unlock(&mm->mmu_notifier_mm->lock);
		/*
		 * exit_mmap will block in mmu_notifier_release to
		 * guarantee ->release is called before freeing the
		 * pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		rcu_read_unlock();
	} else
		spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_rcu();

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
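
/*
 * Illustrative sketch (not built here): the matching teardown for the
 * registration sketch above. mmu_notifier_unregister itself drops the
 * mm_count pin taken at register time (via mmdrop), so it may safely
 * run before or after exit_mmap; the driver must only ensure all its
 * sptes are already gone. foo and foo->mm are hypothetical names.
 */
#if 0
static void foo_detach(struct foo *foo)
{
	/* All sptes must already be flushed by this point. */
	mmu_notifier_unregister(&foo->mn, foo->mm);
	/* After this returns, no notifier method can still be running. */
}
#endif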