Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * mm/rmap.c - physical to virtual reverse mappings | |
3 | * | |
4 | * Copyright 2001, Rik van Riel <[email protected]> | |
5 | * Released under the General Public License (GPL). | |
6 | * | |
7 | * Simple, low overhead reverse mapping scheme. | |
8 | * Please try to keep this thing as modular as possible. | |
9 | * | |
10 | * Provides methods for unmapping each kind of mapped page: | |
11 | * the anon methods track anonymous pages, and | |
12 | * the file methods track pages belonging to an inode. | |
13 | * | |
14 | * Original design by Rik van Riel <[email protected]> 2001 | |
15 | * File methods by Dave McCracken <[email protected]> 2003, 2004 | |
16 | * Anonymous methods by Andrea Arcangeli <[email protected]> 2004 | |
98f32602 | 17 | * Contributions by Hugh Dickins 2003, 2004 |
1da177e4 LT |
18 | */ |
19 | ||
20 | /* | |
21 | * Lock ordering in mm: | |
22 | * | |
9608703e | 23 | * inode->i_rwsem (while writing or truncating, not reading or faulting) |
c1e8d7c6 | 24 | * mm->mmap_lock |
730633f0 | 25 | * mapping->invalidate_lock (in filemap_fault) |
4dc7d373 | 26 | * folio_lock |
8d9bfb26 | 27 | * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below) |
55fd6fcc SB |
28 | * vma_start_write |
29 | * mapping->i_mmap_rwsem | |
30 | * anon_vma->rwsem | |
31 | * mm->page_table_lock or pte_lock | |
32 | * swap_lock (in swap_duplicate, swap_info_get) | |
33 | * mmlist_lock (in mmput, drain_mmlist and others) | |
34 | * mapping->private_lock (in block_dirty_folio) | |
35 | * folio_lock_memcg move_lock (in block_dirty_folio) | |
36 | * i_pages lock (widely used) | |
37 | * lruvec->lru_lock (in folio_lruvec_lock_irq) | |
38 | * inode->i_lock (in set_page_dirty's __mark_inode_dirty) | |
39 | * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) | |
40 | * sb_lock (within inode_lock in fs/fs-writeback.c) | |
41 | * i_pages lock (widely used, in set_page_dirty, | |
42 | * in arch-dependent flush_dcache_mmap_lock, | |
43 | * within bdi.wb->list_lock in __sync_single_inode) | |
6a46079c | 44 | * |
9608703e | 45 | * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) |
9b679320 | 46 | * ->tasklist_lock |
6a46079c | 47 | * pte map lock |
c0d0381a | 48 | * |
8d9bfb26 MK |
49 | * hugetlbfs PageHuge() pages take locks in this order: |
50 | * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) | |
51 | * vma_lock (hugetlb specific lock for pmd_sharing) | |
52 | * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing) | |
4dc7d373 | 53 | * folio_lock |
1da177e4 LT |
54 | */ |
55 | ||
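To make the hierarchy above concrete, a path that needs several of these locks nests them outermost-first. The sketch below is illustrative only (it is not taken from this file and elides error handling); it simply strings the real locking helpers together in the documented order.

```c
/*
 * Illustrative only: acquire locks in the documented order
 * mmap_lock -> i_mmap_rwsem -> anon_vma->rwsem -> pte lock.
 */
static void lock_ordering_sketch(struct mm_struct *mm,
				 struct address_space *mapping,
				 struct anon_vma *anon_vma,
				 pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	mmap_read_lock(mm);				/* mm->mmap_lock */
	i_mmap_lock_read(mapping);			/* mapping->i_mmap_rwsem */
	anon_vma_lock_read(anon_vma);			/* anon_vma->rwsem */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);	/* pte lock */
	if (pte)
		pte_unmap_unlock(pte, ptl);
	anon_vma_unlock_read(anon_vma);
	i_mmap_unlock_read(mapping);
	mmap_read_unlock(mm);
}
```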
56 | #include <linux/mm.h> | |
6e84f315 | 57 | #include <linux/sched/mm.h> |
29930025 | 58 | #include <linux/sched/task.h> |
1da177e4 LT |
59 | #include <linux/pagemap.h> |
60 | #include <linux/swap.h> | |
61 | #include <linux/swapops.h> | |
62 | #include <linux/slab.h> | |
63 | #include <linux/init.h> | |
5ad64688 | 64 | #include <linux/ksm.h> |
1da177e4 LT |
65 | #include <linux/rmap.h> |
66 | #include <linux/rcupdate.h> | |
b95f1b31 | 67 | #include <linux/export.h> |
8a9f3ccd | 68 | #include <linux/memcontrol.h> |
cddb8a5c | 69 | #include <linux/mmu_notifier.h> |
64cdd548 | 70 | #include <linux/migrate.h> |
0fe6e20b | 71 | #include <linux/hugetlb.h> |
444f84fd | 72 | #include <linux/huge_mm.h> |
ef5d437f | 73 | #include <linux/backing-dev.h> |
33c3fc71 | 74 | #include <linux/page_idle.h> |
a5430dda | 75 | #include <linux/memremap.h> |
bce73e48 | 76 | #include <linux/userfaultfd_k.h> |
999dad82 | 77 | #include <linux/mm_inline.h> |
1da177e4 LT |
78 | |
79 | #include <asm/tlbflush.h> | |
80 | ||
4cc79b33 | 81 | #define CREATE_TRACE_POINTS |
72b252ae | 82 | #include <trace/events/tlb.h> |
4cc79b33 | 83 | #include <trace/events/migrate.h> |
72b252ae | 84 | |
b291f000 NP |
85 | #include "internal.h" |
86 | ||
fdd2e5f8 | 87 | static struct kmem_cache *anon_vma_cachep; |
5beb4930 | 88 | static struct kmem_cache *anon_vma_chain_cachep; |
fdd2e5f8 AB |
89 | |
90 | static inline struct anon_vma *anon_vma_alloc(void) | |
91 | { | |
01d8b20d PZ |
92 | struct anon_vma *anon_vma; |
93 | ||
94 | anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); | |
95 | if (anon_vma) { | |
96 | atomic_set(&anon_vma->refcount, 1); | |
2555283e JH |
97 | anon_vma->num_children = 0; |
98 | anon_vma->num_active_vmas = 0; | |
7a3ef208 | 99 | anon_vma->parent = anon_vma; |
01d8b20d PZ |
100 | /* |
101 | * Initialise the anon_vma root to point to itself. If called | |
102 | * from fork, the root will be reset to the parent's anon_vma. | |
103 | */ | |
104 | anon_vma->root = anon_vma; | |
105 | } | |
106 | ||
107 | return anon_vma; | |
fdd2e5f8 AB |
108 | } |
109 | ||
01d8b20d | 110 | static inline void anon_vma_free(struct anon_vma *anon_vma) |
fdd2e5f8 | 111 | { |
01d8b20d | 112 | VM_BUG_ON(atomic_read(&anon_vma->refcount)); |
88c22088 PZ |
113 | |
114 | /* | |
2f031c6f | 115 | * Synchronize against folio_lock_anon_vma_read() such that |
88c22088 PZ |
116 | * we can safely hold the lock without the anon_vma getting |
117 | * freed. | |
118 | * | |
119 | * Relies on the full mb implied by the atomic_dec_and_test() from | |
120 | * put_anon_vma() against the acquire barrier implied by | |
2f031c6f | 121 | * down_read_trylock() from folio_lock_anon_vma_read(). This orders: |
88c22088 | 122 | * |
2f031c6f | 123 | * folio_lock_anon_vma_read() VS put_anon_vma() |
4fc3f1d6 | 124 | * down_read_trylock() atomic_dec_and_test() |
88c22088 | 125 | * LOCK MB |
4fc3f1d6 | 126 | * atomic_read() rwsem_is_locked() |
88c22088 PZ |
127 | * |
128 | * LOCK should suffice since the actual taking of the lock must | |
129 | * happen _before_ what follows. | |
130 | */ | |
7f39dda9 | 131 | might_sleep(); |
5a505085 | 132 | if (rwsem_is_locked(&anon_vma->root->rwsem)) { |
4fc3f1d6 | 133 | anon_vma_lock_write(anon_vma); |
08b52706 | 134 | anon_vma_unlock_write(anon_vma); |
88c22088 PZ |
135 | } |
136 | ||
fdd2e5f8 AB |
137 | kmem_cache_free(anon_vma_cachep, anon_vma); |
138 | } | |
1da177e4 | 139 | |
dd34739c | 140 | static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) |
5beb4930 | 141 | { |
dd34739c | 142 | return kmem_cache_alloc(anon_vma_chain_cachep, gfp); |
5beb4930 RR |
143 | } |
144 | ||
e574b5fd | 145 | static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) |
5beb4930 RR |
146 | { |
147 | kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); | |
148 | } | |
149 | ||
6583a843 KC |
150 | static void anon_vma_chain_link(struct vm_area_struct *vma, |
151 | struct anon_vma_chain *avc, | |
152 | struct anon_vma *anon_vma) | |
153 | { | |
154 | avc->vma = vma; | |
155 | avc->anon_vma = anon_vma; | |
156 | list_add(&avc->same_vma, &vma->anon_vma_chain); | |
bf181b9f | 157 | anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); |
6583a843 KC |
158 | } |
159 | ||
d9d332e0 | 160 | /** |
d5a187da | 161 | * __anon_vma_prepare - attach an anon_vma to a memory region |
d9d332e0 LT |
162 | * @vma: the memory region in question |
163 | * | |
164 | * This makes sure the memory mapping described by 'vma' has | |
165 | * an 'anon_vma' attached to it, so that we can associate the | |
166 | * anonymous pages mapped into it with that anon_vma. | |
167 | * | |
d5a187da VB |
168 | * The common case will be that we already have one, which |
169 | * is handled inline by anon_vma_prepare(). But if | |
23a0790a | 170 | * not we either need to find an adjacent mapping that we |
d9d332e0 LT |
171 | * can re-use the anon_vma from (very common when the only |
172 | * reason for splitting a vma has been mprotect()), or we | |
173 | * allocate a new one. | |
174 | * | |
175 | * Anon-vma allocations are very subtle, because we may have | |
2f031c6f | 176 | * optimistically looked up an anon_vma in folio_lock_anon_vma_read() |
aaf1f990 | 177 | * and that may actually touch the rwsem even in the newly |
d9d332e0 LT |
178 | * allocated vma (it depends on RCU to make sure that the |
179 | * anon_vma isn't actually destroyed). | |
180 | * | |
181 | * As a result, we need to do proper anon_vma locking even | |
182 | * for the new allocation. At the same time, we do not want | |
183 | * to do any locking for the common case of already having | |
184 | * an anon_vma. | |
d9d332e0 | 185 | */ |
d5a187da | 186 | int __anon_vma_prepare(struct vm_area_struct *vma) |
1da177e4 | 187 | { |
d5a187da VB |
188 | struct mm_struct *mm = vma->vm_mm; |
189 | struct anon_vma *anon_vma, *allocated; | |
5beb4930 | 190 | struct anon_vma_chain *avc; |
1da177e4 | 191 | |
3be51060 | 192 | mmap_assert_locked(mm); |
1da177e4 | 193 | might_sleep(); |
1da177e4 | 194 | |
d5a187da VB |
195 | avc = anon_vma_chain_alloc(GFP_KERNEL); |
196 | if (!avc) | |
197 | goto out_enomem; | |
198 | ||
199 | anon_vma = find_mergeable_anon_vma(vma); | |
200 | allocated = NULL; | |
201 | if (!anon_vma) { | |
202 | anon_vma = anon_vma_alloc(); | |
203 | if (unlikely(!anon_vma)) | |
204 | goto out_enomem_free_avc; | |
2555283e | 205 | anon_vma->num_children++; /* self-parent link for new root */ |
d5a187da VB |
206 | allocated = anon_vma; |
207 | } | |
5beb4930 | 208 | |
d5a187da VB |
209 | anon_vma_lock_write(anon_vma); |
210 | /* page_table_lock to protect against threads */ | |
211 | spin_lock(&mm->page_table_lock); | |
212 | if (likely(!vma->anon_vma)) { | |
213 | vma->anon_vma = anon_vma; | |
214 | anon_vma_chain_link(vma, avc, anon_vma); | |
2555283e | 215 | anon_vma->num_active_vmas++; |
d9d332e0 | 216 | allocated = NULL; |
d5a187da VB |
217 | avc = NULL; |
218 | } | |
219 | spin_unlock(&mm->page_table_lock); | |
220 | anon_vma_unlock_write(anon_vma); | |
1da177e4 | 221 | |
d5a187da VB |
222 | if (unlikely(allocated)) |
223 | put_anon_vma(allocated); | |
224 | if (unlikely(avc)) | |
225 | anon_vma_chain_free(avc); | |
31f2b0eb | 226 | |
1da177e4 | 227 | return 0; |
5beb4930 RR |
228 | |
229 | out_enomem_free_avc: | |
230 | anon_vma_chain_free(avc); | |
231 | out_enomem: | |
232 | return -ENOMEM; | |
1da177e4 LT |
233 | } |
234 | ||
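The fast path the comment above refers to ("handled inline by anon_vma_prepare()") lives in include/linux/rmap.h rather than in this file; it looks roughly like the following (a sketch that may drift between kernel versions):

```c
static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))	/* common case: anon_vma already attached */
		return 0;

	return __anon_vma_prepare(vma);	/* slow path, defined above */
}
```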
bb4aa396 LT |
235 | /* |
236 | * This is a useful helper function for locking the anon_vma root as | |
237 | * we traverse the vma->anon_vma_chain, looping over anon_vma's that | |
238 | * have the same vma. | |
239 | * | |
240 | * Such anon_vma's should have the same root, so you'd expect to see | |
241 | * just a single mutex_lock for the whole traversal. | |
242 | */ | |
243 | static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) | |
244 | { | |
245 | struct anon_vma *new_root = anon_vma->root; | |
246 | if (new_root != root) { | |
247 | if (WARN_ON_ONCE(root)) | |
5a505085 | 248 | up_write(&root->rwsem); |
bb4aa396 | 249 | root = new_root; |
5a505085 | 250 | down_write(&root->rwsem); |
bb4aa396 LT |
251 | } |
252 | return root; | |
253 | } | |
254 | ||
255 | static inline void unlock_anon_vma_root(struct anon_vma *root) | |
256 | { | |
257 | if (root) | |
5a505085 | 258 | up_write(&root->rwsem); |
bb4aa396 LT |
259 | } |
260 | ||
5beb4930 RR |
261 | /* |
262 | * Attach the anon_vmas from src to dst. | |
263 | * Returns 0 on success, -ENOMEM on failure. | |
7a3ef208 | 264 | * |
0503ea8f LH |
265 | * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(), |
266 | * copy_vma() and anon_vma_fork(). The first four want an exact copy of src, | |
267 | * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to | |
268 | * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before | |
269 | * call, we can identify this case by checking (!dst->anon_vma && | |
270 | * src->anon_vma). | |
47b390d2 WY |
271 | * |
272 | * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find | |
273 | * and reuse an existing anon_vma which has no vmas and only one child anon_vma. |
274 | * This prevents the anon_vma hierarchy from degrading into an endless linear |
275 | * chain when a task forks constantly. On the other hand, an anon_vma with more |
276 | * than one child isn't reused even if there is no live vma, so an rmap |
277 | * walker has a good chance of avoiding a scan of the whole hierarchy when it |
278 | * searches for where a page is mapped. |
5beb4930 RR |
279 | */ |
280 | int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) | |
1da177e4 | 281 | { |
5beb4930 | 282 | struct anon_vma_chain *avc, *pavc; |
bb4aa396 | 283 | struct anon_vma *root = NULL; |
5beb4930 | 284 | |
646d87b4 | 285 | list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { |
bb4aa396 LT |
286 | struct anon_vma *anon_vma; |
287 | ||
dd34739c LT |
288 | avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); |
289 | if (unlikely(!avc)) { | |
290 | unlock_anon_vma_root(root); | |
291 | root = NULL; | |
292 | avc = anon_vma_chain_alloc(GFP_KERNEL); | |
293 | if (!avc) | |
294 | goto enomem_failure; | |
295 | } | |
bb4aa396 LT |
296 | anon_vma = pavc->anon_vma; |
297 | root = lock_anon_vma_root(root, anon_vma); | |
298 | anon_vma_chain_link(dst, avc, anon_vma); | |
7a3ef208 KK |
299 | |
300 | /* | |
2555283e JH |
301 | * Reuse existing anon_vma if it has no vma and only one |
302 | * anon_vma child. | |
7a3ef208 | 303 | * |
2555283e | 304 | * Root anon_vma is never reused: |
7a3ef208 KK |
305 | * it has self-parent reference and at least one child. |
306 | */ | |
47b390d2 | 307 | if (!dst->anon_vma && src->anon_vma && |
2555283e JH |
308 | anon_vma->num_children < 2 && |
309 | anon_vma->num_active_vmas == 0) | |
7a3ef208 | 310 | dst->anon_vma = anon_vma; |
5beb4930 | 311 | } |
7a3ef208 | 312 | if (dst->anon_vma) |
2555283e | 313 | dst->anon_vma->num_active_vmas++; |
bb4aa396 | 314 | unlock_anon_vma_root(root); |
5beb4930 | 315 | return 0; |
1da177e4 | 316 | |
5beb4930 | 317 | enomem_failure: |
3fe89b3e | 318 | /* |
d8e454eb MW |
319 | * dst->anon_vma is dropped here otherwise its num_active_vmas can |
320 | * be incorrectly decremented in unlink_anon_vmas(). | |
3fe89b3e LY |
321 | * We can safely do this because callers of anon_vma_clone() don't care |
322 | * about dst->anon_vma if anon_vma_clone() failed. | |
323 | */ | |
324 | dst->anon_vma = NULL; | |
5beb4930 RR |
325 | unlink_anon_vmas(dst); |
326 | return -ENOMEM; | |
1da177e4 LT |
327 | } |
328 | ||
5beb4930 RR |
329 | /* |
330 | * Attach vma to its own anon_vma, as well as to the anon_vmas that | |
331 | * the corresponding VMA in the parent process is attached to. | |
332 | * Returns 0 on success, non-zero on failure. | |
333 | */ | |
334 | int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) | |
1da177e4 | 335 | { |
5beb4930 RR |
336 | struct anon_vma_chain *avc; |
337 | struct anon_vma *anon_vma; | |
c4ea95d7 | 338 | int error; |
1da177e4 | 339 | |
5beb4930 RR |
340 | /* Don't bother if the parent process has no anon_vma here. */ |
341 | if (!pvma->anon_vma) | |
342 | return 0; | |
343 | ||
7a3ef208 KK |
344 | /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ |
345 | vma->anon_vma = NULL; | |
346 | ||
5beb4930 RR |
347 | /* |
348 | * First, attach the new VMA to the parent VMA's anon_vmas, | |
349 | * so rmap can find non-COWed pages in child processes. | |
350 | */ | |
c4ea95d7 DF |
351 | error = anon_vma_clone(vma, pvma); |
352 | if (error) | |
353 | return error; | |
5beb4930 | 354 | |
7a3ef208 KK |
355 | /* An existing anon_vma has been reused, all done then. */ |
356 | if (vma->anon_vma) | |
357 | return 0; | |
358 | ||
5beb4930 RR |
359 | /* Then add our own anon_vma. */ |
360 | anon_vma = anon_vma_alloc(); | |
361 | if (!anon_vma) | |
362 | goto out_error; | |
2555283e | 363 | anon_vma->num_active_vmas++; |
dd34739c | 364 | avc = anon_vma_chain_alloc(GFP_KERNEL); |
5beb4930 RR |
365 | if (!avc) |
366 | goto out_error_free_anon_vma; | |
5c341ee1 RR |
367 | |
368 | /* | |
aaf1f990 | 369 | * The root anon_vma's rwsem is the lock actually used when we |
5c341ee1 RR |
370 | * lock any of the anon_vmas in this anon_vma tree. |
371 | */ | |
372 | anon_vma->root = pvma->anon_vma->root; | |
7a3ef208 | 373 | anon_vma->parent = pvma->anon_vma; |
76545066 | 374 | /* |
01d8b20d PZ |
375 | * With refcounts, an anon_vma can stay around longer than the |
376 | * process it belongs to. The root anon_vma needs to be pinned until | |
377 | * this anon_vma is freed, because the lock lives in the root. | |
76545066 RR |
378 | */ |
379 | get_anon_vma(anon_vma->root); | |
5beb4930 RR |
380 | /* Mark this anon_vma as the one where our new (COWed) pages go. */ |
381 | vma->anon_vma = anon_vma; | |
4fc3f1d6 | 382 | anon_vma_lock_write(anon_vma); |
5c341ee1 | 383 | anon_vma_chain_link(vma, avc, anon_vma); |
2555283e | 384 | anon_vma->parent->num_children++; |
08b52706 | 385 | anon_vma_unlock_write(anon_vma); |
5beb4930 RR |
386 | |
387 | return 0; | |
388 | ||
389 | out_error_free_anon_vma: | |
01d8b20d | 390 | put_anon_vma(anon_vma); |
5beb4930 | 391 | out_error: |
4946d54c | 392 | unlink_anon_vmas(vma); |
5beb4930 | 393 | return -ENOMEM; |
1da177e4 LT |
394 | } |
395 | ||
5beb4930 RR |
396 | void unlink_anon_vmas(struct vm_area_struct *vma) |
397 | { | |
398 | struct anon_vma_chain *avc, *next; | |
eee2acba | 399 | struct anon_vma *root = NULL; |
5beb4930 | 400 | |
5c341ee1 RR |
401 | /* |
402 | * Unlink each anon_vma chained to the VMA. This list is ordered | |
403 | * from newest to oldest, ensuring the root anon_vma gets freed last. | |
404 | */ | |
5beb4930 | 405 | list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { |
eee2acba PZ |
406 | struct anon_vma *anon_vma = avc->anon_vma; |
407 | ||
408 | root = lock_anon_vma_root(root, anon_vma); | |
bf181b9f | 409 | anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); |
eee2acba PZ |
410 | |
411 | /* | |
412 | * Leave empty anon_vmas on the list - we'll need | |
413 | * to free them outside the lock. | |
414 | */ | |
f808c13f | 415 | if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { |
2555283e | 416 | anon_vma->parent->num_children--; |
eee2acba | 417 | continue; |
7a3ef208 | 418 | } |
eee2acba PZ |
419 | |
420 | list_del(&avc->same_vma); | |
421 | anon_vma_chain_free(avc); | |
422 | } | |
ee8ab190 | 423 | if (vma->anon_vma) { |
2555283e | 424 | vma->anon_vma->num_active_vmas--; |
ee8ab190 LX |
425 | |
426 | /* | |
427 | * vma would still be needed after unlink, and anon_vma will be prepared | |
428 | * when handling a fault. |
429 | */ | |
430 | vma->anon_vma = NULL; | |
431 | } | |
eee2acba PZ |
432 | unlock_anon_vma_root(root); |
433 | ||
434 | /* | |
435 | * Iterate the list once more, it now only contains empty and unlinked | |
436 | * anon_vmas, destroy them. Could not do before due to __put_anon_vma() | |
5a505085 | 437 | * needing to write-acquire the anon_vma->root->rwsem. |
eee2acba PZ |
438 | */ |
439 | list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { | |
440 | struct anon_vma *anon_vma = avc->anon_vma; | |
441 | ||
2555283e JH |
442 | VM_WARN_ON(anon_vma->num_children); |
443 | VM_WARN_ON(anon_vma->num_active_vmas); | |
eee2acba PZ |
444 | put_anon_vma(anon_vma); |
445 | ||
5beb4930 RR |
446 | list_del(&avc->same_vma); |
447 | anon_vma_chain_free(avc); | |
448 | } | |
449 | } | |
450 | ||
51cc5068 | 451 | static void anon_vma_ctor(void *data) |
1da177e4 | 452 | { |
a35afb83 | 453 | struct anon_vma *anon_vma = data; |
1da177e4 | 454 | |
5a505085 | 455 | init_rwsem(&anon_vma->rwsem); |
83813267 | 456 | atomic_set(&anon_vma->refcount, 0); |
f808c13f | 457 | anon_vma->rb_root = RB_ROOT_CACHED; |
1da177e4 LT |
458 | } |
459 | ||
460 | void __init anon_vma_init(void) | |
461 | { | |
462 | anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), | |
5f0d5a3a | 463 | 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, |
5d097056 VD |
464 | anon_vma_ctor); |
465 | anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, | |
466 | SLAB_PANIC|SLAB_ACCOUNT); | |
1da177e4 LT |
467 | } |
468 | ||
469 | /* | |
6111e4ca PZ |
470 | * Getting a lock on a stable anon_vma from a page off the LRU is tricky! |
471 | * | |
4d8f7418 | 472 | * Since there is no serialization whatsoever against folio_remove_rmap_*() |
ad8a20cf ML |
473 | * the best this function can do is return a refcount-increased anon_vma |
474 | * that might have been relevant to this page. | |
6111e4ca PZ |
475 | * |
476 | * The page might have been remapped to a different anon_vma or the anon_vma | |
477 | * returned may already be freed (and even reused). | |
478 | * | |
bc658c96 PZ |
479 | * In case it was remapped to a different anon_vma, the new anon_vma will be a |
480 | * child of the old anon_vma, and the anon_vma lifetime rules will therefore | |
481 | * ensure that any anon_vma obtained from the page will still be valid for as | |
482 | * long as we observe page_mapped() [ hence all those page_mapped() tests ]. | |
483 | * | |
6111e4ca PZ |
484 | * All users of this function must be very careful when walking the anon_vma |
485 | * chain and verify that the page in question is indeed mapped in it | |
486 | * [ something equivalent to page_mapped_in_vma() ]. | |
487 | * | |
091e4299 | 488 | * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from |
4d8f7418 | 489 | * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid |
091e4299 MC |
490 | * if there is a mapcount, we can dereference the anon_vma after observing |
491 | * those. | |
adef4406 AA |
492 | * |
493 | * NOTE: the caller should normally hold folio lock when calling this. If | |
494 | * not, the caller needs to double check the anon_vma didn't change after | |
495 | * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it | |
496 | * concurrently without folio lock protection). See folio_lock_anon_vma_read() | |
497 | * which has already covered that, and comment above remap_pages(). | |
1da177e4 | 498 | */ |
29eea9b5 | 499 | struct anon_vma *folio_get_anon_vma(struct folio *folio) |
1da177e4 | 500 | { |
746b18d4 | 501 | struct anon_vma *anon_vma = NULL; |
1da177e4 LT |
502 | unsigned long anon_mapping; |
503 | ||
504 | rcu_read_lock(); | |
29eea9b5 | 505 | anon_mapping = (unsigned long)READ_ONCE(folio->mapping); |
3ca7b3c5 | 506 | if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) |
1da177e4 | 507 | goto out; |
29eea9b5 | 508 | if (!folio_mapped(folio)) |
1da177e4 LT |
509 | goto out; |
510 | ||
511 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); | |
746b18d4 PZ |
512 | if (!atomic_inc_not_zero(&anon_vma->refcount)) { |
513 | anon_vma = NULL; | |
514 | goto out; | |
515 | } | |
f1819427 HD |
516 | |
517 | /* | |
29eea9b5 | 518 | * If this folio is still mapped, then its anon_vma cannot have been |
746b18d4 PZ |
519 | * freed. But if it has been unmapped, we have no security against the |
520 | * anon_vma structure being freed and reused (for another anon_vma: | |
5f0d5a3a | 521 | * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() |
746b18d4 | 522 | * above cannot corrupt). |
f1819427 | 523 | */ |
29eea9b5 | 524 | if (!folio_mapped(folio)) { |
7f39dda9 | 525 | rcu_read_unlock(); |
746b18d4 | 526 | put_anon_vma(anon_vma); |
7f39dda9 | 527 | return NULL; |
746b18d4 | 528 | } |
1da177e4 LT |
529 | out: |
530 | rcu_read_unlock(); | |
746b18d4 PZ |
531 | |
532 | return anon_vma; | |
533 | } | |
534 | ||
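A hedged sketch of the calling discipline the comment above asks for: pin the anon_vma, take its lock, and re-check that the folio is still mapped before trusting the reverse mapping. The function name and the walk placeholder are illustrative, not taken from the kernel.

```c
/* Illustrative only: the "verify the folio is still mapped" pattern. */
static void anon_walk_sketch(struct folio *folio)
{
	struct anon_vma *anon_vma = folio_get_anon_vma(folio);

	if (!anon_vma)
		return;

	anon_vma_lock_read(anon_vma);
	if (folio_mapped(folio)) {
		/* ... walk anon_vma->rb_root, e.g. via anon_vma_interval_tree_foreach() ... */
	}
	anon_vma_unlock_read(anon_vma);
	put_anon_vma(anon_vma);
}
```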
88c22088 | 535 | /* |
29eea9b5 | 536 | * Similar to folio_get_anon_vma() except it locks the anon_vma. |
88c22088 PZ |
537 | * |
538 | * It's a little more complex as it tries to keep the fast path to a single |
539 | * atomic op -- the trylock. If we fail the trylock, we fall back to getting a | |
29eea9b5 | 540 | * reference like with folio_get_anon_vma() and then block on the mutex |
6d4675e6 | 541 | * in the !rwc->try_lock case. |
88c22088 | 542 | */ |
6d4675e6 MK |
543 | struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, |
544 | struct rmap_walk_control *rwc) | |
746b18d4 | 545 | { |
88c22088 | 546 | struct anon_vma *anon_vma = NULL; |
eee0f252 | 547 | struct anon_vma *root_anon_vma; |
88c22088 | 548 | unsigned long anon_mapping; |
746b18d4 | 549 | |
880a99b6 | 550 | retry: |
88c22088 | 551 | rcu_read_lock(); |
9595d769 | 552 | anon_mapping = (unsigned long)READ_ONCE(folio->mapping); |
88c22088 PZ |
553 | if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) |
554 | goto out; | |
9595d769 | 555 | if (!folio_mapped(folio)) |
88c22088 PZ |
556 | goto out; |
557 | ||
558 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); | |
4db0c3c2 | 559 | root_anon_vma = READ_ONCE(anon_vma->root); |
4fc3f1d6 | 560 | if (down_read_trylock(&root_anon_vma->rwsem)) { |
880a99b6 AA |
561 | /* |
562 | * folio_move_anon_rmap() might have changed the anon_vma as we | |
563 | * might not hold the folio lock here. | |
564 | */ | |
565 | if (unlikely((unsigned long)READ_ONCE(folio->mapping) != | |
566 | anon_mapping)) { | |
567 | up_read(&root_anon_vma->rwsem); | |
568 | rcu_read_unlock(); | |
569 | goto retry; | |
570 | } | |
571 | ||
88c22088 | 572 | /* |
9595d769 | 573 | * If the folio is still mapped, then this anon_vma is still |
eee0f252 | 574 | * its anon_vma, and holding the mutex ensures that it will |
bc658c96 | 575 | * not go away, see anon_vma_free(). |
88c22088 | 576 | */ |
9595d769 | 577 | if (!folio_mapped(folio)) { |
4fc3f1d6 | 578 | up_read(&root_anon_vma->rwsem); |
88c22088 PZ |
579 | anon_vma = NULL; |
580 | } | |
581 | goto out; | |
582 | } | |
746b18d4 | 583 | |
6d4675e6 MK |
584 | if (rwc && rwc->try_lock) { |
585 | anon_vma = NULL; | |
586 | rwc->contended = true; | |
587 | goto out; | |
588 | } | |
589 | ||
88c22088 PZ |
590 | /* trylock failed, we got to sleep */ |
591 | if (!atomic_inc_not_zero(&anon_vma->refcount)) { | |
592 | anon_vma = NULL; | |
593 | goto out; | |
594 | } | |
595 | ||
9595d769 | 596 | if (!folio_mapped(folio)) { |
7f39dda9 | 597 | rcu_read_unlock(); |
88c22088 | 598 | put_anon_vma(anon_vma); |
7f39dda9 | 599 | return NULL; |
88c22088 PZ |
600 | } |
601 | ||
602 | /* we pinned the anon_vma, it's safe to sleep */ |
603 | rcu_read_unlock(); | |
4fc3f1d6 | 604 | anon_vma_lock_read(anon_vma); |
88c22088 | 605 | |
880a99b6 AA |
606 | /* |
607 | * folio_move_anon_rmap() might have changed the anon_vma as we might | |
608 | * not hold the folio lock here. | |
609 | */ | |
610 | if (unlikely((unsigned long)READ_ONCE(folio->mapping) != | |
611 | anon_mapping)) { | |
612 | anon_vma_unlock_read(anon_vma); | |
613 | put_anon_vma(anon_vma); | |
614 | anon_vma = NULL; | |
615 | goto retry; | |
616 | } | |
617 | ||
88c22088 PZ |
618 | if (atomic_dec_and_test(&anon_vma->refcount)) { |
619 | /* | |
620 | * Oops, we held the last refcount, release the lock | |
621 | * and bail -- can't simply use put_anon_vma() because | |
4fc3f1d6 | 622 | * we'll deadlock on the anon_vma_lock_write() recursion. |
88c22088 | 623 | */ |
4fc3f1d6 | 624 | anon_vma_unlock_read(anon_vma); |
88c22088 PZ |
625 | __put_anon_vma(anon_vma); |
626 | anon_vma = NULL; | |
627 | } | |
628 | ||
629 | return anon_vma; | |
630 | ||
631 | out: | |
632 | rcu_read_unlock(); | |
746b18d4 | 633 | return anon_vma; |
34bbd704 ON |
634 | } |
635 | ||
72b252ae | 636 | #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH |
72b252ae MG |
637 | /* |
638 | * Flush TLB entries for recently unmapped pages from remote CPUs. If a PTE |
639 | * was dirty when it was unmapped, it is important that it is flushed before |
640 | * any IO is initiated on the page, to prevent lost writes. Similarly, |
641 | * it must be flushed before freeing to prevent data leakage. | |
642 | */ | |
643 | void try_to_unmap_flush(void) | |
644 | { | |
645 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; | |
72b252ae MG |
646 | |
647 | if (!tlb_ubc->flush_required) | |
648 | return; | |
649 | ||
e73ad5ff | 650 | arch_tlbbatch_flush(&tlb_ubc->arch); |
72b252ae | 651 | tlb_ubc->flush_required = false; |
d950c947 | 652 | tlb_ubc->writable = false; |
72b252ae MG |
653 | } |
654 | ||
d950c947 MG |
655 | /* Flush iff there are potentially writable TLB entries that can race with IO */ |
656 | void try_to_unmap_flush_dirty(void) | |
657 | { | |
658 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; | |
659 | ||
660 | if (tlb_ubc->writable) | |
661 | try_to_unmap_flush(); | |
662 | } | |
663 | ||
5ee2fa2f YH |
664 | /* |
665 | * Bits 0-14 of mm->tlb_flush_batched record pending generations. | |
666 | * Bits 16-30 of mm->tlb_flush_batched bit record flushed generations. | |
667 | */ | |
668 | #define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16 | |
669 | #define TLB_FLUSH_BATCH_PENDING_MASK \ | |
670 | ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1) | |
671 | #define TLB_FLUSH_BATCH_PENDING_LARGE \ | |
672 | (TLB_FLUSH_BATCH_PENDING_MASK / 2) | |
673 | ||
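As a worked example of the encoding described above, this standalone userspace-style sketch (same bit layout, but local constants rather than the kernel's) shows both generation counters living in one word and a flush making `flushed' catch up with `pending':

```c
#include <assert.h>

#define FLUSHED_SHIFT	16				 /* bits 16-30: flushed generations */
#define PENDING_MASK	((1 << (FLUSHED_SHIFT - 1)) - 1) /* bits 0-14: pending generations */

int main(void)
{
	unsigned int batch = 0;

	/* Three deferred unmaps: each one bumps the pending generation. */
	batch += 3;
	assert((batch & PENDING_MASK) == 3);
	assert((batch >> FLUSHED_SHIFT) == 0);	/* nothing flushed yet */

	/* A flush: pending != flushed, so flush and let flushed catch up. */
	unsigned int pending = batch & PENDING_MASK;
	batch = pending | (pending << FLUSHED_SHIFT);
	assert((batch & PENDING_MASK) == (batch >> FLUSHED_SHIFT));

	return 0;
}
```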
f73419bb BS |
674 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, |
675 | unsigned long uaddr) | |
72b252ae MG |
676 | { |
677 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; | |
bdeb9188 | 678 | int batch; |
4d4b6d66 YH |
679 | bool writable = pte_dirty(pteval); |
680 | ||
681 | if (!pte_accessible(mm, pteval)) | |
682 | return; | |
72b252ae | 683 | |
f73419bb | 684 | arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr); |
72b252ae | 685 | tlb_ubc->flush_required = true; |
d950c947 | 686 | |
3ea27719 MG |
687 | /* |
688 | * Ensure compiler does not re-order the setting of tlb_flush_batched | |
689 | * before the PTE is cleared. | |
690 | */ | |
691 | barrier(); | |
5ee2fa2f YH |
692 | batch = atomic_read(&mm->tlb_flush_batched); |
693 | retry: | |
694 | if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) { | |
695 | /* | |
696 | * Prevent `pending' from catching up with `flushed' because of | |
697 | * overflow. Reset `pending' and `flushed' to be 1 and 0 if | |
698 | * `pending' becomes large. | |
699 | */ | |
bdeb9188 | 700 | if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) |
5ee2fa2f | 701 | goto retry; |
5ee2fa2f YH |
702 | } else { |
703 | atomic_inc(&mm->tlb_flush_batched); | |
704 | } | |
3ea27719 | 705 | |
d950c947 MG |
706 | /* |
707 | * If the PTE was dirty then it's best to assume it's writable. The | |
708 | * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() | |
709 | * before the page is queued for IO. | |
710 | */ | |
711 | if (writable) | |
712 | tlb_ubc->writable = true; | |
72b252ae MG |
713 | } |
714 | ||
715 | /* | |
716 | * Returns true if the TLB flush should be deferred to the end of a batch of | |
717 | * unmap operations to reduce IPIs. | |
718 | */ | |
719 | static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) | |
720 | { | |
72b252ae MG |
721 | if (!(flags & TTU_BATCH_FLUSH)) |
722 | return false; | |
723 | ||
65c8d30e | 724 | return arch_tlbbatch_should_defer(mm); |
72b252ae | 725 | } |
3ea27719 MG |
726 | |
727 | /* | |
728 | * Reclaim unmaps pages under the PTL but does not flush the TLB prior to |
729 | * releasing the PTL if TLB flushes are batched. It's possible for a parallel | |
730 | * operation such as mprotect or munmap to race between reclaim unmapping | |
731 | * the page and flushing the page. If this race occurs, it potentially allows | |
732 | * access to data via a stale TLB entry. Tracking all mm's that have TLB | |
733 | * batching in flight would be expensive during reclaim so instead track | |
734 | * whether TLB batching occurred in the past and if so then do a flush here | |
735 | * if required. This will cost one additional flush per reclaim cycle paid | |
736 | * by the first operation at risk, such as mprotect or munmap. |
737 | * | |
738 | * This must be called under the PTL so that an access to tlb_flush_batched | |
739 | * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise | |
740 | * via the PTL. | |
741 | */ | |
742 | void flush_tlb_batched_pending(struct mm_struct *mm) | |
743 | { | |
5ee2fa2f YH |
744 | int batch = atomic_read(&mm->tlb_flush_batched); |
745 | int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK; | |
746 | int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT; | |
3ea27719 | 747 | |
5ee2fa2f | 748 | if (pending != flushed) { |
db6c1f6f | 749 | arch_flush_tlb_batched_pending(mm); |
3ea27719 | 750 | /* |
5ee2fa2f YH |
751 | * If new TLB flushes became pending while we were flushing, leave |
752 | * mm->tlb_flush_batched as is, so those flushes are not lost. |
3ea27719 | 753 | */ |
5ee2fa2f YH |
754 | atomic_cmpxchg(&mm->tlb_flush_batched, batch, |
755 | pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); | |
3ea27719 MG |
756 | } |
757 | } | |
72b252ae | 758 | #else |
f73419bb BS |
759 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, |
760 | unsigned long uaddr) | |
72b252ae MG |
761 | { |
762 | } | |
763 | ||
764 | static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) | |
765 | { | |
766 | return false; | |
767 | } | |
768 | #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ | |
769 | ||
1da177e4 | 770 | /* |
bf89c8c8 | 771 | * At what user virtual address is page expected in vma? |
ab941e0f | 772 | * Caller should check the page is actually part of the vma. |
1da177e4 LT |
773 | */ |
774 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) | |
775 | { | |
e05b3453 | 776 | struct folio *folio = page_folio(page); |
412ad5fb MWO |
777 | pgoff_t pgoff; |
778 | ||
e05b3453 MWO |
779 | if (folio_test_anon(folio)) { |
780 | struct anon_vma *page__anon_vma = folio_anon_vma(folio); | |
4829b906 HD |
781 | /* |
782 | * Note: swapoff's unuse_vma() is more efficient with this | |
783 | * check, and needs it to match anon_vma when KSM is active. | |
784 | */ | |
785 | if (!vma->anon_vma || !page__anon_vma || | |
786 | vma->anon_vma->root != page__anon_vma->root) | |
21d0d443 | 787 | return -EFAULT; |
31657170 JW |
788 | } else if (!vma->vm_file) { |
789 | return -EFAULT; | |
e05b3453 | 790 | } else if (vma->vm_file->f_mapping != folio->mapping) { |
1da177e4 | 791 | return -EFAULT; |
31657170 | 792 | } |
494334e4 | 793 | |
412ad5fb MWO |
794 | /* The !page__anon_vma above handles KSM folios */ |
795 | pgoff = folio->index + folio_page_idx(folio, page); | |
e0abfbb6 | 796 | return vma_address(vma, pgoff, 1); |
1da177e4 LT |
797 | } |
798 | ||
50722804 ZK |
799 | /* |
800 | * Returns the actual pmd_t* where we expect 'address' to be mapped from, or | |
801 | * NULL if it doesn't exist. No guarantees / checks on what the pmd_t* | |
802 | * represents. | |
803 | */ | |
6219049a BL |
804 | pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) |
805 | { | |
806 | pgd_t *pgd; | |
c2febafc | 807 | p4d_t *p4d; |
6219049a BL |
808 | pud_t *pud; |
809 | pmd_t *pmd = NULL; | |
810 | ||
811 | pgd = pgd_offset(mm, address); | |
812 | if (!pgd_present(*pgd)) | |
813 | goto out; | |
814 | ||
c2febafc KS |
815 | p4d = p4d_offset(pgd, address); |
816 | if (!p4d_present(*p4d)) | |
817 | goto out; | |
818 | ||
819 | pud = pud_offset(p4d, address); | |
6219049a BL |
820 | if (!pud_present(*pud)) |
821 | goto out; | |
822 | ||
823 | pmd = pmd_offset(pud, address); | |
6219049a BL |
824 | out: |
825 | return pmd; | |
826 | } | |
827 | ||
b3ac0413 | 828 | struct folio_referenced_arg { |
8749cfea VD |
829 | int mapcount; |
830 | int referenced; | |
831 | unsigned long vm_flags; | |
832 | struct mem_cgroup *memcg; | |
833 | }; | |
1acbc3f9 | 834 | |
8749cfea | 835 | /* |
b3ac0413 | 836 | * arg: folio_referenced_arg will be passed |
8749cfea | 837 | */ |
2f031c6f MWO |
838 | static bool folio_referenced_one(struct folio *folio, |
839 | struct vm_area_struct *vma, unsigned long address, void *arg) | |
8749cfea | 840 | { |
b3ac0413 MWO |
841 | struct folio_referenced_arg *pra = arg; |
842 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); | |
8749cfea | 843 | int referenced = 0; |
1acbc3f9 | 844 | unsigned long start = address, ptes = 0; |
8749cfea | 845 | |
8eaedede KS |
846 | while (page_vma_mapped_walk(&pvmw)) { |
847 | address = pvmw.address; | |
b20ce5e0 | 848 | |
1acbc3f9 YF |
849 | if (vma->vm_flags & VM_LOCKED) { |
850 | if (!folio_test_large(folio) || !pvmw.pte) { | |
851 | /* Restore the mlock which got missed */ | |
852 | mlock_vma_folio(folio, vma); | |
853 | page_vma_mapped_walk_done(&pvmw); | |
854 | pra->vm_flags |= VM_LOCKED; | |
855 | return false; /* To break the loop */ | |
856 | } | |
857 | /* | |
858 | * A large folio that is fully mapped to the VMA will |
859 | * be handled after the pvmw loop. |
860 | * |
861 | * A large folio that crosses VMA boundaries is |
862 | * expected to be picked up by page reclaim, but |
863 | * references to its pages that lie within the |
864 | * VM_LOCKED vma should be skipped, since page |
865 | * reclaim should only count references to pages |
866 | * outside the VM_LOCKED range. |
867 | */ | |
868 | ptes++; | |
869 | pra->mapcount--; | |
870 | continue; | |
8eaedede | 871 | } |
71e3aac0 | 872 | |
8eaedede | 873 | if (pvmw.pte) { |
c33c7948 RR |
874 | if (lru_gen_enabled() && |
875 | pte_young(ptep_get(pvmw.pte))) { | |
018ee47f YZ |
876 | lru_gen_look_around(&pvmw); |
877 | referenced++; | |
878 | } | |
879 | ||
8eaedede | 880 | if (ptep_clear_flush_young_notify(vma, address, |
8788f678 YZ |
881 | pvmw.pte)) |
882 | referenced++; | |
8eaedede KS |
883 | } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { |
884 | if (pmdp_clear_flush_young_notify(vma, address, | |
885 | pvmw.pmd)) | |
8749cfea | 886 | referenced++; |
8eaedede | 887 | } else { |
b3ac0413 | 888 | /* unexpected pmd-mapped folio? */ |
8eaedede | 889 | WARN_ON_ONCE(1); |
8749cfea | 890 | } |
8eaedede KS |
891 | |
892 | pra->mapcount--; | |
b20ce5e0 | 893 | } |
b20ce5e0 | 894 | |
1acbc3f9 YF |
895 | if ((vma->vm_flags & VM_LOCKED) && |
896 | folio_test_large(folio) && | |
897 | folio_within_vma(folio, vma)) { | |
898 | unsigned long s_align, e_align; | |
899 | ||
900 | s_align = ALIGN_DOWN(start, PMD_SIZE); | |
901 | e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE); | |
902 | ||
903 | /* folio doesn't cross page table boundary and fully mapped */ | |
904 | if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) { | |
905 | /* Restore the mlock which got missed */ | |
906 | mlock_vma_folio(folio, vma); | |
907 | pra->vm_flags |= VM_LOCKED; | |
908 | return false; /* To break the loop */ | |
909 | } | |
910 | } | |
911 | ||
33c3fc71 | 912 | if (referenced) |
b3ac0413 MWO |
913 | folio_clear_idle(folio); |
914 | if (folio_test_clear_young(folio)) | |
33c3fc71 VD |
915 | referenced++; |
916 | ||
9f32624b JK |
917 | if (referenced) { |
918 | pra->referenced++; | |
47d4f3ee | 919 | pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; |
1da177e4 | 920 | } |
34bbd704 | 921 | |
9f32624b | 922 | if (!pra->mapcount) |
e4b82222 | 923 | return false; /* To break the loop */ |
9f32624b | 924 | |
e4b82222 | 925 | return true; |
1da177e4 LT |
926 | } |
927 | ||
b3ac0413 | 928 | static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) |
1da177e4 | 929 | { |
b3ac0413 | 930 | struct folio_referenced_arg *pra = arg; |
9f32624b | 931 | struct mem_cgroup *memcg = pra->memcg; |
1da177e4 | 932 | |
8788f678 YZ |
933 | /* |
934 | * Ignore references from this mapping if it has no recency. If the | |
935 | * folio has been used in another mapping, we will catch it; if this | |
936 | * other mapping is already gone, the unmap path will have set the | |
937 | * referenced flag or activated the folio in zap_pte_range(). | |
938 | */ | |
939 | if (!vma_has_recency(vma)) | |
940 | return true; | |
941 | ||
942 | /* | |
943 | * If we are reclaiming on behalf of a cgroup, skip counting |
944 | * references from different cgroups. |
945 | */ | |
946 | if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) | |
9f32624b | 947 | return true; |
1da177e4 | 948 | |
9f32624b | 949 | return false; |
1da177e4 LT |
950 | } |
951 | ||
952 | /** | |
b3ac0413 MWO |
953 | * folio_referenced() - Test if the folio was referenced. |
954 | * @folio: The folio to test. | |
955 | * @is_locked: Caller holds lock on the folio. | |
72835c86 | 956 | * @memcg: target memory cgroup |
b3ac0413 | 957 | * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. |
1da177e4 | 958 | * |
b3ac0413 MWO |
959 | * Quick test_and_clear_referenced for all mappings of a folio, |
960 | * | |
6d4675e6 MK |
961 | * Return: The number of mappings which referenced the folio. Return -1 if |
962 | * the function bailed out due to rmap lock contention. | |
1da177e4 | 963 | */ |
b3ac0413 MWO |
964 | int folio_referenced(struct folio *folio, int is_locked, |
965 | struct mem_cgroup *memcg, unsigned long *vm_flags) | |
1da177e4 | 966 | { |
637a900b | 967 | bool we_locked = false; |
b3ac0413 MWO |
968 | struct folio_referenced_arg pra = { |
969 | .mapcount = folio_mapcount(folio), | |
9f32624b JK |
970 | .memcg = memcg, |
971 | }; | |
972 | struct rmap_walk_control rwc = { | |
b3ac0413 | 973 | .rmap_one = folio_referenced_one, |
9f32624b | 974 | .arg = (void *)&pra, |
2f031c6f | 975 | .anon_lock = folio_lock_anon_vma_read, |
6d4675e6 | 976 | .try_lock = true, |
8788f678 | 977 | .invalid_vma = invalid_folio_referenced_vma, |
9f32624b | 978 | }; |
1da177e4 | 979 | |
6fe6b7e3 | 980 | *vm_flags = 0; |
059d8442 | 981 | if (!pra.mapcount) |
9f32624b JK |
982 | return 0; |
983 | ||
b3ac0413 | 984 | if (!folio_raw_mapping(folio)) |
9f32624b JK |
985 | return 0; |
986 | ||
b3ac0413 MWO |
987 | if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { |
988 | we_locked = folio_trylock(folio); | |
9f32624b JK |
989 | if (!we_locked) |
990 | return 1; | |
1da177e4 | 991 | } |
9f32624b | 992 | |
2f031c6f | 993 | rmap_walk(folio, &rwc); |
9f32624b JK |
994 | *vm_flags = pra.vm_flags; |
995 | ||
996 | if (we_locked) | |
b3ac0413 | 997 | folio_unlock(folio); |
9f32624b | 998 | |
6d4675e6 | 999 | return rwc.contended ? -1 : pra.referenced; |
1da177e4 LT |
1000 | } |
1001 | ||
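A hedged sketch of how a caller such as page reclaim might interpret folio_referenced()'s result, including the -1 "rmap lock contended" case; the wrapper name and the policy choices are illustrative, not lifted from mm/vmscan.c.

```c
/* Illustrative only: turn folio_referenced()'s tri-state result into a keep/reclaim hint. */
static bool folio_recently_used(struct folio *folio, struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int referenced = folio_referenced(folio, folio_test_locked(folio),
					  memcg, &vm_flags);

	if (referenced == -1)		/* rmap lock contended: don't stall, assume used */
		return true;
	if (vm_flags & VM_LOCKED)	/* mlocked somewhere: keep it */
		return true;
	return referenced > 0;
}
```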
6a8e0596 | 1002 | static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) |
d08b3851 | 1003 | { |
6a8e0596 MS |
1004 | int cleaned = 0; |
1005 | struct vm_area_struct *vma = pvmw->vma; | |
ac46d4f3 | 1006 | struct mmu_notifier_range range; |
6a8e0596 | 1007 | unsigned long address = pvmw->address; |
d08b3851 | 1008 | |
369ea824 JG |
1009 | /* |
1010 | * We have to assume the worst case, i.e. pmd, for invalidation. Note that |
e83c09a2 | 1011 | * the folio cannot be freed from this function. |
369ea824 | 1012 | */ |
7d4a8be0 AP |
1013 | mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, |
1014 | vma->vm_mm, address, vma_address_end(pvmw)); | |
ac46d4f3 | 1015 | mmu_notifier_invalidate_range_start(&range); |
369ea824 | 1016 | |
6a8e0596 | 1017 | while (page_vma_mapped_walk(pvmw)) { |
f27176cf | 1018 | int ret = 0; |
369ea824 | 1019 | |
6a8e0596 MS |
1020 | address = pvmw->address; |
1021 | if (pvmw->pte) { | |
6a8e0596 | 1022 | pte_t *pte = pvmw->pte; |
c33c7948 | 1023 | pte_t entry = ptep_get(pte); |
f27176cf | 1024 | |
c33c7948 | 1025 | if (!pte_dirty(entry) && !pte_write(entry)) |
f27176cf KS |
1026 | continue; |
1027 | ||
c33c7948 | 1028 | flush_cache_page(vma, address, pte_pfn(entry)); |
785373b4 | 1029 | entry = ptep_clear_flush(vma, address, pte); |
f27176cf KS |
1030 | entry = pte_wrprotect(entry); |
1031 | entry = pte_mkclean(entry); | |
785373b4 | 1032 | set_pte_at(vma->vm_mm, address, pte, entry); |
f27176cf KS |
1033 | ret = 1; |
1034 | } else { | |
396bcc52 | 1035 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
6a8e0596 | 1036 | pmd_t *pmd = pvmw->pmd; |
f27176cf KS |
1037 | pmd_t entry; |
1038 | ||
1039 | if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) | |
1040 | continue; | |
1041 | ||
7f9c9b60 MS |
1042 | flush_cache_range(vma, address, |
1043 | address + HPAGE_PMD_SIZE); | |
024eee0e | 1044 | entry = pmdp_invalidate(vma, address, pmd); |
f27176cf KS |
1045 | entry = pmd_wrprotect(entry); |
1046 | entry = pmd_mkclean(entry); | |
785373b4 | 1047 | set_pmd_at(vma->vm_mm, address, pmd, entry); |
f27176cf KS |
1048 | ret = 1; |
1049 | #else | |
e83c09a2 | 1050 | /* unexpected pmd-mapped folio? */ |
f27176cf KS |
1051 | WARN_ON_ONCE(1); |
1052 | #endif | |
1053 | } | |
d08b3851 | 1054 | |
0f10851e | 1055 | if (ret) |
6a8e0596 | 1056 | cleaned++; |
c2fda5fe | 1057 | } |
d08b3851 | 1058 | |
ac46d4f3 | 1059 | mmu_notifier_invalidate_range_end(&range); |
369ea824 | 1060 | |
6a8e0596 MS |
1061 | return cleaned; |
1062 | } | |
1063 | ||
1064 | static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, | |
1065 | unsigned long address, void *arg) | |
1066 | { | |
1067 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); | |
1068 | int *cleaned = arg; | |
1069 | ||
1070 | *cleaned += page_vma_mkclean_one(&pvmw); | |
1071 | ||
e4b82222 | 1072 | return true; |
d08b3851 PZ |
1073 | } |
1074 | ||
9853a407 | 1075 | static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) |
d08b3851 | 1076 | { |
9853a407 | 1077 | if (vma->vm_flags & VM_SHARED) |
871beb8c | 1078 | return false; |
d08b3851 | 1079 | |
871beb8c | 1080 | return true; |
d08b3851 PZ |
1081 | } |
1082 | ||
d9c08e22 | 1083 | int folio_mkclean(struct folio *folio) |
d08b3851 | 1084 | { |
9853a407 JK |
1085 | int cleaned = 0; |
1086 | struct address_space *mapping; | |
1087 | struct rmap_walk_control rwc = { | |
1088 | .arg = (void *)&cleaned, | |
1089 | .rmap_one = page_mkclean_one, | |
1090 | .invalid_vma = invalid_mkclean_vma, | |
1091 | }; | |
d08b3851 | 1092 | |
d9c08e22 | 1093 | BUG_ON(!folio_test_locked(folio)); |
d08b3851 | 1094 | |
d9c08e22 | 1095 | if (!folio_mapped(folio)) |
9853a407 JK |
1096 | return 0; |
1097 | ||
d9c08e22 | 1098 | mapping = folio_mapping(folio); |
9853a407 JK |
1099 | if (!mapping) |
1100 | return 0; | |
1101 | ||
2f031c6f | 1102 | rmap_walk(folio, &rwc); |
d08b3851 | 1103 | |
9853a407 | 1104 | return cleaned; |
d08b3851 | 1105 | } |
d9c08e22 | 1106 | EXPORT_SYMBOL_GPL(folio_mkclean); |
d08b3851 | 1107 | |
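folio_mkclean() is the exported entry point used when writeback write-protects a folio's shared mappings; the wrapper below is a hedged usage sketch (the function name and surrounding bookkeeping are illustrative, though the mkclean-then-mark-dirty pattern mirrors folio_clear_dirty_for_io()).

```c
/* Illustrative only: write-protect all shared mappings before starting writeback. */
static void prepare_folio_for_writeback(struct folio *folio)
{
	folio_lock(folio);			/* folio_mkclean() requires the folio lock */
	if (folio_mkclean(folio))
		folio_mark_dirty(folio);	/* a mapping was dirty: keep the folio dirty */
	folio_unlock(folio);
}
```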
6a8e0596 MS |
1108 | /** |
1109 | * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of | |
1110 | * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) | |
1111 | * within the @vma of shared mappings. And since clean PTEs | |
1112 | * should also be readonly, write protects them too. | |
1113 | * @pfn: start pfn. | |
1114 | * @nr_pages: number of physically contiguous pages starting with @pfn. |
1115 | * @pgoff: page offset that the @pfn mapped with. | |
1116 | * @vma: vma that @pfn mapped within. | |
1117 | * | |
1118 | * Returns the number of cleaned PTEs (including PMDs). | |
1119 | */ | |
1120 | int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, | |
1121 | struct vm_area_struct *vma) | |
1122 | { | |
1123 | struct page_vma_mapped_walk pvmw = { | |
1124 | .pfn = pfn, | |
1125 | .nr_pages = nr_pages, | |
1126 | .pgoff = pgoff, | |
1127 | .vma = vma, | |
1128 | .flags = PVMW_SYNC, | |
1129 | }; | |
1130 | ||
1131 | if (invalid_mkclean_vma(vma, NULL)) | |
1132 | return 0; | |
1133 | ||
e0abfbb6 | 1134 | pvmw.address = vma_address(vma, pgoff, nr_pages); |
6a8e0596 MS |
1135 | VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); |
1136 | ||
1137 | return page_vma_mkclean_one(&pvmw); | |
1138 | } | |
1139 | ||
96fd7495 DH |
1140 | static __always_inline unsigned int __folio_add_rmap(struct folio *folio, |
1141 | struct page *page, int nr_pages, enum rmap_level level, | |
1142 | int *nr_pmdmapped) | |
1143 | { | |
1144 | atomic_t *mapped = &folio->_nr_pages_mapped; | |
05c5323b | 1145 | const int orig_nr_pages = nr_pages; |
96fd7495 DH |
1146 | int first, nr = 0; |
1147 | ||
1148 | __folio_rmap_sanity_checks(folio, page, nr_pages, level); | |
1149 | ||
1150 | switch (level) { | |
1151 | case RMAP_LEVEL_PTE: | |
46d62de7 DH |
1152 | if (!folio_test_large(folio)) { |
1153 | nr = atomic_inc_and_test(&page->_mapcount); | |
1154 | break; | |
1155 | } | |
1156 | ||
96fd7495 DH |
1157 | do { |
1158 | first = atomic_inc_and_test(&page->_mapcount); | |
46d62de7 | 1159 | if (first) { |
96fd7495 | 1160 | first = atomic_inc_return_relaxed(mapped); |
46d62de7 DH |
1161 | if (first < ENTIRELY_MAPPED) |
1162 | nr++; | |
96fd7495 | 1163 | } |
96fd7495 | 1164 | } while (page++, --nr_pages > 0); |
05c5323b | 1165 | atomic_add(orig_nr_pages, &folio->_large_mapcount); |
96fd7495 DH |
1166 | break; |
1167 | case RMAP_LEVEL_PMD: | |
1168 | first = atomic_inc_and_test(&folio->_entire_mapcount); | |
1169 | if (first) { | |
e78a13fd DH |
1170 | nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); |
1171 | if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { | |
96fd7495 DH |
1172 | *nr_pmdmapped = folio_nr_pages(folio); |
1173 | nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); | |
1174 | /* Raced ahead of a remove and another add? */ | |
1175 | if (unlikely(nr < 0)) | |
1176 | nr = 0; | |
1177 | } else { | |
e78a13fd | 1178 | /* Raced ahead of a remove of ENTIRELY_MAPPED */ |
96fd7495 DH |
1179 | nr = 0; |
1180 | } | |
1181 | } | |
05c5323b | 1182 | atomic_inc(&folio->_large_mapcount); |
96fd7495 DH |
1183 | break; |
1184 | } | |
1185 | return nr; | |
1186 | } | |
1187 | ||
c44b6743 | 1188 | /** |
06968625 DH |
1189 | * folio_move_anon_rmap - move a folio to our anon_vma |
1190 | * @folio: The folio to move to our anon_vma | |
1191 | * @vma: The vma the folio belongs to | |
c44b6743 | 1192 | * |
06968625 DH |
1193 | * When a folio belongs exclusively to one process after a COW event, |
1194 | * that folio can be moved into the anon_vma that belongs to just that | |
1195 | * process, so the rmap code will not search the parent or sibling processes. | |
c44b6743 | 1196 | */ |
06968625 | 1197 | void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) |
c44b6743 | 1198 | { |
595af4c9 | 1199 | void *anon_vma = vma->anon_vma; |
5a49973d | 1200 | |
595af4c9 | 1201 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
81d1b09c | 1202 | VM_BUG_ON_VMA(!anon_vma, vma); |
c44b6743 | 1203 | |
595af4c9 | 1204 | anon_vma += PAGE_MAPPING_ANON; |
414e2fb8 VD |
1205 | /* |
1206 | * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written | |
b3ac0413 MWO |
1207 | * simultaneously, so a concurrent reader (eg folio_referenced()'s |
1208 | * folio_test_anon()) will not see one without the other. | |
414e2fb8 | 1209 | */ |
595af4c9 | 1210 | WRITE_ONCE(folio->mapping, anon_vma); |
c44b6743 RR |
1211 | } |
1212 | ||
9617d95e | 1213 | /** |
c66db8c0 DH |
1214 | * __folio_set_anon - set up a new anonymous rmap for a folio |
1215 | * @folio: The folio to set up the new anonymous rmap for. | |
1216 | * @vma: VM area to add the folio to. | |
c33c7948 | 1217 | * @address: User virtual address of the mapping |
c66db8c0 | 1218 | * @exclusive: Whether the folio is exclusive to the process. |
9617d95e | 1219 | */ |
c66db8c0 DH |
1220 | static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, |
1221 | unsigned long address, bool exclusive) | |
9617d95e | 1222 | { |
e8a03feb | 1223 | struct anon_vma *anon_vma = vma->anon_vma; |
ea90002b | 1224 | |
e8a03feb | 1225 | BUG_ON(!anon_vma); |
ea90002b LT |
1226 | |
1227 | /* | |
c66db8c0 DH |
1228 | * If the folio isn't exclusive to this vma, we must use the _oldest_ |
1229 | * possible anon_vma for the folio mapping! | |
ea90002b | 1230 | */ |
4e1c1975 | 1231 | if (!exclusive) |
288468c3 | 1232 | anon_vma = anon_vma->root; |
9617d95e | 1233 | |
16f5e707 | 1234 | /* |
5b4bd90f | 1235 | * page_idle does a lockless/optimistic rmap scan on folio->mapping. |
16f5e707 AS |
1236 | * Make sure the compiler doesn't split the stores of anon_vma and |
1237 | * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code | |
1238 | * could mistake the mapping for a struct address_space and crash. | |
1239 | */ | |
9617d95e | 1240 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
5b4bd90f MWO |
1241 | WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); |
1242 | folio->index = linear_page_index(vma, address); | |
9617d95e NP |
1243 | } |
1244 | ||
c97a9e10 | 1245 | /** |
43d8eac4 | 1246 | * __page_check_anon_rmap - sanity check anonymous rmap addition |
dba438bd MWO |
1247 | * @folio: The folio containing @page. |
1248 | * @page: the page to check the mapping of | |
c97a9e10 NP |
1249 | * @vma: the vm area in which the mapping is added |
1250 | * @address: the user virtual address mapped | |
1251 | */ | |
dba438bd | 1252 | static void __page_check_anon_rmap(struct folio *folio, struct page *page, |
c97a9e10 NP |
1253 | struct vm_area_struct *vma, unsigned long address) |
1254 | { | |
c97a9e10 NP |
1255 | /* |
1256 | * The page's anon-rmap details (mapping and index) are guaranteed to | |
1257 | * be set up correctly at this point. | |
1258 | * | |
84f0169e | 1259 | * We have exclusion against folio_add_anon_rmap_*() because the caller |
90aaca85 | 1260 | * always holds the page locked. |
c97a9e10 | 1261 | * |
cb9089ba | 1262 | * We have exclusion against folio_add_new_anon_rmap because those pages |
c97a9e10 | 1263 | * are initially only visible via the pagetables, and the pte is locked |
cb9089ba | 1264 | * over the call to folio_add_new_anon_rmap. |
c97a9e10 | 1265 | */ |
e05b3453 MWO |
1266 | VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, |
1267 | folio); | |
30c46382 YS |
1268 | VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), |
1269 | page); | |
c97a9e10 NP |
1270 | } |
1271 | ||
15c0536f YA |
1272 | static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped) |
1273 | { | |
1274 | int idx; | |
1275 | ||
1276 | if (nr) { | |
1277 | idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; | |
1278 | __lruvec_stat_mod_folio(folio, idx, nr); | |
1279 | } | |
1280 | if (nr_pmdmapped) { | |
1281 | if (folio_test_anon(folio)) { | |
1282 | idx = NR_ANON_THPS; | |
1283 | __lruvec_stat_mod_folio(folio, idx, nr_pmdmapped); | |
1284 | } else { | |
1285 | /* NR_*_PMDMAPPED are not maintained per-memcg */ | |
1286 | idx = folio_test_swapbacked(folio) ? | |
1287 | NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED; | |
1288 | __mod_node_page_state(folio_pgdat(folio), idx, | |
1289 | nr_pmdmapped); | |
1290 | } | |
1291 | } | |
1292 | } | |
1293 | ||
8bd51300 DH |
1294 | static __always_inline void __folio_add_anon_rmap(struct folio *folio, |
1295 | struct page *page, int nr_pages, struct vm_area_struct *vma, | |
1296 | unsigned long address, rmap_t flags, enum rmap_level level) | |
1297 | { | |
1298 | int i, nr, nr_pmdmapped = 0; | |
cb67f428 | 1299 | |
4c1171f1 BS |
1300 | VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); |
1301 | ||
8bd51300 | 1302 | nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); |
5ad64688 | 1303 | |
4c1171f1 | 1304 | if (likely(!folio_test_ksm(folio))) |
c5c54003 | 1305 | __page_check_anon_rmap(folio, page, vma, address); |
8bd51300 | 1306 | |
15c0536f YA |
1307 | __folio_mod_stat(folio, nr, nr_pmdmapped); |
1308 | ||
8bd51300 DH |
1309 | if (flags & RMAP_EXCLUSIVE) { |
1310 | switch (level) { | |
1311 | case RMAP_LEVEL_PTE: | |
1312 | for (i = 0; i < nr_pages; i++) | |
1313 | SetPageAnonExclusive(page + i); | |
1314 | break; | |
1315 | case RMAP_LEVEL_PMD: | |
1316 | SetPageAnonExclusive(page); | |
1317 | break; | |
1318 | } | |
1319 | } | |
1320 | for (i = 0; i < nr_pages; i++) { | |
1321 | struct page *cur_page = page + i; | |
1322 | ||
1323 | /* While PTE-mapping a THP we have a PMD and a PTE mapping. */ | |
1324 | VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 || | |
1325 | (folio_test_large(folio) && | |
1326 | folio_entire_mapcount(folio) > 1)) && | |
1327 | PageAnonExclusive(cur_page), folio); | |
1328 | } | |
cea86fe2 | 1329 | |
1acbc3f9 YF |
1330 | /* |
1331 | * For a large folio, only mlock it if it's fully mapped to the VMA. It's |
1332 | * not easy to check here whether the large folio is fully mapped, so |
1333 | * only mlock normal 4K folios and leave page reclaim to handle |
1334 | * large folios. |
1335 | */ | |
1336 | if (!folio_test_large(folio)) | |
1337 | mlock_vma_folio(folio, vma); | |
1da177e4 LT |
1338 | } |
1339 | ||
8bd51300 DH |
1340 | /** |
1341 | * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio | |
1342 | * @folio: The folio to add the mappings to | |
1343 | * @page: The first page to add | |
1344 | * @nr_pages: The number of pages which will be mapped | |
1345 | * @vma: The vm area in which the mappings are added | |
1346 | * @address: The user virtual address of the first page to map | |
1347 | * @flags: The rmap flags | |
1348 | * | |
1349 | * The page range of folio is defined by [first_page, first_page + nr_pages) | |
1350 | * | |
1351 | * The caller needs to hold the page table lock, and the page must be locked in | |
1352 | * the anon_vma case: to serialize mapping,index checking after setting, | |
1353 | * and to ensure that an anon folio is not being upgraded racily to a KSM folio | |
1354 | * (but KSM folios are never downgraded). | |
1355 | */ | |
1356 | void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, | |
1357 | int nr_pages, struct vm_area_struct *vma, unsigned long address, | |
1358 | rmap_t flags) | |
1359 | { | |
1360 | __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, | |
1361 | RMAP_LEVEL_PTE); | |
1362 | } | |
1363 | ||
1364 | /** | |
1365 | * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio | |
1366 | * @folio: The folio to add the mapping to | |
1367 | * @page: The first page to add | |
1368 | * @vma: The vm area in which the mapping is added | |
1369 | * @address: The user virtual address of the first page to map | |
1370 | * @flags: The rmap flags | |
1371 | * | |
1372 | * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) |
1373 | * | |
1374 | * The caller needs to hold the page table lock, and the page must be locked in | |
1375 | * the anon_vma case: to serialize mapping,index checking after setting. | |
1376 | */ | |
1377 | void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, | |
1378 | struct vm_area_struct *vma, unsigned long address, rmap_t flags) | |
1379 | { | |
1380 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
1381 | __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, | |
1382 | RMAP_LEVEL_PMD); | |
1383 | #else | |
1384 | WARN_ON_ONCE(true); | |
1385 | #endif | |
1386 | } | |
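/*
 * Illustrative sketch, not part of rmap.c: a hypothetical caller that
 * re-establishes PTE mappings of an existing anon folio.  It assumes the
 * page table lock is held and, in the anon_vma case, that the folio is
 * locked, as the kerneldoc above requires.  The function name and
 * parameters are examples only.
 */
static inline void example_readd_anon_mappings(struct folio *folio,
		struct page *first_page, int nr_pages,
		struct vm_area_struct *vma, unsigned long addr)
{
	/* Pass RMAP_EXCLUSIVE instead if each page is exclusive to this MM. */
	folio_add_anon_rmap_ptes(folio, first_page, nr_pages, vma, addr,
				 RMAP_NONE);
}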
1387 | ||
43d8eac4 | 1388 | /** |
4d510f3d MWO |
1389 | * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. |
1390 | * @folio: The folio to add the mapping to. | |
9617d95e NP |
1391 | * @vma: the vm area in which the mapping is added |
1392 | * @address: the user virtual address mapped | |
15bde4ab | 1393 | * @flags: The rmap flags |
40f2bbf7 | 1394 | * |
84f0169e | 1395 | * Like folio_add_anon_rmap_*() but must only be called on *new* folios. |
9617d95e | 1396 | * This means the inc-and-test can be bypassed. |
15bde4ab BS |
1397 | * The folio doesn't necessarily need to be locked while it's exclusive |
1398 | * unless two threads map it concurrently. However, the folio must be | |
1399 | * locked if it's shared. | |
4d510f3d | 1400 | * |
15bde4ab | 1401 | * If the folio is pmd-mappable, it is accounted as a THP. |
9617d95e | 1402 | */ |
4d510f3d | 1403 | void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, |
15bde4ab | 1404 | unsigned long address, rmap_t flags) |
9617d95e | 1405 | { |
15bde4ab BS |
1406 | const int nr = folio_nr_pages(folio); |
1407 | const bool exclusive = flags & RMAP_EXCLUSIVE; | |
15c0536f | 1408 | int nr_pmdmapped = 0; |
d281ee61 | 1409 | |
a4ea1864 | 1410 | VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); |
15bde4ab | 1411 | VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); |
372cbd4d RR |
1412 | VM_BUG_ON_VMA(address < vma->vm_start || |
1413 | address + (nr << PAGE_SHIFT) > vma->vm_end, vma); | |
9ae2feac | 1414 | |
9651fced JD |
1415 | /* |
1416 | * VM_DROPPABLE mappings don't swap; instead they're just dropped when | |
1417 | * under memory pressure. | |
1418 | */ | |
7a3fad30 | 1419 | if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE)) |
9ae2feac | 1420 | __folio_set_swapbacked(folio); |
15bde4ab | 1421 | __folio_set_anon(folio, vma, address, exclusive); |
d8dd5e97 | 1422 | |
372cbd4d | 1423 | if (likely(!folio_test_large(folio))) { |
d8dd5e97 | 1424 | /* increment count (starts at -1) */ |
4d510f3d | 1425 | atomic_set(&folio->_mapcount, 0); |
15bde4ab BS |
1426 | if (exclusive) |
1427 | SetPageAnonExclusive(&folio->page); | |
372cbd4d RR |
1428 | } else if (!folio_test_pmd_mappable(folio)) { |
1429 | int i; | |
1430 | ||
1431 | for (i = 0; i < nr; i++) { | |
1432 | struct page *page = folio_page(folio, i); | |
1433 | ||
1434 | /* increment count (starts at -1) */ | |
1435 | atomic_set(&page->_mapcount, 0); | |
15bde4ab BS |
1436 | if (exclusive) |
1437 | SetPageAnonExclusive(page); | |
372cbd4d RR |
1438 | } |
1439 | ||
05c5323b DH |
1440 | /* increment count (starts at -1) */ |
1441 | atomic_set(&folio->_large_mapcount, nr - 1); | |
372cbd4d | 1442 | atomic_set(&folio->_nr_pages_mapped, nr); |
d8dd5e97 | 1443 | } else { |
53f9263b | 1444 | /* increment count (starts at -1) */ |
4d510f3d | 1445 | atomic_set(&folio->_entire_mapcount, 0); |
05c5323b DH |
1446 | /* increment count (starts at -1) */ |
1447 | atomic_set(&folio->_large_mapcount, 0); | |
e78a13fd | 1448 | atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); |
15bde4ab BS |
1449 | if (exclusive) |
1450 | SetPageAnonExclusive(&folio->page); | |
15c0536f | 1451 | nr_pmdmapped = nr; |
d281ee61 | 1452 | } |
d8dd5e97 | 1453 | |
15c0536f | 1454 | __folio_mod_stat(folio, nr, nr_pmdmapped); |
9617d95e NP |
1455 | } |
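/*
 * Illustrative sketch, not part of rmap.c: the typical ordering in an
 * anonymous-fault path for a freshly allocated folio.  The folio is
 * exclusive, so it need not be locked; the caller is assumed to hold the
 * page table lock and to install the pte afterwards with set_pte_at().
 * The function name is an example only.
 */
static inline void example_map_new_anon_folio(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr)
{
	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
	folio_add_lru_vma(folio, vma);
	/* ... the caller then sets up and installs the pte ... */
}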
1456 | ||
68f03208 DH |
1457 | static __always_inline void __folio_add_file_rmap(struct folio *folio, |
1458 | struct page *page, int nr_pages, struct vm_area_struct *vma, | |
1459 | enum rmap_level level) | |
1da177e4 | 1460 | { |
96fd7495 | 1461 | int nr, nr_pmdmapped = 0; |
dd78fedd | 1462 | |
68f03208 | 1463 | VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); |
9bd3155e | 1464 | |
96fd7495 | 1465 | nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); |
15c0536f | 1466 | __folio_mod_stat(folio, nr, nr_pmdmapped); |
cea86fe2 | 1467 | |
84f0169e | 1468 | /* See comments in folio_add_anon_rmap_*() */ |
1acbc3f9 YF |
1469 | if (!folio_test_large(folio)) |
1470 | mlock_vma_folio(folio, vma); | |
1da177e4 LT |
1471 | } |
1472 | ||
68f03208 DH |
1473 | /** |
1474 | * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio | |
1475 | * @folio: The folio to add the mappings to | |
1476 | * @page: The first page to add | |
1477 | * @nr_pages: The number of pages that will be mapped using PTEs | |
1478 | * @vma: The vm area in which the mappings are added | |
1479 | * | |
1480 | * The page range of the folio is defined by [page, page + nr_pages) | |
1481 | * | |
1482 | * The caller needs to hold the page table lock. | |
1483 | */ | |
1484 | void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, | |
1485 | int nr_pages, struct vm_area_struct *vma) | |
1486 | { | |
1487 | __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); | |
1488 | } | |
1489 | ||
1490 | /** | |
1491 | * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio | |
1492 | * @folio: The folio to add the mapping to | |
1493 | * @page: The first page to add | |
1494 | * @vma: The vm area in which the mapping is added | |
1495 | * | |
1496 | * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) | |
1497 | * | |
1498 | * The caller needs to hold the page table lock. | |
1499 | */ | |
1500 | void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, | |
1501 | struct vm_area_struct *vma) | |
1502 | { | |
1503 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
1504 | __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); | |
1505 | #else | |
1506 | WARN_ON_ONCE(true); | |
1507 | #endif | |
1508 | } | |
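/*
 * Illustrative sketch, not part of rmap.c: a hypothetical caller accounting
 * PTE mappings of a pagecache folio, as a fault handler would after
 * installing the entries under the page table lock.  Names are examples
 * only.
 */
static inline void example_account_file_mapping(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma)
{
	/* No rmap_t flags here: file folios have no anon-exclusive state. */
	folio_add_file_rmap_ptes(folio, page, nr_pages, vma);
}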
1509 | ||
b06dc281 DH |
1510 | static __always_inline void __folio_remove_rmap(struct folio *folio, |
1511 | struct page *page, int nr_pages, struct vm_area_struct *vma, | |
1512 | enum rmap_level level) | |
1513 | { | |
62beb906 | 1514 | atomic_t *mapped = &folio->_nr_pages_mapped; |
b06dc281 | 1515 | int last, nr = 0, nr_pmdmapped = 0; |
7491f3f3 | 1516 | bool partially_mapped = false; |
dd78fedd | 1517 | |
b06dc281 DH |
1518 | __folio_rmap_sanity_checks(folio, page, nr_pages, level); |
1519 | ||
1520 | switch (level) { | |
1521 | case RMAP_LEVEL_PTE: | |
46d62de7 DH |
1522 | if (!folio_test_large(folio)) { |
1523 | nr = atomic_add_negative(-1, &page->_mapcount); | |
1524 | break; | |
1525 | } | |
1526 | ||
05c5323b | 1527 | atomic_sub(nr_pages, &folio->_large_mapcount); |
b06dc281 DH |
1528 | do { |
1529 | last = atomic_add_negative(-1, &page->_mapcount); | |
46d62de7 | 1530 | if (last) { |
b06dc281 | 1531 | last = atomic_dec_return_relaxed(mapped); |
46d62de7 DH |
1532 | if (last < ENTIRELY_MAPPED) |
1533 | nr++; | |
b06dc281 | 1534 | } |
b06dc281 | 1535 | } while (page++, --nr_pages > 0); |
7491f3f3 ZY |
1536 | |
1537 | partially_mapped = nr && atomic_read(mapped); | |
b06dc281 DH |
1538 | break; |
1539 | case RMAP_LEVEL_PMD: | |
05c5323b | 1540 | atomic_dec(&folio->_large_mapcount); |
62beb906 | 1541 | last = atomic_add_negative(-1, &folio->_entire_mapcount); |
9bd3155e | 1542 | if (last) { |
e78a13fd DH |
1543 | nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); |
1544 | if (likely(nr < ENTIRELY_MAPPED)) { | |
62beb906 | 1545 | nr_pmdmapped = folio_nr_pages(folio); |
eec20426 | 1546 | nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); |
6287b7da HD |
1547 | /* Raced ahead of another remove and an add? */ |
1548 | if (unlikely(nr < 0)) | |
1549 | nr = 0; | |
1550 | } else { | |
e78a13fd | 1551 | /* An add of ENTIRELY_MAPPED raced ahead */ |
6287b7da HD |
1552 | nr = 0; |
1553 | } | |
9bd3155e | 1554 | } |
7491f3f3 ZY |
1555 | |
1556 | partially_mapped = nr < nr_pmdmapped; | |
b06dc281 | 1557 | break; |
dd78fedd | 1558 | } |
cb67f428 | 1559 | |
9bd3155e | 1560 | if (nr) { |
f1fe80d4 | 1561 | /* |
7dc7c5ef | 1562 | * Queue anon large folio for deferred split if at least one |
62beb906 MWO |
1563 | * page of the folio is unmapped and at least one page |
1564 | * is still mapped. | |
7491f3f3 ZY |
1565 | * |
1566 | * Check partially_mapped first to ensure it is a large folio. | |
f1fe80d4 | 1567 | */ |
7491f3f3 ZY |
1568 | if (folio_test_anon(folio) && partially_mapped && |
1569 | list_empty(&folio->_deferred_list)) | |
1570 | deferred_split_folio(folio); | |
53f9263b | 1571 | } |
15c0536f | 1572 | __folio_mod_stat(folio, -nr, -nr_pmdmapped); |
53f9263b | 1573 | |
b904dcfe | 1574 | /* |
672aa27d | 1575 | * It would be tidy to reset folio_test_anon mapping when fully |
84f0169e | 1576 | * unmapped, but that might overwrite a racing folio_add_anon_rmap_*() |
672aa27d MWO |
1577 | * which increments mapcount after us but sets mapping before us: |
1578 | * so leave the reset to free_pages_prepare, and remember that | |
1579 | * it's only reliable while mapped. | |
b904dcfe | 1580 | */ |
9bd3155e | 1581 | |
1acbc3f9 | 1582 | munlock_vma_folio(folio, vma); |
1da177e4 LT |
1583 | } |
1584 | ||
b06dc281 DH |
1585 | /** |
1586 | * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio | |
1587 | * @folio: The folio to remove the mappings from | |
1588 | * @page: The first page to remove | |
1589 | * @nr_pages: The number of pages that will be removed from the mapping | |
1590 | * @vma: The vm area from which the mappings are removed | |
1591 | * | |
1592 | * The page range of the folio is defined by [page, page + nr_pages) | |
1593 | * | |
1594 | * The caller needs to hold the page table lock. | |
1595 | */ | |
1596 | void folio_remove_rmap_ptes(struct folio *folio, struct page *page, | |
1597 | int nr_pages, struct vm_area_struct *vma) | |
1598 | { | |
1599 | __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); | |
1600 | } | |
1601 | ||
1602 | /** | |
1603 | * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio | |
1604 | * @folio: The folio to remove the mapping from | |
1605 | * @page: The first page to remove | |
1606 | * @vma: The vm area from which the mapping is removed | |
1607 | * | |
1608 | * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) | |
1609 | * | |
1610 | * The caller needs to hold the page table lock. | |
1611 | */ | |
1612 | void folio_remove_rmap_pmd(struct folio *folio, struct page *page, | |
1613 | struct vm_area_struct *vma) | |
1614 | { | |
1615 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
1616 | __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); | |
1617 | #else | |
1618 | WARN_ON_ONCE(true); | |
1619 | #endif | |
1620 | } | |
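/*
 * Illustrative sketch, not part of rmap.c: the usual teardown pairing, as a
 * zap-style caller would do it.  The pte is cleared under the page table
 * lock, then the reverse mapping and the folio reference are dropped.
 * A real caller would also flush the TLB.  Names are examples only.
 */
static inline void example_zap_one_pte(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep, struct folio *folio,
		struct page *page)
{
	pte_t pteval = ptep_get_and_clear(vma->vm_mm, addr, ptep);

	if (pte_dirty(pteval))
		folio_mark_dirty(folio);
	folio_remove_rmap_pte(folio, page, vma);
	folio_put(folio);
}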
1621 | ||
1da177e4 | 1622 | /* |
52629506 | 1623 | * @arg: enum ttu_flags will be passed to this argument |
1da177e4 | 1624 | */ |
2f031c6f | 1625 | static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, |
52629506 | 1626 | unsigned long address, void *arg) |
1da177e4 LT |
1627 | { |
1628 | struct mm_struct *mm = vma->vm_mm; | |
869f7ee6 | 1629 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
1da177e4 | 1630 | pte_t pteval; |
c7ab0d2f | 1631 | struct page *subpage; |
6c287605 | 1632 | bool anon_exclusive, ret = true; |
ac46d4f3 | 1633 | struct mmu_notifier_range range; |
4708f318 | 1634 | enum ttu_flags flags = (enum ttu_flags)(long)arg; |
c33c7948 | 1635 | unsigned long pfn; |
935d4f0c | 1636 | unsigned long hsz = 0; |
1da177e4 | 1637 | |
732ed558 HD |
1638 | /* |
1639 | * When racing against e.g. zap_pte_range() on another cpu, | |
ca1a0746 | 1640 | * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), |
1fb08ac6 | 1641 | * try_to_unmap() may return before page_mapped() has become false, |
732ed558 HD |
1642 | * if page table locking is skipped: use TTU_SYNC to wait for that. |
1643 | */ | |
1644 | if (flags & TTU_SYNC) | |
1645 | pvmw.flags = PVMW_SYNC; | |
1646 | ||
369ea824 | 1647 | /* |
017b1660 MK |
1648 | * For THP, we have to assume the worst case, i.e. pmd, for invalidation. |
1649 | * For hugetlb, it could be much worse if we need to do pud | |
1650 | * invalidation in the case of pmd sharing. | |
1651 | * | |
869f7ee6 MWO |
1652 | * Note that the folio cannot be freed in this function, as the caller of |
1653 | * try_to_unmap() must hold a reference on the folio. |
369ea824 | 1654 | */ |
2aff7a47 | 1655 | range.end = vma_address_end(&pvmw); |
7d4a8be0 | 1656 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, |
494334e4 | 1657 | address, range.end); |
869f7ee6 | 1658 | if (folio_test_hugetlb(folio)) { |
017b1660 MK |
1659 | /* |
1660 | * If sharing is possible, start and end will be adjusted | |
1661 | * accordingly. | |
1662 | */ | |
ac46d4f3 JG |
1663 | adjust_range_if_pmd_sharing_possible(vma, &range.start, |
1664 | &range.end); | |
935d4f0c RR |
1665 | |
1666 | /* We need the huge page size for set_huge_pte_at() */ | |
1667 | hsz = huge_page_size(hstate_vma(vma)); | |
017b1660 | 1668 | } |
ac46d4f3 | 1669 | mmu_notifier_invalidate_range_start(&range); |
369ea824 | 1670 | |
c7ab0d2f | 1671 | while (page_vma_mapped_walk(&pvmw)) { |
c7ab0d2f | 1672 | /* |
869f7ee6 | 1673 | * If the folio is in an mlock()d vma, we must not swap it out. |
c7ab0d2f | 1674 | */ |
efdb6720 HD |
1675 | if (!(flags & TTU_IGNORE_MLOCK) && |
1676 | (vma->vm_flags & VM_LOCKED)) { | |
cea86fe2 | 1677 | /* Restore the mlock which got missed */ |
1acbc3f9 YF |
1678 | if (!folio_test_large(folio)) |
1679 | mlock_vma_folio(folio, vma); | |
26d21b18 | 1680 | goto walk_abort; |
b87537d9 | 1681 | } |
c7ab0d2f | 1682 | |
735ecdfa LY |
1683 | if (!pvmw.pte) { |
1684 | if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, | |
1685 | folio)) | |
1686 | goto walk_done; | |
1687 | ||
1688 | if (flags & TTU_SPLIT_HUGE_PMD) { | |
1689 | /* | |
1690 | * We temporarily have to drop the PTL and | |
1691 | * restart so we can process the PTE-mapped THP. | |
1692 | */ | |
1693 | split_huge_pmd_locked(vma, pvmw.address, | |
1694 | pvmw.pmd, false, folio); | |
1695 | flags &= ~TTU_SPLIT_HUGE_PMD; | |
1696 | page_vma_mapped_walk_restart(&pvmw); | |
1697 | continue; | |
1698 | } | |
29e847d2 LY |
1699 | } |
1700 | ||
1701 | /* Unexpected PMD-mapped THP? */ | |
1702 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); | |
1703 | ||
c33c7948 RR |
1704 | pfn = pte_pfn(ptep_get(pvmw.pte)); |
1705 | subpage = folio_page(folio, pfn - folio_pfn(folio)); | |
785373b4 | 1706 | address = pvmw.address; |
6c287605 DH |
1707 | anon_exclusive = folio_test_anon(folio) && |
1708 | PageAnonExclusive(subpage); | |
785373b4 | 1709 | |
dfc7ab57 | 1710 | if (folio_test_hugetlb(folio)) { |
0506c31d BW |
1711 | bool anon = folio_test_anon(folio); |
1712 | ||
a00a8759 BW |
1713 | /* |
1714 | * try_to_unmap() is only passed a hugetlb page |
1715 | * in the case where the hugetlb page is poisoned. | |
1716 | */ | |
1717 | VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); | |
54205e9c BW |
1718 | /* |
1719 | * huge_pmd_unshare may unmap an entire PMD page. | |
1720 | * There is no way of knowing exactly which PMDs may | |
1721 | * be cached for this mm, so we must flush them all. | |
1722 | * start/end were already adjusted above to cover this | |
1723 | * range. | |
1724 | */ | |
1725 | flush_cache_range(vma, range.start, range.end); | |
1726 | ||
0506c31d BW |
1727 | /* |
1728 | * To call huge_pmd_unshare, i_mmap_rwsem must be | |
1729 | * held in write mode. Caller needs to explicitly | |
1730 | * do this outside rmap routines. | |
40549ba8 MK |
1731 | * |
1732 | * We also must hold hugetlb vma_lock in write mode. | |
1733 | * Lock order dictates acquiring vma_lock BEFORE | |
1734 | * i_mmap_rwsem. We can only try lock here and fail | |
1735 | * if unsuccessful. | |
0506c31d | 1736 | */ |
40549ba8 MK |
1737 | if (!anon) { |
1738 | VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); | |
26d21b18 LY |
1739 | if (!hugetlb_vma_trylock_write(vma)) |
1740 | goto walk_abort; | |
40549ba8 MK |
1741 | if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { |
1742 | hugetlb_vma_unlock_write(vma); | |
1743 | flush_tlb_range(vma, | |
1744 | range.start, range.end); | |
40549ba8 MK |
1745 | /* |
1746 | * The ref count of the PMD page was | |
1747 | * dropped which is part of the way map | |
1748 | * counting is done for shared PMDs. | |
1749 | * Return 'true' here. When there is | |
1750 | * no other sharing, huge_pmd_unshare | |
1751 | * returns false and we will unmap the | |
1752 | * actual page and drop map count | |
1753 | * to zero. | |
1754 | */ | |
26d21b18 | 1755 | goto walk_done; |
40549ba8 MK |
1756 | } |
1757 | hugetlb_vma_unlock_write(vma); | |
017b1660 | 1758 | } |
a00a8759 | 1759 | pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); |
54205e9c | 1760 | } else { |
c33c7948 | 1761 | flush_cache_page(vma, address, pfn); |
088b8aa5 DH |
1762 | /* Nuke the page table entry. */ |
1763 | if (should_defer_flush(mm, flags)) { | |
a00a8759 BW |
1764 | /* |
1765 | * We clear the PTE but do not flush so potentially | |
1766 | * a remote CPU could still be writing to the folio. | |
1767 | * If the entry was previously clean then the | |
1768 | * architecture must guarantee that a clear->dirty | |
1769 | * transition on a cached TLB entry is written through | |
1770 | * and traps if the PTE is unmapped. | |
1771 | */ | |
1772 | pteval = ptep_get_and_clear(mm, address, pvmw.pte); | |
c7ab0d2f | 1773 | |
f73419bb | 1774 | set_tlb_ubc_flush_pending(mm, pteval, address); |
a00a8759 BW |
1775 | } else { |
1776 | pteval = ptep_clear_flush(vma, address, pvmw.pte); | |
1777 | } | |
c7ab0d2f | 1778 | } |
72b252ae | 1779 | |
999dad82 PX |
1780 | /* |
1781 | * Now the pte is cleared. If this pte was uffd-wp armed, | |
1782 | * we may want to replace a none pte with a marker pte if | |
1783 | * it's file-backed, so we don't lose the tracking info. | |
1784 | */ | |
1785 | pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); | |
1786 | ||
869f7ee6 | 1787 | /* Set the dirty flag on the folio now the pte is gone. */ |
c7ab0d2f | 1788 | if (pte_dirty(pteval)) |
869f7ee6 | 1789 | folio_mark_dirty(folio); |
1da177e4 | 1790 | |
c7ab0d2f KS |
1791 | /* Update high watermark before we lower rss */ |
1792 | update_hiwater_rss(mm); | |
1da177e4 | 1793 | |
6da6b1d4 | 1794 | if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { |
5fd27b8e | 1795 | pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); |
869f7ee6 MWO |
1796 | if (folio_test_hugetlb(folio)) { |
1797 | hugetlb_count_sub(folio_nr_pages(folio), mm); | |
935d4f0c RR |
1798 | set_huge_pte_at(mm, address, pvmw.pte, pteval, |
1799 | hsz); | |
c7ab0d2f | 1800 | } else { |
a23f517b | 1801 | dec_mm_counter(mm, mm_counter(folio)); |
785373b4 | 1802 | set_pte_at(mm, address, pvmw.pte, pteval); |
c7ab0d2f | 1803 | } |
365e9c87 | 1804 | |
bce73e48 | 1805 | } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { |
c7ab0d2f KS |
1806 | /* |
1807 | * The guest indicated that the page content is of no | |
1808 | * interest anymore. Simply discard the pte, vmscan | |
1809 | * will take care of the rest. | |
bce73e48 CB |
1810 | * A future reference will then fault in a new zero |
1811 | * page. When userfaultfd is active, we must not drop | |
1812 | * this page though, as its main user (postcopy | |
1813 | * migration) will not expect userfaults on already | |
1814 | * copied pages. | |
c7ab0d2f | 1815 | */ |
a23f517b | 1816 | dec_mm_counter(mm, mm_counter(folio)); |
869f7ee6 | 1817 | } else if (folio_test_anon(folio)) { |
cfeed8ff | 1818 | swp_entry_t entry = page_swap_entry(subpage); |
c7ab0d2f KS |
1819 | pte_t swp_pte; |
1820 | /* | |
1821 | * Store the swap location in the pte. | |
1822 | * See handle_pte_fault() ... | |
1823 | */ | |
869f7ee6 MWO |
1824 | if (unlikely(folio_test_swapbacked(folio) != |
1825 | folio_test_swapcache(folio))) { | |
eb94a878 | 1826 | WARN_ON_ONCE(1); |
26d21b18 | 1827 | goto walk_abort; |
eb94a878 | 1828 | } |
c7ab0d2f | 1829 | |
802a3a92 | 1830 | /* MADV_FREE page check */ |
869f7ee6 | 1831 | if (!folio_test_swapbacked(folio)) { |
6c8e2a25 MFO |
1832 | int ref_count, map_count; |
1833 | ||
1834 | /* | |
1835 | * Synchronize with gup_pte_range(): | |
1836 | * - clear PTE; barrier; read refcount | |
1837 | * - inc refcount; barrier; read PTE | |
1838 | */ | |
1839 | smp_mb(); | |
1840 | ||
1841 | ref_count = folio_ref_count(folio); | |
1842 | map_count = folio_mapcount(folio); | |
1843 | ||
1844 | /* | |
1845 | * Order reads for page refcount and dirty flag | |
1846 | * (see comments in __remove_mapping()). | |
1847 | */ | |
1848 | smp_rmb(); | |
1849 | ||
1850 | /* | |
1851 | * The only page refs must be one from isolation | |
1852 | * plus the rmap(s) (dropped by discard:). | |
1853 | */ | |
1854 | if (ref_count == 1 + map_count && | |
9651fced JD |
1855 | (!folio_test_dirty(folio) || |
1856 | /* | |
1857 | * Unlike MADV_FREE mappings, VM_DROPPABLE | |
1858 | * ones can be dropped even if they've | |
1859 | * been dirtied. | |
1860 | */ | |
1861 | (vma->vm_flags & VM_DROPPABLE))) { | |
802a3a92 SL |
1862 | dec_mm_counter(mm, MM_ANONPAGES); |
1863 | goto discard; | |
1864 | } | |
1865 | ||
1866 | /* | |
869f7ee6 | 1867 | * If the folio was redirtied, it cannot be |
802a3a92 SL |
1868 | * discarded. Remap the page to page table. |
1869 | */ | |
785373b4 | 1870 | set_pte_at(mm, address, pvmw.pte, pteval); |
9651fced JD |
1871 | /* |
1872 | * Unlike MADV_FREE mappings, VM_DROPPABLE ones | |
1873 | * never get swap backed on failure to drop. | |
1874 | */ | |
1875 | if (!(vma->vm_flags & VM_DROPPABLE)) | |
1876 | folio_set_swapbacked(folio); | |
26d21b18 | 1877 | goto walk_abort; |
c7ab0d2f | 1878 | } |
854e9ed0 | 1879 | |
c7ab0d2f | 1880 | if (swap_duplicate(entry) < 0) { |
785373b4 | 1881 | set_pte_at(mm, address, pvmw.pte, pteval); |
26d21b18 | 1882 | goto walk_abort; |
c7ab0d2f | 1883 | } |
ca827d55 | 1884 | if (arch_unmap_one(mm, vma, address, pteval) < 0) { |
322842ea | 1885 | swap_free(entry); |
ca827d55 | 1886 | set_pte_at(mm, address, pvmw.pte, pteval); |
26d21b18 | 1887 | goto walk_abort; |
ca827d55 | 1888 | } |
088b8aa5 | 1889 | |
e3b4b137 | 1890 | /* See folio_try_share_anon_rmap(): clear PTE first. */ |
6c287605 | 1891 | if (anon_exclusive && |
e3b4b137 | 1892 | folio_try_share_anon_rmap_pte(folio, subpage)) { |
6c287605 DH |
1893 | swap_free(entry); |
1894 | set_pte_at(mm, address, pvmw.pte, pteval); | |
26d21b18 | 1895 | goto walk_abort; |
6c287605 | 1896 | } |
c7ab0d2f KS |
1897 | if (list_empty(&mm->mmlist)) { |
1898 | spin_lock(&mmlist_lock); | |
1899 | if (list_empty(&mm->mmlist)) | |
1900 | list_add(&mm->mmlist, &init_mm.mmlist); | |
1901 | spin_unlock(&mmlist_lock); | |
1902 | } | |
854e9ed0 | 1903 | dec_mm_counter(mm, MM_ANONPAGES); |
c7ab0d2f KS |
1904 | inc_mm_counter(mm, MM_SWAPENTS); |
1905 | swp_pte = swp_entry_to_pte(entry); | |
1493a191 DH |
1906 | if (anon_exclusive) |
1907 | swp_pte = pte_swp_mkexclusive(swp_pte); | |
c7ab0d2f KS |
1908 | if (pte_soft_dirty(pteval)) |
1909 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | |
f45ec5ff PX |
1910 | if (pte_uffd_wp(pteval)) |
1911 | swp_pte = pte_swp_mkuffd_wp(swp_pte); | |
785373b4 | 1912 | set_pte_at(mm, address, pvmw.pte, swp_pte); |
0f10851e JG |
1913 | } else { |
1914 | /* | |
869f7ee6 MWO |
1915 | * This is a locked file-backed folio, |
1916 | * so it cannot be removed from the page | |
1917 | * cache and replaced by a new folio before | |
1918 | * mmu_notifier_invalidate_range_end, so no | |
1919 | * concurrent thread might update its page table | |
1920 | * to point at a new folio while a device is | |
1921 | * still using this folio. | |
0f10851e | 1922 | * |
ee65728e | 1923 | * See Documentation/mm/mmu_notifier.rst |
0f10851e | 1924 | */ |
6b27cc6c | 1925 | dec_mm_counter(mm, mm_counter_file(folio)); |
0f10851e | 1926 | } |
854e9ed0 | 1927 | discard: |
e135826b DH |
1928 | if (unlikely(folio_test_hugetlb(folio))) |
1929 | hugetlb_remove_rmap(folio); | |
1930 | else | |
ca1a0746 | 1931 | folio_remove_rmap_pte(folio, subpage, vma); |
b7435507 | 1932 | if (vma->vm_flags & VM_LOCKED) |
96f97c43 | 1933 | mlock_drain_local(); |
869f7ee6 | 1934 | folio_put(folio); |
26d21b18 LY |
1935 | continue; |
1936 | walk_abort: | |
1937 | ret = false; | |
1938 | walk_done: | |
1939 | page_vma_mapped_walk_done(&pvmw); | |
1940 | break; | |
c7ab0d2f | 1941 | } |
369ea824 | 1942 | |
ac46d4f3 | 1943 | mmu_notifier_invalidate_range_end(&range); |
369ea824 | 1944 | |
caed0f48 | 1945 | return ret; |
1da177e4 LT |
1946 | } |
1947 | ||
52629506 JK |
1948 | static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) |
1949 | { | |
222100ee | 1950 | return vma_is_temporary_stack(vma); |
52629506 JK |
1951 | } |
1952 | ||
f3ad032c | 1953 | static int folio_not_mapped(struct folio *folio) |
52629506 | 1954 | { |
2f031c6f | 1955 | return !folio_mapped(folio); |
2a52bcbc | 1956 | } |
52629506 | 1957 | |
1da177e4 | 1958 | /** |
869f7ee6 MWO |
1959 | * try_to_unmap - Try to remove all page table mappings to a folio. |
1960 | * @folio: The folio to unmap. | |
14fa31b8 | 1961 | * @flags: action and flags |
1da177e4 LT |
1962 | * |
1963 | * Tries to remove all the page table entries which are mapping this | |
869f7ee6 MWO |
1964 | * folio. It is the caller's responsibility to check if the folio is |
1965 | * still mapped if needed (use TTU_SYNC to prevent accounting races). | |
1da177e4 | 1966 | * |
869f7ee6 | 1967 | * Context: Caller must hold the folio lock. |
1da177e4 | 1968 | */ |
869f7ee6 | 1969 | void try_to_unmap(struct folio *folio, enum ttu_flags flags) |
1da177e4 | 1970 | { |
52629506 JK |
1971 | struct rmap_walk_control rwc = { |
1972 | .rmap_one = try_to_unmap_one, | |
802a3a92 | 1973 | .arg = (void *)flags, |
f3ad032c | 1974 | .done = folio_not_mapped, |
2f031c6f | 1975 | .anon_lock = folio_lock_anon_vma_read, |
52629506 | 1976 | }; |
1da177e4 | 1977 | |
a98a2f0c | 1978 | if (flags & TTU_RMAP_LOCKED) |
2f031c6f | 1979 | rmap_walk_locked(folio, &rwc); |
a98a2f0c | 1980 | else |
2f031c6f | 1981 | rmap_walk(folio, &rwc); |
a98a2f0c AP |
1982 | } |
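/*
 * Illustrative sketch, not part of rmap.c: how a reclaim-style caller is
 * expected to drive try_to_unmap().  The folio must be locked, and TTU_SYNC
 * makes the final folio_mapped() check reliable against racing unmaps.
 * The function name is an example only.
 */
static inline bool example_try_unmap_folio(struct folio *folio)
{
	bool unmapped;

	if (!folio_trylock(folio))
		return false;
	try_to_unmap(folio, TTU_IGNORE_MLOCK | TTU_SYNC);
	unmapped = !folio_mapped(folio);
	folio_unlock(folio);
	return unmapped;
}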
1983 | ||
1984 | /* | |
1985 | * @arg: enum ttu_flags will be passed to this argument. | |
1986 | * | |
1987 | * If TTU_SPLIT_HUGE_PMD is specified, any PMD mappings will be split into PTEs |
64b586d1 | 1988 | * containing migration entries. |
a98a2f0c | 1989 | */ |
2f031c6f | 1990 | static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, |
a98a2f0c AP |
1991 | unsigned long address, void *arg) |
1992 | { | |
1993 | struct mm_struct *mm = vma->vm_mm; | |
4b8554c5 | 1994 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
a98a2f0c AP |
1995 | pte_t pteval; |
1996 | struct page *subpage; | |
6c287605 | 1997 | bool anon_exclusive, ret = true; |
a98a2f0c AP |
1998 | struct mmu_notifier_range range; |
1999 | enum ttu_flags flags = (enum ttu_flags)(long)arg; | |
c33c7948 | 2000 | unsigned long pfn; |
935d4f0c | 2001 | unsigned long hsz = 0; |
a98a2f0c | 2002 | |
a98a2f0c AP |
2003 | /* |
2004 | * When racing against e.g. zap_pte_range() on another cpu, | |
ca1a0746 | 2005 | * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), |
a98a2f0c AP |
2006 | * try_to_migrate() may return before page_mapped() has become false, |
2007 | * if page table locking is skipped: use TTU_SYNC to wait for that. | |
2008 | */ | |
2009 | if (flags & TTU_SYNC) | |
2010 | pvmw.flags = PVMW_SYNC; | |
2011 | ||
2012 | /* | |
2013 | * unmap_page() in mm/huge_memory.c is the only user of migration with | |
2014 | * TTU_SPLIT_HUGE_PMD and it wants to freeze. | |
2015 | */ | |
2016 | if (flags & TTU_SPLIT_HUGE_PMD) | |
af28a988 | 2017 | split_huge_pmd_address(vma, address, true, folio); |
a98a2f0c AP |
2018 | |
2019 | /* | |
2020 | * For THP, we have to assume the worst case, i.e. pmd, for invalidation. |
2021 | * For hugetlb, it could be much worse if we need to do pud | |
2022 | * invalidation in the case of pmd sharing. | |
2023 | * | |
2024 | * Note that the page cannot be freed in this function, as the caller of |
2025 | * try_to_migrate() must hold a reference on the page. |
2026 | */ | |
2aff7a47 | 2027 | range.end = vma_address_end(&pvmw); |
7d4a8be0 | 2028 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, |
a98a2f0c | 2029 | address, range.end); |
4b8554c5 | 2030 | if (folio_test_hugetlb(folio)) { |
a98a2f0c AP |
2031 | /* |
2032 | * If sharing is possible, start and end will be adjusted | |
2033 | * accordingly. | |
2034 | */ | |
2035 | adjust_range_if_pmd_sharing_possible(vma, &range.start, | |
2036 | &range.end); | |
935d4f0c RR |
2037 | |
2038 | /* We need the huge page size for set_huge_pte_at() */ | |
2039 | hsz = huge_page_size(hstate_vma(vma)); | |
a98a2f0c AP |
2040 | } |
2041 | mmu_notifier_invalidate_range_start(&range); | |
2042 | ||
2043 | while (page_vma_mapped_walk(&pvmw)) { | |
2044 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION | |
2045 | /* PMD-mapped THP migration entry */ | |
2046 | if (!pvmw.pte) { | |
4b8554c5 MWO |
2047 | subpage = folio_page(folio, |
2048 | pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); | |
2049 | VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || | |
2050 | !folio_test_pmd_mappable(folio), folio); | |
a98a2f0c | 2051 | |
7f5abe60 DH |
2052 | if (set_pmd_migration_entry(&pvmw, subpage)) { |
2053 | ret = false; | |
2054 | page_vma_mapped_walk_done(&pvmw); | |
2055 | break; | |
2056 | } | |
a98a2f0c AP |
2057 | continue; |
2058 | } | |
2059 | #endif | |
2060 | ||
2061 | /* Unexpected PMD-mapped THP? */ | |
4b8554c5 | 2062 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
a98a2f0c | 2063 | |
c33c7948 RR |
2064 | pfn = pte_pfn(ptep_get(pvmw.pte)); |
2065 | ||
1118234e DH |
2066 | if (folio_is_zone_device(folio)) { |
2067 | /* | |
2068 | * Our PTE is a non-present device exclusive entry and | |
2069 | * calculating the subpage as for the common case would | |
2070 | * result in an invalid pointer. | |
2071 | * | |
2072 | * Since only PAGE_SIZE pages can currently be | |
2073 | * migrated, just set it to page. This will need to be | |
2074 | * changed when hugepage migrations to device private | |
2075 | * memory are supported. | |
2076 | */ | |
2077 | VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); | |
2078 | subpage = &folio->page; | |
2079 | } else { | |
c33c7948 | 2080 | subpage = folio_page(folio, pfn - folio_pfn(folio)); |
1118234e | 2081 | } |
a98a2f0c | 2082 | address = pvmw.address; |
6c287605 DH |
2083 | anon_exclusive = folio_test_anon(folio) && |
2084 | PageAnonExclusive(subpage); | |
a98a2f0c | 2085 | |
dfc7ab57 | 2086 | if (folio_test_hugetlb(folio)) { |
0506c31d BW |
2087 | bool anon = folio_test_anon(folio); |
2088 | ||
54205e9c BW |
2089 | /* |
2090 | * huge_pmd_unshare may unmap an entire PMD page. | |
2091 | * There is no way of knowing exactly which PMDs may | |
2092 | * be cached for this mm, so we must flush them all. | |
2093 | * start/end were already adjusted above to cover this | |
2094 | * range. | |
2095 | */ | |
2096 | flush_cache_range(vma, range.start, range.end); | |
2097 | ||
0506c31d BW |
2098 | /* |
2099 | * To call huge_pmd_unshare, i_mmap_rwsem must be | |
2100 | * held in write mode. Caller needs to explicitly | |
2101 | * do this outside rmap routines. | |
40549ba8 MK |
2102 | * |
2103 | * We also must hold hugetlb vma_lock in write mode. | |
2104 | * Lock order dictates acquiring vma_lock BEFORE | |
2105 | * i_mmap_rwsem. We can only try lock here and | |
2106 | * fail if unsuccessful. | |
0506c31d | 2107 | */ |
40549ba8 MK |
2108 | if (!anon) { |
2109 | VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); | |
2110 | if (!hugetlb_vma_trylock_write(vma)) { | |
2111 | page_vma_mapped_walk_done(&pvmw); | |
2112 | ret = false; | |
2113 | break; | |
2114 | } | |
2115 | if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { | |
2116 | hugetlb_vma_unlock_write(vma); | |
2117 | flush_tlb_range(vma, | |
2118 | range.start, range.end); | |
40549ba8 MK |
2119 | |
2120 | /* | |
2121 | * The ref count of the PMD page was | |
2122 | * dropped which is part of the way map | |
2123 | * counting is done for shared PMDs. | |
2124 | * Return 'true' here. When there is | |
2125 | * no other sharing, huge_pmd_unshare | |
2126 | * returns false and we will unmap the | |
2127 | * actual page and drop map count | |
2128 | * to zero. | |
2129 | */ | |
2130 | page_vma_mapped_walk_done(&pvmw); | |
2131 | break; | |
2132 | } | |
2133 | hugetlb_vma_unlock_write(vma); | |
a98a2f0c | 2134 | } |
5d4af619 BW |
2135 | /* Nuke the hugetlb page table entry */ |
2136 | pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); | |
54205e9c | 2137 | } else { |
c33c7948 | 2138 | flush_cache_page(vma, address, pfn); |
5d4af619 | 2139 | /* Nuke the page table entry. */ |
7e12beb8 YH |
2140 | if (should_defer_flush(mm, flags)) { |
2141 | /* | |
2142 | * We clear the PTE but do not flush so potentially | |
2143 | * a remote CPU could still be writing to the folio. | |
2144 | * If the entry was previously clean then the | |
2145 | * architecture must guarantee that a clear->dirty | |
2146 | * transition on a cached TLB entry is written through | |
2147 | * and traps if the PTE is unmapped. | |
2148 | */ | |
2149 | pteval = ptep_get_and_clear(mm, address, pvmw.pte); | |
2150 | ||
f73419bb | 2151 | set_tlb_ubc_flush_pending(mm, pteval, address); |
7e12beb8 YH |
2152 | } else { |
2153 | pteval = ptep_clear_flush(vma, address, pvmw.pte); | |
2154 | } | |
a98a2f0c AP |
2155 | } |
2156 | ||
4b8554c5 | 2157 | /* Set the dirty flag on the folio now the pte is gone. */ |
a98a2f0c | 2158 | if (pte_dirty(pteval)) |
4b8554c5 | 2159 | folio_mark_dirty(folio); |
a98a2f0c AP |
2160 | |
2161 | /* Update high watermark before we lower rss */ | |
2162 | update_hiwater_rss(mm); | |
2163 | ||
f25cbb7a | 2164 | if (folio_is_device_private(folio)) { |
4b8554c5 | 2165 | unsigned long pfn = folio_pfn(folio); |
a98a2f0c AP |
2166 | swp_entry_t entry; |
2167 | pte_t swp_pte; | |
2168 | ||
6c287605 | 2169 | if (anon_exclusive) |
e3b4b137 DH |
2170 | WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio, |
2171 | subpage)); | |
6c287605 | 2172 | |
a98a2f0c AP |
2173 | /* |
2174 | * Store the pfn of the page in a special migration | |
2175 | * pte. do_swap_page() will wait until the migration | |
2176 | * pte is removed and then restart fault handling. | |
2177 | */ | |
3d88705c AP |
2178 | entry = pte_to_swp_entry(pteval); |
2179 | if (is_writable_device_private_entry(entry)) | |
2180 | entry = make_writable_migration_entry(pfn); | |
6c287605 DH |
2181 | else if (anon_exclusive) |
2182 | entry = make_readable_exclusive_migration_entry(pfn); | |
3d88705c AP |
2183 | else |
2184 | entry = make_readable_migration_entry(pfn); | |
a98a2f0c AP |
2185 | swp_pte = swp_entry_to_pte(entry); |
2186 | ||
2187 | /* | |
2188 | * pteval maps a zone device page and is therefore | |
2189 | * a swap pte. | |
2190 | */ | |
2191 | if (pte_swp_soft_dirty(pteval)) | |
2192 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | |
2193 | if (pte_swp_uffd_wp(pteval)) | |
2194 | swp_pte = pte_swp_mkuffd_wp(swp_pte); | |
2195 | set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); | |
4cc79b33 | 2196 | trace_set_migration_pte(pvmw.address, pte_val(swp_pte), |
059ab7be | 2197 | folio_order(folio)); |
a98a2f0c AP |
2198 | /* |
2199 | * No need to invalidate here, it will synchronize |
2200 | * against the special swap migration pte. |
a98a2f0c | 2201 | */ |
da358d5c | 2202 | } else if (PageHWPoison(subpage)) { |
a98a2f0c | 2203 | pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); |
4b8554c5 MWO |
2204 | if (folio_test_hugetlb(folio)) { |
2205 | hugetlb_count_sub(folio_nr_pages(folio), mm); | |
935d4f0c RR |
2206 | set_huge_pte_at(mm, address, pvmw.pte, pteval, |
2207 | hsz); | |
a98a2f0c | 2208 | } else { |
a23f517b | 2209 | dec_mm_counter(mm, mm_counter(folio)); |
a98a2f0c AP |
2210 | set_pte_at(mm, address, pvmw.pte, pteval); |
2211 | } | |
2212 | ||
2213 | } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { | |
2214 | /* | |
2215 | * The guest indicated that the page content is of no | |
2216 | * interest anymore. Simply discard the pte, vmscan | |
2217 | * will take care of the rest. | |
2218 | * A future reference will then fault in a new zero | |
2219 | * page. When userfaultfd is active, we must not drop | |
2220 | * this page though, as its main user (postcopy | |
2221 | * migration) will not expect userfaults on already | |
2222 | * copied pages. | |
2223 | */ | |
a23f517b | 2224 | dec_mm_counter(mm, mm_counter(folio)); |
a98a2f0c AP |
2225 | } else { |
2226 | swp_entry_t entry; | |
2227 | pte_t swp_pte; | |
2228 | ||
2229 | if (arch_unmap_one(mm, vma, address, pteval) < 0) { | |
5d4af619 | 2230 | if (folio_test_hugetlb(folio)) |
935d4f0c RR |
2231 | set_huge_pte_at(mm, address, pvmw.pte, |
2232 | pteval, hsz); | |
5d4af619 BW |
2233 | else |
2234 | set_pte_at(mm, address, pvmw.pte, pteval); | |
a98a2f0c AP |
2235 | ret = false; |
2236 | page_vma_mapped_walk_done(&pvmw); | |
2237 | break; | |
2238 | } | |
6c287605 DH |
2239 | VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && |
2240 | !anon_exclusive, subpage); | |
088b8aa5 | 2241 | |
e3b4b137 | 2242 | /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ |
0c2ec32b DH |
2243 | if (folio_test_hugetlb(folio)) { |
2244 | if (anon_exclusive && | |
2245 | hugetlb_try_share_anon_rmap(folio)) { | |
935d4f0c RR |
2246 | set_huge_pte_at(mm, address, pvmw.pte, |
2247 | pteval, hsz); | |
0c2ec32b DH |
2248 | ret = false; |
2249 | page_vma_mapped_walk_done(&pvmw); | |
2250 | break; | |
2251 | } | |
2252 | } else if (anon_exclusive && | |
e3b4b137 | 2253 | folio_try_share_anon_rmap_pte(folio, subpage)) { |
0c2ec32b | 2254 | set_pte_at(mm, address, pvmw.pte, pteval); |
6c287605 DH |
2255 | ret = false; |
2256 | page_vma_mapped_walk_done(&pvmw); | |
2257 | break; | |
2258 | } | |
a98a2f0c AP |
2259 | |
2260 | /* | |
2261 | * Store the pfn of the page in a special migration | |
2262 | * pte. do_swap_page() will wait until the migration | |
2263 | * pte is removed and then restart fault handling. | |
2264 | */ | |
2265 | if (pte_write(pteval)) | |
2266 | entry = make_writable_migration_entry( | |
2267 | page_to_pfn(subpage)); | |
6c287605 DH |
2268 | else if (anon_exclusive) |
2269 | entry = make_readable_exclusive_migration_entry( | |
2270 | page_to_pfn(subpage)); | |
a98a2f0c AP |
2271 | else |
2272 | entry = make_readable_migration_entry( | |
2273 | page_to_pfn(subpage)); | |
2e346877 PX |
2274 | if (pte_young(pteval)) |
2275 | entry = make_migration_entry_young(entry); | |
2276 | if (pte_dirty(pteval)) | |
2277 | entry = make_migration_entry_dirty(entry); | |
a98a2f0c AP |
2278 | swp_pte = swp_entry_to_pte(entry); |
2279 | if (pte_soft_dirty(pteval)) | |
2280 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | |
2281 | if (pte_uffd_wp(pteval)) | |
2282 | swp_pte = pte_swp_mkuffd_wp(swp_pte); | |
5d4af619 | 2283 | if (folio_test_hugetlb(folio)) |
935d4f0c RR |
2284 | set_huge_pte_at(mm, address, pvmw.pte, swp_pte, |
2285 | hsz); | |
5d4af619 BW |
2286 | else |
2287 | set_pte_at(mm, address, pvmw.pte, swp_pte); | |
4cc79b33 | 2288 | trace_set_migration_pte(address, pte_val(swp_pte), |
059ab7be | 2289 | folio_order(folio)); |
a98a2f0c AP |
2290 | /* |
2291 | * No need to invalidate here, it will synchronize |
2292 | * against the special swap migration pte. |
2293 | */ | |
2294 | } | |
2295 | ||
e135826b DH |
2296 | if (unlikely(folio_test_hugetlb(folio))) |
2297 | hugetlb_remove_rmap(folio); | |
2298 | else | |
ca1a0746 | 2299 | folio_remove_rmap_pte(folio, subpage, vma); |
b7435507 | 2300 | if (vma->vm_flags & VM_LOCKED) |
96f97c43 | 2301 | mlock_drain_local(); |
4b8554c5 | 2302 | folio_put(folio); |
a98a2f0c AP |
2303 | } |
2304 | ||
2305 | mmu_notifier_invalidate_range_end(&range); | |
2306 | ||
2307 | return ret; | |
2308 | } | |
2309 | ||
2310 | /** | |
2311 | * try_to_migrate - try to replace all page table mappings with swap entries | |
4b8554c5 | 2312 | * @folio: the folio to replace page table entries for |
a98a2f0c AP |
2313 | * @flags: action and flags |
2314 | * | |
4b8554c5 MWO |
2315 | * Tries to remove all the page table entries which are mapping this folio and |
2316 | * replace them with special swap entries. Caller must hold the folio lock. | |
a98a2f0c | 2317 | */ |
4b8554c5 | 2318 | void try_to_migrate(struct folio *folio, enum ttu_flags flags) |
a98a2f0c AP |
2319 | { |
2320 | struct rmap_walk_control rwc = { | |
2321 | .rmap_one = try_to_migrate_one, | |
2322 | .arg = (void *)flags, | |
f3ad032c | 2323 | .done = folio_not_mapped, |
2f031c6f | 2324 | .anon_lock = folio_lock_anon_vma_read, |
a98a2f0c AP |
2325 | }; |
2326 | ||
2327 | /* | |
2328 | * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED, |
7e12beb8 | 2329 | * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags. |
a98a2f0c AP |
2330 | */ |
2331 | if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | | |
7e12beb8 | 2332 | TTU_SYNC | TTU_BATCH_FLUSH))) |
a98a2f0c AP |
2333 | return; |
2334 | ||
f25cbb7a AS |
2335 | if (folio_is_zone_device(folio) && |
2336 | (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) | |
6c855fce HD |
2337 | return; |
2338 | ||
52629506 JK |
2339 | /* |
2340 | * During exec, a temporary VMA is set up and later moved. |
2341 | * The VMA is moved under the anon_vma lock but not the | |
2342 | * page tables leading to a race where migration cannot | |
2343 | * find the migration ptes. Rather than increasing the | |
2344 | * locking requirements of exec(), migration skips | |
2345 | * temporary VMAs until after exec() completes. | |
2346 | */ | |
4b8554c5 | 2347 | if (!folio_test_ksm(folio) && folio_test_anon(folio)) |
52629506 JK |
2348 | rwc.invalid_vma = invalid_migration_vma; |
2349 | ||
2a52bcbc | 2350 | if (flags & TTU_RMAP_LOCKED) |
2f031c6f | 2351 | rmap_walk_locked(folio, &rwc); |
2a52bcbc | 2352 | else |
2f031c6f | 2353 | rmap_walk(folio, &rwc); |
b291f000 | 2354 | } |
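/*
 * Illustrative sketch, not part of rmap.c: the migration core is expected to
 * call try_to_migrate() on a locked, isolated folio and then verify that all
 * mappings were replaced by migration entries before copying it.  The
 * function name is an example only.
 */
static inline bool example_freeze_folio_for_migration(struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);

	try_to_migrate(folio, 0);
	return !folio_mapped(folio);
}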
e9995ef9 | 2355 | |
b756a3b5 AP |
2356 | #ifdef CONFIG_DEVICE_PRIVATE |
2357 | struct make_exclusive_args { | |
2358 | struct mm_struct *mm; | |
2359 | unsigned long address; | |
2360 | void *owner; | |
2361 | bool valid; | |
2362 | }; | |
2363 | ||
2f031c6f | 2364 | static bool page_make_device_exclusive_one(struct folio *folio, |
b756a3b5 AP |
2365 | struct vm_area_struct *vma, unsigned long address, void *priv) |
2366 | { | |
2367 | struct mm_struct *mm = vma->vm_mm; | |
0d251485 | 2368 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
b756a3b5 AP |
2369 | struct make_exclusive_args *args = priv; |
2370 | pte_t pteval; | |
2371 | struct page *subpage; | |
2372 | bool ret = true; | |
2373 | struct mmu_notifier_range range; | |
2374 | swp_entry_t entry; | |
2375 | pte_t swp_pte; | |
c33c7948 | 2376 | pte_t ptent; |
b756a3b5 | 2377 | |
7d4a8be0 | 2378 | mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, |
b756a3b5 | 2379 | vma->vm_mm, address, min(vma->vm_end, |
0d251485 MWO |
2380 | address + folio_size(folio)), |
2381 | args->owner); | |
b756a3b5 AP |
2382 | mmu_notifier_invalidate_range_start(&range); |
2383 | ||
2384 | while (page_vma_mapped_walk(&pvmw)) { | |
2385 | /* Unexpected PMD-mapped THP? */ | |
0d251485 | 2386 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
b756a3b5 | 2387 | |
c33c7948 RR |
2388 | ptent = ptep_get(pvmw.pte); |
2389 | if (!pte_present(ptent)) { | |
b756a3b5 AP |
2390 | ret = false; |
2391 | page_vma_mapped_walk_done(&pvmw); | |
2392 | break; | |
2393 | } | |
2394 | ||
0d251485 | 2395 | subpage = folio_page(folio, |
c33c7948 | 2396 | pte_pfn(ptent) - folio_pfn(folio)); |
b756a3b5 AP |
2397 | address = pvmw.address; |
2398 | ||
2399 | /* Nuke the page table entry. */ | |
c33c7948 | 2400 | flush_cache_page(vma, address, pte_pfn(ptent)); |
b756a3b5 AP |
2401 | pteval = ptep_clear_flush(vma, address, pvmw.pte); |
2402 | ||
0d251485 | 2403 | /* Set the dirty flag on the folio now the pte is gone. */ |
b756a3b5 | 2404 | if (pte_dirty(pteval)) |
0d251485 | 2405 | folio_mark_dirty(folio); |
b756a3b5 AP |
2406 | |
2407 | /* | |
2408 | * Check that our target page is still mapped at the expected | |
2409 | * address. | |
2410 | */ | |
2411 | if (args->mm == mm && args->address == address && | |
2412 | pte_write(pteval)) | |
2413 | args->valid = true; | |
2414 | ||
2415 | /* | |
2416 | * Store the pfn of the page in a special device-exclusive |
2417 | * swap pte. On a CPU fault, do_swap_page() will restore the |
2418 | * original pte and retry the access. |
2419 | */ | |
2420 | if (pte_write(pteval)) | |
2421 | entry = make_writable_device_exclusive_entry( | |
2422 | page_to_pfn(subpage)); | |
2423 | else | |
2424 | entry = make_readable_device_exclusive_entry( | |
2425 | page_to_pfn(subpage)); | |
2426 | swp_pte = swp_entry_to_pte(entry); | |
2427 | if (pte_soft_dirty(pteval)) | |
2428 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | |
2429 | if (pte_uffd_wp(pteval)) | |
2430 | swp_pte = pte_swp_mkuffd_wp(swp_pte); | |
2431 | ||
2432 | set_pte_at(mm, address, pvmw.pte, swp_pte); | |
2433 | ||
2434 | /* | |
2435 | * There is a reference on the page for the swap entry which has | |
2436 | * been removed, so shouldn't take another. | |
2437 | */ | |
ca1a0746 | 2438 | folio_remove_rmap_pte(folio, subpage, vma); |
b756a3b5 AP |
2439 | } |
2440 | ||
2441 | mmu_notifier_invalidate_range_end(&range); | |
2442 | ||
2443 | return ret; | |
2444 | } | |
2445 | ||
2446 | /** | |
0d251485 MWO |
2447 | * folio_make_device_exclusive - Mark the folio exclusively owned by a device. |
2448 | * @folio: The folio to replace page table entries for. | |
2449 | * @mm: The mm_struct where the folio is expected to be mapped. | |
2450 | * @address: Address where the folio is expected to be mapped. | |
b756a3b5 AP |
2451 | * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks |
2452 | * | |
0d251485 MWO |
2453 | * Tries to remove all the page table entries which are mapping this |
2454 | * folio and replace them with special device exclusive swap entries to | |
2455 | * grant a device exclusive access to the folio. | |
b756a3b5 | 2456 | * |
0d251485 MWO |
2457 | * Context: Caller must hold the folio lock. |
2458 | * Return: false if the page is still mapped, or if it could not be unmapped | |
b756a3b5 AP |
2459 | * from the expected address. Otherwise returns true (success). |
2460 | */ | |
0d251485 MWO |
2461 | static bool folio_make_device_exclusive(struct folio *folio, |
2462 | struct mm_struct *mm, unsigned long address, void *owner) | |
b756a3b5 AP |
2463 | { |
2464 | struct make_exclusive_args args = { | |
2465 | .mm = mm, | |
2466 | .address = address, | |
2467 | .owner = owner, | |
2468 | .valid = false, | |
2469 | }; | |
2470 | struct rmap_walk_control rwc = { | |
2471 | .rmap_one = page_make_device_exclusive_one, | |
f3ad032c | 2472 | .done = folio_not_mapped, |
2f031c6f | 2473 | .anon_lock = folio_lock_anon_vma_read, |
b756a3b5 AP |
2474 | .arg = &args, |
2475 | }; | |
2476 | ||
2477 | /* | |
0d251485 MWO |
2478 | * Restrict to anonymous folios for now to avoid potential writeback |
2479 | * issues. | |
b756a3b5 | 2480 | */ |
0d251485 | 2481 | if (!folio_test_anon(folio)) |
b756a3b5 AP |
2482 | return false; |
2483 | ||
2f031c6f | 2484 | rmap_walk(folio, &rwc); |
b756a3b5 | 2485 | |
0d251485 | 2486 | return args.valid && !folio_mapcount(folio); |
b756a3b5 AP |
2487 | } |
2488 | ||
2489 | /** | |
2490 | * make_device_exclusive_range() - Mark a range for exclusive use by a device | |
dd062302 | 2491 | * @mm: mm_struct of associated target process |
b756a3b5 AP |
2492 | * @start: start of the region to mark for exclusive device access |
2493 | * @end: end address of region | |
2494 | * @pages: returns the pages which were successfully marked for exclusive access | |
2495 | * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering | |
2496 | * | |
2497 | * Returns: number of pages found in the range by GUP. A page is marked for | |
2498 | * exclusive access only if the page pointer is non-NULL. | |
2499 | * | |
2500 | * This function finds ptes mapping page(s) to the given address range, locks | |
2501 | * them and replaces mappings with special swap entries preventing userspace CPU | |
2502 | * access. On fault these entries are replaced with the original mapping after | |
2503 | * calling MMU notifiers. | |
2504 | * | |
2505 | * A driver using this to program access from a device must use an mmu notifier |
2506 | * critical section to hold a device-specific lock during programming. Once |
2507 | * programming is complete it should drop the page lock and reference, after |
2508 | * which point CPU access to the page will revoke the exclusive access. |
2509 | */ | |
2510 | int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, | |
2511 | unsigned long end, struct page **pages, | |
2512 | void *owner) | |
2513 | { | |
2514 | long npages = (end - start) >> PAGE_SHIFT; | |
2515 | long i; | |
2516 | ||
2517 | npages = get_user_pages_remote(mm, start, npages, | |
2518 | FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, | |
ca5e8632 | 2519 | pages, NULL); |
b756a3b5 AP |
2520 | if (npages < 0) |
2521 | return npages; | |
2522 | ||
2523 | for (i = 0; i < npages; i++, start += PAGE_SIZE) { | |
0d251485 MWO |
2524 | struct folio *folio = page_folio(pages[i]); |
2525 | if (PageTail(pages[i]) || !folio_trylock(folio)) { | |
2526 | folio_put(folio); | |
b756a3b5 AP |
2527 | pages[i] = NULL; |
2528 | continue; | |
2529 | } | |
2530 | ||
0d251485 MWO |
2531 | if (!folio_make_device_exclusive(folio, mm, start, owner)) { |
2532 | folio_unlock(folio); | |
2533 | folio_put(folio); | |
b756a3b5 AP |
2534 | pages[i] = NULL; |
2535 | } | |
2536 | } | |
2537 | ||
2538 | return npages; | |
2539 | } | |
2540 | EXPORT_SYMBOL_GPL(make_device_exclusive_range); | |
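/*
 * Illustrative sketch, not part of rmap.c: a hypothetical device driver
 * claiming exclusive access to a single page of a process.  The driver is
 * assumed to program the device inside an MMU notifier critical section
 * that filters on @driver_owner, and to drop the page lock and reference
 * once programming is done, as described in the kerneldoc above.
 */
static int example_grab_page_for_device(struct mm_struct *mm,
		unsigned long addr, void *driver_owner)
{
	struct page *page = NULL;
	int ret;

	mmap_read_lock(mm);
	ret = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					  &page, driver_owner);
	mmap_read_unlock(mm);
	if (ret < 0)
		return ret;
	if (!page)
		return -EBUSY;	/* entry could not be made exclusive */

	/* ... program the device here ... */

	unlock_page(page);
	put_page(page);
	return 0;
}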
2541 | #endif | |
2542 | ||
01d8b20d | 2543 | void __put_anon_vma(struct anon_vma *anon_vma) |
76545066 | 2544 | { |
01d8b20d | 2545 | struct anon_vma *root = anon_vma->root; |
76545066 | 2546 | |
624483f3 | 2547 | anon_vma_free(anon_vma); |
01d8b20d PZ |
2548 | if (root != anon_vma && atomic_dec_and_test(&root->refcount)) |
2549 | anon_vma_free(root); | |
76545066 | 2550 | } |
76545066 | 2551 | |
2f031c6f | 2552 | static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, |
6d4675e6 | 2553 | struct rmap_walk_control *rwc) |
faecd8dd JK |
2554 | { |
2555 | struct anon_vma *anon_vma; | |
2556 | ||
0dd1c7bb | 2557 | if (rwc->anon_lock) |
6d4675e6 | 2558 | return rwc->anon_lock(folio, rwc); |
0dd1c7bb | 2559 | |
faecd8dd | 2560 | /* |
2f031c6f | 2561 | * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() |
faecd8dd | 2562 | * because that depends on page_mapped(); but not all its usages |
c1e8d7c6 | 2563 | * are holding mmap_lock. Users without mmap_lock are required to |
faecd8dd JK |
2564 | * take a reference count to prevent the anon_vma from disappearing. |
2565 | */ | |
e05b3453 | 2566 | anon_vma = folio_anon_vma(folio); |
faecd8dd JK |
2567 | if (!anon_vma) |
2568 | return NULL; | |
2569 | ||
6d4675e6 MK |
2570 | if (anon_vma_trylock_read(anon_vma)) |
2571 | goto out; | |
2572 | ||
2573 | if (rwc->try_lock) { | |
2574 | anon_vma = NULL; | |
2575 | rwc->contended = true; | |
2576 | goto out; | |
2577 | } | |
2578 | ||
faecd8dd | 2579 | anon_vma_lock_read(anon_vma); |
6d4675e6 | 2580 | out: |
faecd8dd JK |
2581 | return anon_vma; |
2582 | } | |
2583 | ||
e9995ef9 | 2584 | /* |
e8351ac9 JK |
2585 | * rmap_walk_anon - do something to anonymous page using the object-based |
2586 | * rmap method | |
89be82b4 | 2587 | * @folio: the folio to be handled |
e8351ac9 | 2588 | * @rwc: control variable according to each walk type |
89be82b4 | 2589 | * @locked: caller holds relevant rmap lock |
e8351ac9 | 2590 | * |
89be82b4 KS |
2591 | * Find all the mappings of a folio using the mapping pointer and the vma |
2592 | * chains contained in the anon_vma struct it points to. | |
e9995ef9 | 2593 | */ |
84fbbe21 | 2594 | static void rmap_walk_anon(struct folio *folio, |
6d4675e6 | 2595 | struct rmap_walk_control *rwc, bool locked) |
e9995ef9 HD |
2596 | { |
2597 | struct anon_vma *anon_vma; | |
a8fa41ad | 2598 | pgoff_t pgoff_start, pgoff_end; |
5beb4930 | 2599 | struct anon_vma_chain *avc; |
e9995ef9 | 2600 | |
b9773199 | 2601 | if (locked) { |
e05b3453 | 2602 | anon_vma = folio_anon_vma(folio); |
b9773199 | 2603 | /* anon_vma disappear under us? */ |
e05b3453 | 2604 | VM_BUG_ON_FOLIO(!anon_vma, folio); |
b9773199 | 2605 | } else { |
2f031c6f | 2606 | anon_vma = rmap_walk_anon_lock(folio, rwc); |
b9773199 | 2607 | } |
e9995ef9 | 2608 | if (!anon_vma) |
1df631ae | 2609 | return; |
faecd8dd | 2610 | |
2f031c6f MWO |
2611 | pgoff_start = folio_pgoff(folio); |
2612 | pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; | |
a8fa41ad KS |
2613 | anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, |
2614 | pgoff_start, pgoff_end) { | |
5beb4930 | 2615 | struct vm_area_struct *vma = avc->vma; |
e0abfbb6 MWO |
2616 | unsigned long address = vma_address(vma, pgoff_start, |
2617 | folio_nr_pages(folio)); | |
0dd1c7bb | 2618 | |
494334e4 | 2619 | VM_BUG_ON_VMA(address == -EFAULT, vma); |
ad12695f AA |
2620 | cond_resched(); |
2621 | ||
0dd1c7bb JK |
2622 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
2623 | continue; | |
2624 | ||
2f031c6f | 2625 | if (!rwc->rmap_one(folio, vma, address, rwc->arg)) |
e9995ef9 | 2626 | break; |
2f031c6f | 2627 | if (rwc->done && rwc->done(folio)) |
0dd1c7bb | 2628 | break; |
e9995ef9 | 2629 | } |
b9773199 KS |
2630 | |
2631 | if (!locked) | |
2632 | anon_vma_unlock_read(anon_vma); | |
e9995ef9 HD |
2633 | } |
2634 | ||
e8351ac9 JK |
2635 | /* |
2636 | * rmap_walk_file - do something to file page using the object-based rmap method | |
89be82b4 | 2637 | * @folio: the folio to be handled |
e8351ac9 | 2638 | * @rwc: control variable according to each walk type |
89be82b4 | 2639 | * @locked: caller holds relevant rmap lock |
e8351ac9 | 2640 | * |
89be82b4 | 2641 | * Find all the mappings of a folio using the mapping pointer and the vma chains |
e8351ac9 | 2642 | * contained in the address_space struct it points to. |
e8351ac9 | 2643 | */ |
84fbbe21 | 2644 | static void rmap_walk_file(struct folio *folio, |
6d4675e6 | 2645 | struct rmap_walk_control *rwc, bool locked) |
e9995ef9 | 2646 | { |
2f031c6f | 2647 | struct address_space *mapping = folio_mapping(folio); |
a8fa41ad | 2648 | pgoff_t pgoff_start, pgoff_end; |
e9995ef9 | 2649 | struct vm_area_struct *vma; |
e9995ef9 | 2650 | |
9f32624b JK |
2651 | /* |
2652 | * The page lock not only makes sure that page->mapping cannot | |
2653 | * suddenly be NULLified by truncation, it makes sure that the | |
2654 | * structure at mapping cannot be freed and reused yet, | |
c8c06efa | 2655 | * so we can safely take mapping->i_mmap_rwsem. |
9f32624b | 2656 | */ |
2f031c6f | 2657 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
9f32624b | 2658 | |
e9995ef9 | 2659 | if (!mapping) |
1df631ae | 2660 | return; |
3dec0ba0 | 2661 | |
2f031c6f MWO |
2662 | pgoff_start = folio_pgoff(folio); |
2663 | pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; | |
6d4675e6 MK |
2664 | if (!locked) { |
2665 | if (i_mmap_trylock_read(mapping)) | |
2666 | goto lookup; | |
2667 | ||
2668 | if (rwc->try_lock) { | |
2669 | rwc->contended = true; | |
2670 | return; | |
2671 | } | |
2672 | ||
b9773199 | 2673 | i_mmap_lock_read(mapping); |
6d4675e6 MK |
2674 | } |
2675 | lookup: | |
a8fa41ad KS |
2676 | vma_interval_tree_foreach(vma, &mapping->i_mmap, |
2677 | pgoff_start, pgoff_end) { | |
e0abfbb6 MWO |
2678 | unsigned long address = vma_address(vma, pgoff_start, |
2679 | folio_nr_pages(folio)); | |
0dd1c7bb | 2680 | |
494334e4 | 2681 | VM_BUG_ON_VMA(address == -EFAULT, vma); |
ad12695f AA |
2682 | cond_resched(); |
2683 | ||
0dd1c7bb JK |
2684 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
2685 | continue; | |
2686 | ||
2f031c6f | 2687 | if (!rwc->rmap_one(folio, vma, address, rwc->arg)) |
0dd1c7bb | 2688 | goto done; |
2f031c6f | 2689 | if (rwc->done && rwc->done(folio)) |
0dd1c7bb | 2690 | goto done; |
e9995ef9 | 2691 | } |
0dd1c7bb | 2692 | |
0dd1c7bb | 2693 | done: |
b9773199 KS |
2694 | if (!locked) |
2695 | i_mmap_unlock_read(mapping); | |
e9995ef9 HD |
2696 | } |
2697 | ||
6d4675e6 | 2698 | void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) |
e9995ef9 | 2699 | { |
2f031c6f MWO |
2700 | if (unlikely(folio_test_ksm(folio))) |
2701 | rmap_walk_ksm(folio, rwc); | |
2702 | else if (folio_test_anon(folio)) | |
2703 | rmap_walk_anon(folio, rwc, false); | |
b9773199 | 2704 | else |
2f031c6f | 2705 | rmap_walk_file(folio, rwc, false); |
b9773199 KS |
2706 | } |
2707 | ||
2708 | /* Like rmap_walk, but caller holds relevant rmap lock */ | |
6d4675e6 | 2709 | void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) |
b9773199 KS |
2710 | { |
2711 | /* no ksm support for now */ | |
2f031c6f MWO |
2712 | VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); |
2713 | if (folio_test_anon(folio)) | |
2714 | rmap_walk_anon(folio, rwc, true); | |
e9995ef9 | 2715 | else |
2f031c6f | 2716 | rmap_walk_file(folio, rwc, true); |
e9995ef9 | 2717 | } |
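/*
 * Illustrative sketch, not part of rmap.c: a minimal user of the rmap walk
 * machinery.  The callback is invoked for each vma that may map the folio;
 * returning true keeps the walk going.  The "count" argument and function
 * names are examples only; real users are try_to_unmap() and friends above.
 */
static bool example_count_one_vma(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	(*(int *)arg)++;
	return true;
}

static int example_count_mapping_vmas(struct folio *folio)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = example_count_one_vma,
		.arg = &count,
		.anon_lock = folio_lock_anon_vma_read,
	};

	/* The folio must be locked by the caller, as for any rmap walk. */
	rmap_walk(folio, &rwc);
	return count;
}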
0fe6e20b | 2718 | |
e3390f67 | 2719 | #ifdef CONFIG_HUGETLB_PAGE |
0fe6e20b | 2720 | /* |
451b9514 | 2721 | * The following two functions are for anonymous (private mapped) hugepages. |
0fe6e20b NH |
2722 | * Unlike common anonymous pages, anonymous hugepages have no accounting code |
2723 | * and no lru code, because we handle hugepages differently from common pages. | |
2724 | */ | |
9d5fafd5 DH |
2725 | void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, |
2726 | unsigned long address, rmap_t flags) | |
0fe6e20b | 2727 | { |
a4ea1864 | 2728 | VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); |
c5c54003 DH |
2729 | VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); |
2730 | ||
132b180f | 2731 | atomic_inc(&folio->_entire_mapcount); |
05c5323b | 2732 | atomic_inc(&folio->_large_mapcount); |
c66db8c0 | 2733 | if (flags & RMAP_EXCLUSIVE) |
09c55050 | 2734 | SetPageAnonExclusive(&folio->page); |
132b180f | 2735 | VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && |
09c55050 | 2736 | PageAnonExclusive(&folio->page), folio); |
0fe6e20b NH |
2737 | } |
2738 | ||
9d5fafd5 DH |
2739 | void hugetlb_add_new_anon_rmap(struct folio *folio, |
2740 | struct vm_area_struct *vma, unsigned long address) | |
0fe6e20b | 2741 | { |
a4ea1864 DH |
2742 | VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); |
2743 | ||
0fe6e20b | 2744 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
cb67f428 | 2745 | /* increment count (starts at -1) */ |
db4e5dbd | 2746 | atomic_set(&folio->_entire_mapcount, 0); |
05c5323b | 2747 | atomic_set(&folio->_large_mapcount, 0); |
db4e5dbd | 2748 | folio_clear_hugetlb_restore_reserve(folio); |
c66db8c0 DH |
2749 | __folio_set_anon(folio, vma, address, true); |
2750 | SetPageAnonExclusive(&folio->page); | |
0fe6e20b | 2751 | } |
e3390f67 | 2752 | #endif /* CONFIG_HUGETLB_PAGE */ |