Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * mm/rmap.c - physical to virtual reverse mappings | |
3 | * | |
4 | * Copyright 2001, Rik van Riel <[email protected]> | |
5 | * Released under the General Public License (GPL). | |
6 | * | |
7 | * Simple, low overhead reverse mapping scheme. | |
8 | * Please try to keep this thing as modular as possible. | |
9 | * | |
10 | * Provides methods for unmapping each kind of mapped page: | |
11 | * the anon methods track anonymous pages, and | |
12 | * the file methods track pages belonging to an inode. | |
13 | * | |
14 | * Original design by Rik van Riel <[email protected]> 2001 | |
15 | * File methods by Dave McCracken <[email protected]> 2003, 2004 | |
16 | * Anonymous methods by Andrea Arcangeli <[email protected]> 2004 | |
98f32602 | 17 | * Contributions by Hugh Dickins 2003, 2004 |
1da177e4 LT |
18 | */ |
19 | ||
20 | /* | |
21 | * Lock ordering in mm: | |
22 | * | |
9608703e | 23 | * inode->i_rwsem (while writing or truncating, not reading or faulting) |
c1e8d7c6 | 24 | * mm->mmap_lock |
730633f0 | 25 | * mapping->invalidate_lock (in filemap_fault) |
3a47c54f | 26 | * page->flags PG_locked (lock_page) |
8d9bfb26 | 27 | * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below) |
55fd6fcc SB |
28 | * vma_start_write |
29 | * mapping->i_mmap_rwsem | |
30 | * anon_vma->rwsem | |
31 | * mm->page_table_lock or pte_lock | |
32 | * swap_lock (in swap_duplicate, swap_info_get) | |
33 | * mmlist_lock (in mmput, drain_mmlist and others) | |
34 | * mapping->private_lock (in block_dirty_folio) | |
35 | * folio_lock_memcg move_lock (in block_dirty_folio) | |
36 | * i_pages lock (widely used) | |
37 | * lruvec->lru_lock (in folio_lruvec_lock_irq) | |
38 | * inode->i_lock (in set_page_dirty's __mark_inode_dirty) | |
39 | * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) | |
40 | * sb_lock (within inode_lock in fs/fs-writeback.c) | |
41 | * i_pages lock (widely used, in set_page_dirty, | |
42 | * in arch-dependent flush_dcache_mmap_lock, | |
43 | * within bdi.wb->list_lock in __sync_single_inode) | |
6a46079c | 44 | * |
9608703e | 45 | * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) |
9b679320 | 46 | * ->tasklist_lock |
6a46079c | 47 | * pte map lock |
c0d0381a | 48 | * |
8d9bfb26 MK |
49 | * hugetlbfs PageHuge() take locks in this order: |
50 | * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) | |
51 | * vma_lock (hugetlb specific lock for pmd_sharing) | |
52 | * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing) | |
53 | * page->flags PG_locked (lock_page) | |
1da177e4 LT |
54 | */ |
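
As an editorial aid, the nesting above can be pictured as scopes. The helper below is purely illustrative (it is not part of rmap.c) and shows a caller taking a subset of the documented locks in order for a file-backed mapping, assuming the usual mm headers:

```c
/* Illustrative only: nest a subset of the locks in the documented order. */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>

static void lock_order_sketch(struct mm_struct *mm, struct folio *folio,
			      struct address_space *mapping,
			      pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	mmap_read_lock(mm);                /* mm->mmap_lock            */
	folio_lock(folio);                 /* page->flags PG_locked    */
	i_mmap_lock_read(mapping);         /* mapping->i_mmap_rwsem    */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  /* pte lock   */

	if (pte)
		pte_unmap_unlock(pte, ptl);
	i_mmap_unlock_read(mapping);
	folio_unlock(folio);
	mmap_read_unlock(mm);
}
```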
55 | ||
56 | #include <linux/mm.h> | |
6e84f315 | 57 | #include <linux/sched/mm.h> |
29930025 | 58 | #include <linux/sched/task.h> |
1da177e4 LT |
59 | #include <linux/pagemap.h> |
60 | #include <linux/swap.h> | |
61 | #include <linux/swapops.h> | |
62 | #include <linux/slab.h> | |
63 | #include <linux/init.h> | |
5ad64688 | 64 | #include <linux/ksm.h> |
1da177e4 LT |
65 | #include <linux/rmap.h> |
66 | #include <linux/rcupdate.h> | |
b95f1b31 | 67 | #include <linux/export.h> |
8a9f3ccd | 68 | #include <linux/memcontrol.h> |
cddb8a5c | 69 | #include <linux/mmu_notifier.h> |
64cdd548 | 70 | #include <linux/migrate.h> |
0fe6e20b | 71 | #include <linux/hugetlb.h> |
444f84fd | 72 | #include <linux/huge_mm.h> |
ef5d437f | 73 | #include <linux/backing-dev.h> |
33c3fc71 | 74 | #include <linux/page_idle.h> |
a5430dda | 75 | #include <linux/memremap.h> |
bce73e48 | 76 | #include <linux/userfaultfd_k.h> |
999dad82 | 77 | #include <linux/mm_inline.h> |
1da177e4 LT |
78 | |
79 | #include <asm/tlbflush.h> | |
80 | ||
4cc79b33 | 81 | #define CREATE_TRACE_POINTS |
72b252ae | 82 | #include <trace/events/tlb.h> |
4cc79b33 | 83 | #include <trace/events/migrate.h> |
72b252ae | 84 | |
b291f000 NP |
85 | #include "internal.h" |
86 | ||
fdd2e5f8 | 87 | static struct kmem_cache *anon_vma_cachep; |
5beb4930 | 88 | static struct kmem_cache *anon_vma_chain_cachep; |
fdd2e5f8 AB |
89 | |
90 | static inline struct anon_vma *anon_vma_alloc(void) | |
91 | { | |
01d8b20d PZ |
92 | struct anon_vma *anon_vma; |
93 | ||
94 | anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); | |
95 | if (anon_vma) { | |
96 | atomic_set(&anon_vma->refcount, 1); | |
2555283e JH |
97 | anon_vma->num_children = 0; |
98 | anon_vma->num_active_vmas = 0; | |
7a3ef208 | 99 | anon_vma->parent = anon_vma; |
01d8b20d PZ |
100 | /* |
101 | * Initialise the anon_vma root to point to itself. If called | |
102 | * from fork, the root will be reset to the parent's anon_vma. | |
103 | */ | |
104 | anon_vma->root = anon_vma; | |
105 | } | |
106 | ||
107 | return anon_vma; | |
fdd2e5f8 AB |
108 | } |
109 | ||
01d8b20d | 110 | static inline void anon_vma_free(struct anon_vma *anon_vma) |
fdd2e5f8 | 111 | { |
01d8b20d | 112 | VM_BUG_ON(atomic_read(&anon_vma->refcount)); |
88c22088 PZ |
113 | |
114 | /* | |
2f031c6f | 115 | * Synchronize against folio_lock_anon_vma_read() such that |
88c22088 PZ |
116 | * we can safely hold the lock without the anon_vma getting |
117 | * freed. | |
118 | * | |
119 | * Relies on the full mb implied by the atomic_dec_and_test() from | |
120 | * put_anon_vma() against the acquire barrier implied by | |
2f031c6f | 121 | * down_read_trylock() from folio_lock_anon_vma_read(). This orders: |
88c22088 | 122 | * |
2f031c6f | 123 | * folio_lock_anon_vma_read() VS put_anon_vma() |
4fc3f1d6 | 124 | * down_read_trylock() atomic_dec_and_test() |
88c22088 | 125 | * LOCK MB |
4fc3f1d6 | 126 | * atomic_read() rwsem_is_locked() |
88c22088 PZ |
127 | * |
128 | * LOCK should suffice since the actual taking of the lock must | |
129 | * happen _before_ what follows. | |
130 | */ | |
7f39dda9 | 131 | might_sleep(); |
5a505085 | 132 | if (rwsem_is_locked(&anon_vma->root->rwsem)) { |
4fc3f1d6 | 133 | anon_vma_lock_write(anon_vma); |
08b52706 | 134 | anon_vma_unlock_write(anon_vma); |
88c22088 PZ |
135 | } |
136 | ||
fdd2e5f8 AB |
137 | kmem_cache_free(anon_vma_cachep, anon_vma); |
138 | } | |
1da177e4 | 139 | |
dd34739c | 140 | static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) |
5beb4930 | 141 | { |
dd34739c | 142 | return kmem_cache_alloc(anon_vma_chain_cachep, gfp); |
5beb4930 RR |
143 | } |
144 | ||
e574b5fd | 145 | static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) |
5beb4930 RR |
146 | { |
147 | kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); | |
148 | } | |
149 | ||
6583a843 KC |
150 | static void anon_vma_chain_link(struct vm_area_struct *vma, |
151 | struct anon_vma_chain *avc, | |
152 | struct anon_vma *anon_vma) | |
153 | { | |
154 | avc->vma = vma; | |
155 | avc->anon_vma = anon_vma; | |
156 | list_add(&avc->same_vma, &vma->anon_vma_chain); | |
bf181b9f | 157 | anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); |
6583a843 KC |
158 | } |
159 | ||
d9d332e0 | 160 | /** |
d5a187da | 161 | * __anon_vma_prepare - attach an anon_vma to a memory region |
d9d332e0 LT |
162 | * @vma: the memory region in question |
163 | * | |
164 | * This makes sure the memory mapping described by 'vma' has | |
165 | * an 'anon_vma' attached to it, so that we can associate the | |
166 | * anonymous pages mapped into it with that anon_vma. | |
167 | * | |
d5a187da VB |
168 | * The common case will be that we already have one, which |
169 | * is handled inline by anon_vma_prepare(). But if | |
23a0790a | 170 | * not we either need to find an adjacent mapping that we |
d9d332e0 LT |
171 | * can re-use the anon_vma from (very common when the only |
172 | * reason for splitting a vma has been mprotect()), or we | |
173 | * allocate a new one. | |
174 | * | |
175 | * Anon-vma allocations are very subtle, because we may have | |
2f031c6f | 176 | * optimistically looked up an anon_vma in folio_lock_anon_vma_read() |
aaf1f990 | 177 | * and that may actually touch the rwsem even in the newly |
d9d332e0 LT |
178 | * allocated vma (it depends on RCU to make sure that the |
179 | * anon_vma isn't actually destroyed). | |
180 | * | |
181 | * As a result, we need to do proper anon_vma locking even | |
182 | * for the new allocation. At the same time, we do not want | |
183 | * to do any locking for the common case of already having | |
184 | * an anon_vma. | |
185 | * | |
c1e8d7c6 | 186 | * This must be called with the mmap_lock held for reading. |
d9d332e0 | 187 | */ |
d5a187da | 188 | int __anon_vma_prepare(struct vm_area_struct *vma) |
1da177e4 | 189 | { |
d5a187da VB |
190 | struct mm_struct *mm = vma->vm_mm; |
191 | struct anon_vma *anon_vma, *allocated; | |
5beb4930 | 192 | struct anon_vma_chain *avc; |
1da177e4 LT |
193 | |
194 | might_sleep(); | |
1da177e4 | 195 | |
d5a187da VB |
196 | avc = anon_vma_chain_alloc(GFP_KERNEL); |
197 | if (!avc) | |
198 | goto out_enomem; | |
199 | ||
200 | anon_vma = find_mergeable_anon_vma(vma); | |
201 | allocated = NULL; | |
202 | if (!anon_vma) { | |
203 | anon_vma = anon_vma_alloc(); | |
204 | if (unlikely(!anon_vma)) | |
205 | goto out_enomem_free_avc; | |
2555283e | 206 | anon_vma->num_children++; /* self-parent link for new root */ |
d5a187da VB |
207 | allocated = anon_vma; |
208 | } | |
5beb4930 | 209 | |
d5a187da VB |
210 | anon_vma_lock_write(anon_vma); |
211 | /* page_table_lock to protect against threads */ | |
212 | spin_lock(&mm->page_table_lock); | |
213 | if (likely(!vma->anon_vma)) { | |
214 | vma->anon_vma = anon_vma; | |
215 | anon_vma_chain_link(vma, avc, anon_vma); | |
2555283e | 216 | anon_vma->num_active_vmas++; |
d9d332e0 | 217 | allocated = NULL; |
d5a187da VB |
218 | avc = NULL; |
219 | } | |
220 | spin_unlock(&mm->page_table_lock); | |
221 | anon_vma_unlock_write(anon_vma); | |
1da177e4 | 222 | |
d5a187da VB |
223 | if (unlikely(allocated)) |
224 | put_anon_vma(allocated); | |
225 | if (unlikely(avc)) | |
226 | anon_vma_chain_free(avc); | |
31f2b0eb | 227 | |
1da177e4 | 228 | return 0; |
5beb4930 RR |
229 | |
230 | out_enomem_free_avc: | |
231 | anon_vma_chain_free(avc); | |
232 | out_enomem: | |
233 | return -ENOMEM; | |
1da177e4 LT |
234 | } |
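
The "handled inline by anon_vma_prepare()" fast path mentioned above lives in the rmap header; roughly (paraphrased, not part of this file), it only drops into __anon_vma_prepare() when the vma has no anon_vma yet:

```c
/* Roughly the inline fast path from the rmap header (paraphrased). */
static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}
```

A fault handler typically calls this before installing the first anonymous page into a vma and treats a non-zero return as out-of-memory.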
235 | ||
bb4aa396 LT |
236 | /* |
237 | * This is a useful helper function for locking the anon_vma root as | |
238 | * we traverse the vma->anon_vma_chain, looping over anon_vma's that | |
239 | * have the same vma. | |
240 | * | |
241 | * Such anon_vma's should have the same root, so you'd expect to see | |
242 | * just a single mutex_lock for the whole traversal. | |
243 | */ | |
244 | static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) | |
245 | { | |
246 | struct anon_vma *new_root = anon_vma->root; | |
247 | if (new_root != root) { | |
248 | if (WARN_ON_ONCE(root)) | |
5a505085 | 249 | up_write(&root->rwsem); |
bb4aa396 | 250 | root = new_root; |
5a505085 | 251 | down_write(&root->rwsem); |
bb4aa396 LT |
252 | } |
253 | return root; | |
254 | } | |
255 | ||
256 | static inline void unlock_anon_vma_root(struct anon_vma *root) | |
257 | { | |
258 | if (root) | |
5a505085 | 259 | up_write(&root->rwsem); |
bb4aa396 LT |
260 | } |
261 | ||
5beb4930 RR |
262 | /* |
263 | * Attach the anon_vmas from src to dst. | |
264 | * Returns 0 on success, -ENOMEM on failure. | |
7a3ef208 | 265 | * |
0503ea8f LH |
266 | * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(), |
267 | * copy_vma() and anon_vma_fork(). The first four want an exact copy of src, | |
268 | * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to | |
269 | * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before | |
270 | * call, we can identify this case by checking (!dst->anon_vma && | |
271 | * src->anon_vma). | |
47b390d2 WY |
272 | * |
273 | * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find | |
274 | * and reuse existing anon_vma which has no vmas and only one child anon_vma. | |
275 | * This prevents degradation of anon_vma hierarchy to endless linear chain in | |
276 | * case of constantly forking task. On the other hand, an anon_vma with more | |
277 | * than one child isn't reused even if there was no alive vma, thus rmap | |
278 | * walker has a good chance of avoiding scanning the whole hierarchy when it | |
279 | * searches where page is mapped. | |
5beb4930 RR |
280 | */ |
281 | int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) | |
1da177e4 | 282 | { |
5beb4930 | 283 | struct anon_vma_chain *avc, *pavc; |
bb4aa396 | 284 | struct anon_vma *root = NULL; |
5beb4930 | 285 | |
646d87b4 | 286 | list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { |
bb4aa396 LT |
287 | struct anon_vma *anon_vma; |
288 | ||
dd34739c LT |
289 | avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); |
290 | if (unlikely(!avc)) { | |
291 | unlock_anon_vma_root(root); | |
292 | root = NULL; | |
293 | avc = anon_vma_chain_alloc(GFP_KERNEL); | |
294 | if (!avc) | |
295 | goto enomem_failure; | |
296 | } | |
bb4aa396 LT |
297 | anon_vma = pavc->anon_vma; |
298 | root = lock_anon_vma_root(root, anon_vma); | |
299 | anon_vma_chain_link(dst, avc, anon_vma); | |
7a3ef208 KK |
300 | |
301 | /* | |
2555283e JH |
302 | * Reuse existing anon_vma if it has no vma and only one |
303 | * anon_vma child. | |
7a3ef208 | 304 | * |
2555283e | 305 | * Root anon_vma is never reused: |
7a3ef208 KK |
306 | * it has self-parent reference and at least one child. |
307 | */ | |
47b390d2 | 308 | if (!dst->anon_vma && src->anon_vma && |
2555283e JH |
309 | anon_vma->num_children < 2 && |
310 | anon_vma->num_active_vmas == 0) | |
7a3ef208 | 311 | dst->anon_vma = anon_vma; |
5beb4930 | 312 | } |
7a3ef208 | 313 | if (dst->anon_vma) |
2555283e | 314 | dst->anon_vma->num_active_vmas++; |
bb4aa396 | 315 | unlock_anon_vma_root(root); |
5beb4930 | 316 | return 0; |
1da177e4 | 317 | |
5beb4930 | 318 | enomem_failure: |
3fe89b3e | 319 | /* |
d8e454eb MW |
320 | * dst->anon_vma is dropped here otherwise its num_active_vmas can |
321 | * be incorrectly decremented in unlink_anon_vmas(). | |
3fe89b3e LY |
322 | * We can safely do this because callers of anon_vma_clone() don't care |
323 | * about dst->anon_vma if anon_vma_clone() failed. | |
324 | */ | |
325 | dst->anon_vma = NULL; | |
5beb4930 RR |
326 | unlink_anon_vmas(dst); |
327 | return -ENOMEM; | |
1da177e4 LT |
328 | } |
329 | ||
5beb4930 RR |
330 | /* |
331 | * Attach vma to its own anon_vma, as well as to the anon_vmas that | |
332 | * the corresponding VMA in the parent process is attached to. | |
333 | * Returns 0 on success, non-zero on failure. | |
334 | */ | |
335 | int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) | |
1da177e4 | 336 | { |
5beb4930 RR |
337 | struct anon_vma_chain *avc; |
338 | struct anon_vma *anon_vma; | |
c4ea95d7 | 339 | int error; |
1da177e4 | 340 | |
5beb4930 RR |
341 | /* Don't bother if the parent process has no anon_vma here. */ |
342 | if (!pvma->anon_vma) | |
343 | return 0; | |
344 | ||
7a3ef208 KK |
345 | /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ |
346 | vma->anon_vma = NULL; | |
347 | ||
5beb4930 RR |
348 | /* |
349 | * First, attach the new VMA to the parent VMA's anon_vmas, | |
350 | * so rmap can find non-COWed pages in child processes. | |
351 | */ | |
c4ea95d7 DF |
352 | error = anon_vma_clone(vma, pvma); |
353 | if (error) | |
354 | return error; | |
5beb4930 | 355 | |
7a3ef208 KK |
356 | /* An existing anon_vma has been reused, all done then. */ |
357 | if (vma->anon_vma) | |
358 | return 0; | |
359 | ||
5beb4930 RR |
360 | /* Then add our own anon_vma. */ |
361 | anon_vma = anon_vma_alloc(); | |
362 | if (!anon_vma) | |
363 | goto out_error; | |
2555283e | 364 | anon_vma->num_active_vmas++; |
dd34739c | 365 | avc = anon_vma_chain_alloc(GFP_KERNEL); |
5beb4930 RR |
366 | if (!avc) |
367 | goto out_error_free_anon_vma; | |
5c341ee1 RR |
368 | |
369 | /* | |
aaf1f990 | 370 | * The root anon_vma's rwsem is the lock actually used when we |
5c341ee1 RR |
371 | * lock any of the anon_vmas in this anon_vma tree. |
372 | */ | |
373 | anon_vma->root = pvma->anon_vma->root; | |
7a3ef208 | 374 | anon_vma->parent = pvma->anon_vma; |
76545066 | 375 | /* |
01d8b20d PZ |
376 | * With refcounts, an anon_vma can stay around longer than the |
377 | * process it belongs to. The root anon_vma needs to be pinned until | |
378 | * this anon_vma is freed, because the lock lives in the root. | |
76545066 RR |
379 | */ |
380 | get_anon_vma(anon_vma->root); | |
5beb4930 RR |
381 | /* Mark this anon_vma as the one where our new (COWed) pages go. */ |
382 | vma->anon_vma = anon_vma; | |
4fc3f1d6 | 383 | anon_vma_lock_write(anon_vma); |
5c341ee1 | 384 | anon_vma_chain_link(vma, avc, anon_vma); |
2555283e | 385 | anon_vma->parent->num_children++; |
08b52706 | 386 | anon_vma_unlock_write(anon_vma); |
5beb4930 RR |
387 | |
388 | return 0; | |
389 | ||
390 | out_error_free_anon_vma: | |
01d8b20d | 391 | put_anon_vma(anon_vma); |
5beb4930 | 392 | out_error: |
4946d54c | 393 | unlink_anon_vmas(vma); |
5beb4930 | 394 | return -ENOMEM; |
1da177e4 LT |
395 | } |
396 | ||
5beb4930 RR |
397 | void unlink_anon_vmas(struct vm_area_struct *vma) |
398 | { | |
399 | struct anon_vma_chain *avc, *next; | |
eee2acba | 400 | struct anon_vma *root = NULL; |
5beb4930 | 401 | |
5c341ee1 RR |
402 | /* |
403 | * Unlink each anon_vma chained to the VMA. This list is ordered | |
404 | * from newest to oldest, ensuring the root anon_vma gets freed last. | |
405 | */ | |
5beb4930 | 406 | list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { |
eee2acba PZ |
407 | struct anon_vma *anon_vma = avc->anon_vma; |
408 | ||
409 | root = lock_anon_vma_root(root, anon_vma); | |
bf181b9f | 410 | anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); |
eee2acba PZ |
411 | |
412 | /* | |
413 | * Leave empty anon_vmas on the list - we'll need | |
414 | * to free them outside the lock. | |
415 | */ | |
f808c13f | 416 | if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { |
2555283e | 417 | anon_vma->parent->num_children--; |
eee2acba | 418 | continue; |
7a3ef208 | 419 | } |
eee2acba PZ |
420 | |
421 | list_del(&avc->same_vma); | |
422 | anon_vma_chain_free(avc); | |
423 | } | |
ee8ab190 | 424 | if (vma->anon_vma) { |
2555283e | 425 | vma->anon_vma->num_active_vmas--; |
ee8ab190 LX |
426 | |
427 | /* | |
428 | * vma would still be needed after unlink, and anon_vma will be prepared | |
429 | * when a fault is handled. |
430 | */ | |
431 | vma->anon_vma = NULL; | |
432 | } | |
eee2acba PZ |
433 | unlock_anon_vma_root(root); |
434 | ||
435 | /* | |
436 | * Iterate the list once more, it now only contains empty and unlinked | |
437 | * anon_vmas, destroy them. Could not do before due to __put_anon_vma() | |
5a505085 | 438 | * needing to write-acquire the anon_vma->root->rwsem. |
eee2acba PZ |
439 | */ |
440 | list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { | |
441 | struct anon_vma *anon_vma = avc->anon_vma; | |
442 | ||
2555283e JH |
443 | VM_WARN_ON(anon_vma->num_children); |
444 | VM_WARN_ON(anon_vma->num_active_vmas); | |
eee2acba PZ |
445 | put_anon_vma(anon_vma); |
446 | ||
5beb4930 RR |
447 | list_del(&avc->same_vma); |
448 | anon_vma_chain_free(avc); | |
449 | } | |
450 | } | |
451 | ||
51cc5068 | 452 | static void anon_vma_ctor(void *data) |
1da177e4 | 453 | { |
a35afb83 | 454 | struct anon_vma *anon_vma = data; |
1da177e4 | 455 | |
5a505085 | 456 | init_rwsem(&anon_vma->rwsem); |
83813267 | 457 | atomic_set(&anon_vma->refcount, 0); |
f808c13f | 458 | anon_vma->rb_root = RB_ROOT_CACHED; |
1da177e4 LT |
459 | } |
460 | ||
461 | void __init anon_vma_init(void) | |
462 | { | |
463 | anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), | |
5f0d5a3a | 464 | 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, |
5d097056 VD |
465 | anon_vma_ctor); |
466 | anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, | |
467 | SLAB_PANIC|SLAB_ACCOUNT); | |
1da177e4 LT |
468 | } |
469 | ||
470 | /* | |
6111e4ca PZ |
471 | * Getting a lock on a stable anon_vma from a page off the LRU is tricky! |
472 | * | |
4d8f7418 | 473 | * Since there is no serialization whatsoever against folio_remove_rmap_*() |
ad8a20cf ML |
474 | * the best this function can do is return a refcount-increased anon_vma |
475 | * that might have been relevant to this page. | |
6111e4ca PZ |
476 | * |
477 | * The page might have been remapped to a different anon_vma or the anon_vma | |
478 | * returned may already be freed (and even reused). | |
479 | * | |
bc658c96 PZ |
480 | * In case it was remapped to a different anon_vma, the new anon_vma will be a |
481 | * child of the old anon_vma, and the anon_vma lifetime rules will therefore | |
482 | * ensure that any anon_vma obtained from the page will still be valid for as | |
483 | * long as we observe page_mapped() [ hence all those page_mapped() tests ]. | |
484 | * | |
6111e4ca PZ |
485 | * All users of this function must be very careful when walking the anon_vma |
486 | * chain and verify that the page in question is indeed mapped in it | |
487 | * [ something equivalent to page_mapped_in_vma() ]. | |
488 | * | |
091e4299 | 489 | * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from |
4d8f7418 | 490 | * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid |
091e4299 MC |
491 | * if there is a mapcount, we can dereference the anon_vma after observing |
492 | * those. | |
adef4406 AA |
493 | * |
494 | * NOTE: the caller should normally hold folio lock when calling this. If | |
495 | * not, the caller needs to double check the anon_vma didn't change after | |
496 | * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it | |
497 | * concurrently without folio lock protection). See folio_lock_anon_vma_read() | |
498 | * which has already covered that, and comment above remap_pages(). | |
1da177e4 | 499 | */ |
29eea9b5 | 500 | struct anon_vma *folio_get_anon_vma(struct folio *folio) |
1da177e4 | 501 | { |
746b18d4 | 502 | struct anon_vma *anon_vma = NULL; |
1da177e4 LT |
503 | unsigned long anon_mapping; |
504 | ||
505 | rcu_read_lock(); | |
29eea9b5 | 506 | anon_mapping = (unsigned long)READ_ONCE(folio->mapping); |
3ca7b3c5 | 507 | if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) |
1da177e4 | 508 | goto out; |
29eea9b5 | 509 | if (!folio_mapped(folio)) |
1da177e4 LT |
510 | goto out; |
511 | ||
512 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); | |
746b18d4 PZ |
513 | if (!atomic_inc_not_zero(&anon_vma->refcount)) { |
514 | anon_vma = NULL; | |
515 | goto out; | |
516 | } | |
f1819427 HD |
517 | |
518 | /* | |
29eea9b5 | 519 | * If this folio is still mapped, then its anon_vma cannot have been |
746b18d4 PZ |
520 | * freed. But if it has been unmapped, we have no security against the |
521 | * anon_vma structure being freed and reused (for another anon_vma: | |
5f0d5a3a | 522 | * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() |
746b18d4 | 523 | * above cannot corrupt). |
f1819427 | 524 | */ |
29eea9b5 | 525 | if (!folio_mapped(folio)) { |
7f39dda9 | 526 | rcu_read_unlock(); |
746b18d4 | 527 | put_anon_vma(anon_vma); |
7f39dda9 | 528 | return NULL; |
746b18d4 | 529 | } |
1da177e4 LT |
530 | out: |
531 | rcu_read_unlock(); | |
746b18d4 PZ |
532 | |
533 | return anon_vma; | |
534 | } | |
535 | ||
88c22088 | 536 | /* |
29eea9b5 | 537 | * Similar to folio_get_anon_vma() except it locks the anon_vma. |
88c22088 PZ |
538 | * |
539 | * It's a little more complex as it tries to keep the fast path to a single |
540 | * atomic op -- the trylock. If we fail the trylock, we fall back to getting a | |
29eea9b5 | 541 | * reference like with folio_get_anon_vma() and then block on the mutex |
6d4675e6 | 542 | * on !rwc->try_lock case. |
88c22088 | 543 | */ |
6d4675e6 MK |
544 | struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, |
545 | struct rmap_walk_control *rwc) | |
746b18d4 | 546 | { |
88c22088 | 547 | struct anon_vma *anon_vma = NULL; |
eee0f252 | 548 | struct anon_vma *root_anon_vma; |
88c22088 | 549 | unsigned long anon_mapping; |
746b18d4 | 550 | |
880a99b6 | 551 | retry: |
88c22088 | 552 | rcu_read_lock(); |
9595d769 | 553 | anon_mapping = (unsigned long)READ_ONCE(folio->mapping); |
88c22088 PZ |
554 | if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) |
555 | goto out; | |
9595d769 | 556 | if (!folio_mapped(folio)) |
88c22088 PZ |
557 | goto out; |
558 | ||
559 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); | |
4db0c3c2 | 560 | root_anon_vma = READ_ONCE(anon_vma->root); |
4fc3f1d6 | 561 | if (down_read_trylock(&root_anon_vma->rwsem)) { |
880a99b6 AA |
562 | /* |
563 | * folio_move_anon_rmap() might have changed the anon_vma as we | |
564 | * might not hold the folio lock here. | |
565 | */ | |
566 | if (unlikely((unsigned long)READ_ONCE(folio->mapping) != | |
567 | anon_mapping)) { | |
568 | up_read(&root_anon_vma->rwsem); | |
569 | rcu_read_unlock(); | |
570 | goto retry; | |
571 | } | |
572 | ||
88c22088 | 573 | /* |
9595d769 | 574 | * If the folio is still mapped, then this anon_vma is still |
eee0f252 | 575 | * its anon_vma, and holding the mutex ensures that it will |
bc658c96 | 576 | * not go away, see anon_vma_free(). |
88c22088 | 577 | */ |
9595d769 | 578 | if (!folio_mapped(folio)) { |
4fc3f1d6 | 579 | up_read(&root_anon_vma->rwsem); |
88c22088 PZ |
580 | anon_vma = NULL; |
581 | } | |
582 | goto out; | |
583 | } | |
746b18d4 | 584 | |
6d4675e6 MK |
585 | if (rwc && rwc->try_lock) { |
586 | anon_vma = NULL; | |
587 | rwc->contended = true; | |
588 | goto out; | |
589 | } | |
590 | ||
88c22088 PZ |
591 | /* trylock failed, we got to sleep */ |
592 | if (!atomic_inc_not_zero(&anon_vma->refcount)) { | |
593 | anon_vma = NULL; | |
594 | goto out; | |
595 | } | |
596 | ||
9595d769 | 597 | if (!folio_mapped(folio)) { |
7f39dda9 | 598 | rcu_read_unlock(); |
88c22088 | 599 | put_anon_vma(anon_vma); |
7f39dda9 | 600 | return NULL; |
88c22088 PZ |
601 | } |
602 | ||
603 | /* we pinned the anon_vma, it's safe to sleep */ |
604 | rcu_read_unlock(); | |
4fc3f1d6 | 605 | anon_vma_lock_read(anon_vma); |
88c22088 | 606 | |
880a99b6 AA |
607 | /* |
608 | * folio_move_anon_rmap() might have changed the anon_vma as we might | |
609 | * not hold the folio lock here. | |
610 | */ | |
611 | if (unlikely((unsigned long)READ_ONCE(folio->mapping) != | |
612 | anon_mapping)) { | |
613 | anon_vma_unlock_read(anon_vma); | |
614 | put_anon_vma(anon_vma); | |
615 | anon_vma = NULL; | |
616 | goto retry; | |
617 | } | |
618 | ||
88c22088 PZ |
619 | if (atomic_dec_and_test(&anon_vma->refcount)) { |
620 | /* | |
621 | * Oops, we held the last refcount, release the lock | |
622 | * and bail -- can't simply use put_anon_vma() because | |
4fc3f1d6 | 623 | * we'll deadlock on the anon_vma_lock_write() recursion. |
88c22088 | 624 | */ |
4fc3f1d6 | 625 | anon_vma_unlock_read(anon_vma); |
88c22088 PZ |
626 | __put_anon_vma(anon_vma); |
627 | anon_vma = NULL; | |
628 | } | |
629 | ||
630 | return anon_vma; | |
631 | ||
632 | out: | |
633 | rcu_read_unlock(); | |
746b18d4 | 634 | return anon_vma; |
34bbd704 ON |
635 | } |
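
For orientation, here is a hedged sketch of how an rmap walker consumes this function (loosely modelled on rmap_walk_anon(); simplified, not the real walker). When rwc->try_lock is set and the trylock fails, NULL comes back with rwc->contended set and the walk is simply skipped:

```c
/* Simplified sketch of an anonymous rmap walk; not the real rmap_walk_anon(). */
static void anon_walk_sketch(struct folio *folio, struct rmap_walk_control *rwc)
{
	pgoff_t pgoff_start = folio_pgoff(folio);
	pgoff_t pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	anon_vma = folio_lock_anon_vma_read(folio, rwc);
	if (!anon_vma)	/* unmapped, freed, or contended (try_lock) */
		return;

	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
				       pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;

		if (!rwc->rmap_one(folio, vma,
				   vma_address(&folio->page, vma), rwc->arg))
			break;
	}
	anon_vma_unlock_read(anon_vma);
}
```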
636 | ||
72b252ae | 637 | #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH |
72b252ae MG |
638 | /* |
639 | * Flush TLB entries for recently unmapped pages from remote CPUs. It is | |
640 | * important if a PTE was dirty when it was unmapped that it's flushed | |
641 | * before any IO is initiated on the page to prevent lost writes. Similarly, | |
642 | * it must be flushed before freeing to prevent data leakage. | |
643 | */ | |
644 | void try_to_unmap_flush(void) | |
645 | { | |
646 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; | |
72b252ae MG |
647 | |
648 | if (!tlb_ubc->flush_required) | |
649 | return; | |
650 | ||
e73ad5ff | 651 | arch_tlbbatch_flush(&tlb_ubc->arch); |
72b252ae | 652 | tlb_ubc->flush_required = false; |
d950c947 | 653 | tlb_ubc->writable = false; |
72b252ae MG |
654 | } |
655 | ||
d950c947 MG |
656 | /* Flush iff there are potentially writable TLB entries that can race with IO */ |
657 | void try_to_unmap_flush_dirty(void) | |
658 | { | |
659 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; | |
660 | ||
661 | if (tlb_ubc->writable) | |
662 | try_to_unmap_flush(); | |
663 | } | |
664 | ||
5ee2fa2f YH |
665 | /* |
666 | * Bits 0-14 of mm->tlb_flush_batched record pending generations. | |
667 | * Bits 16-30 of mm->tlb_flush_batched record flushed generations. |
668 | */ | |
669 | #define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16 | |
670 | #define TLB_FLUSH_BATCH_PENDING_MASK \ | |
671 | ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1) | |
672 | #define TLB_FLUSH_BATCH_PENDING_LARGE \ | |
673 | (TLB_FLUSH_BATCH_PENDING_MASK / 2) | |
674 | ||
f73419bb BS |
675 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, |
676 | unsigned long uaddr) | |
72b252ae MG |
677 | { |
678 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; | |
bdeb9188 | 679 | int batch; |
4d4b6d66 YH |
680 | bool writable = pte_dirty(pteval); |
681 | ||
682 | if (!pte_accessible(mm, pteval)) | |
683 | return; | |
72b252ae | 684 | |
f73419bb | 685 | arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr); |
72b252ae | 686 | tlb_ubc->flush_required = true; |
d950c947 | 687 | |
3ea27719 MG |
688 | /* |
689 | * Ensure compiler does not re-order the setting of tlb_flush_batched | |
690 | * before the PTE is cleared. | |
691 | */ | |
692 | barrier(); | |
5ee2fa2f YH |
693 | batch = atomic_read(&mm->tlb_flush_batched); |
694 | retry: | |
695 | if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) { | |
696 | /* | |
697 | * Prevent `pending' from catching up with `flushed' because of | |
698 | * overflow. Reset `pending' and `flushed' to be 1 and 0 if | |
699 | * `pending' becomes large. | |
700 | */ | |
bdeb9188 | 701 | if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) |
5ee2fa2f | 702 | goto retry; |
5ee2fa2f YH |
703 | } else { |
704 | atomic_inc(&mm->tlb_flush_batched); | |
705 | } | |
3ea27719 | 706 | |
d950c947 MG |
707 | /* |
708 | * If the PTE was dirty then it's best to assume it's writable. The | |
709 | * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() | |
710 | * before the page is queued for IO. | |
711 | */ | |
712 | if (writable) | |
713 | tlb_ubc->writable = true; | |
72b252ae MG |
714 | } |
715 | ||
716 | /* | |
717 | * Returns true if the TLB flush should be deferred to the end of a batch of | |
718 | * unmap operations to reduce IPIs. | |
719 | */ | |
720 | static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) | |
721 | { | |
72b252ae MG |
722 | if (!(flags & TTU_BATCH_FLUSH)) |
723 | return false; | |
724 | ||
65c8d30e | 725 | return arch_tlbbatch_should_defer(mm); |
72b252ae | 726 | } |
3ea27719 MG |
727 | |
728 | /* | |
729 | * Reclaim unmaps pages under the PTL but do not flush the TLB prior to | |
730 | * releasing the PTL if TLB flushes are batched. It's possible for a parallel | |
731 | * operation such as mprotect or munmap to race between reclaim unmapping | |
732 | * the page and flushing the page. If this race occurs, it potentially allows | |
733 | * access to data via a stale TLB entry. Tracking all mm's that have TLB | |
734 | * batching in flight would be expensive during reclaim so instead track | |
735 | * whether TLB batching occurred in the past and if so then do a flush here | |
736 | * if required. This will cost one additional flush per reclaim cycle paid | |
737 | * by the first operation at risk such as mprotect and munmap. |
738 | * | |
739 | * This must be called under the PTL so that an access to tlb_flush_batched | |
740 | * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise | |
741 | * via the PTL. | |
742 | */ | |
743 | void flush_tlb_batched_pending(struct mm_struct *mm) | |
744 | { | |
5ee2fa2f YH |
745 | int batch = atomic_read(&mm->tlb_flush_batched); |
746 | int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK; | |
747 | int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT; | |
3ea27719 | 748 | |
5ee2fa2f | 749 | if (pending != flushed) { |
db6c1f6f | 750 | arch_flush_tlb_batched_pending(mm); |
3ea27719 | 751 | /* |
5ee2fa2f YH |
752 | * If the new TLB flushing is pending during flushing, leave |
753 | * mm->tlb_flush_batched as is, to avoid losing flushing. | |
3ea27719 | 754 | */ |
5ee2fa2f YH |
755 | atomic_cmpxchg(&mm->tlb_flush_batched, batch, |
756 | pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); | |
3ea27719 MG |
757 | } |
758 | } | |
72b252ae | 759 | #else |
f73419bb BS |
760 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, |
761 | unsigned long uaddr) | |
72b252ae MG |
762 | { |
763 | } | |
764 | ||
765 | static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) | |
766 | { | |
767 | return false; | |
768 | } | |
769 | #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ | |
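
Putting the pieces together, the deferred-flush protocol looks roughly like the sketch below (hedged; it reuses this file's static helpers, so think of it as living in rmap.c). The unmap side clears PTEs without an immediate remote flush, and the reclaim caller must flush the batch before starting IO on, or freeing, the folios:

```c
/* Hedged sketch of the deferred TLB-flush protocol around unmap. */
static void unmap_one_pte_sketch(struct vm_area_struct *vma, pte_t *ptep,
				 unsigned long addr, enum ttu_flags flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pteval;

	if (should_defer_flush(mm, flags)) {
		/* Clear now; remote CPUs are flushed later as a batch. */
		pteval = ptep_get_and_clear(mm, addr, ptep);
		set_tlb_ubc_flush_pending(mm, pteval, addr);
	} else {
		/* No batching: flush synchronously while clearing. */
		pteval = ptep_clear_flush(vma, addr, ptep);
	}
	(void)pteval;	/* the real code goes on to use the old PTE value */
}

/*
 * The reclaim caller, once a whole batch of folios has been unmapped:
 *	try_to_unmap_flush_dirty();	// before any pageout()/swap IO
 *	try_to_unmap_flush();		// before the folios are freed
 */
```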
770 | ||
1da177e4 | 771 | /* |
bf89c8c8 | 772 | * At what user virtual address is page expected in vma? |
ab941e0f | 773 | * Caller should check the page is actually part of the vma. |
1da177e4 LT |
774 | */ |
775 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) | |
776 | { | |
e05b3453 MWO |
777 | struct folio *folio = page_folio(page); |
778 | if (folio_test_anon(folio)) { | |
779 | struct anon_vma *page__anon_vma = folio_anon_vma(folio); | |
4829b906 HD |
780 | /* |
781 | * Note: swapoff's unuse_vma() is more efficient with this | |
782 | * check, and needs it to match anon_vma when KSM is active. | |
783 | */ | |
784 | if (!vma->anon_vma || !page__anon_vma || | |
785 | vma->anon_vma->root != page__anon_vma->root) | |
21d0d443 | 786 | return -EFAULT; |
31657170 JW |
787 | } else if (!vma->vm_file) { |
788 | return -EFAULT; | |
e05b3453 | 789 | } else if (vma->vm_file->f_mapping != folio->mapping) { |
1da177e4 | 790 | return -EFAULT; |
31657170 | 791 | } |
494334e4 HD |
792 | |
793 | return vma_address(page, vma); | |
1da177e4 LT |
794 | } |
795 | ||
50722804 ZK |
796 | /* |
797 | * Returns the actual pmd_t* where we expect 'address' to be mapped from, or | |
798 | * NULL if it doesn't exist. No guarantees / checks on what the pmd_t* | |
799 | * represents. | |
800 | */ | |
6219049a BL |
801 | pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) |
802 | { | |
803 | pgd_t *pgd; | |
c2febafc | 804 | p4d_t *p4d; |
6219049a BL |
805 | pud_t *pud; |
806 | pmd_t *pmd = NULL; | |
807 | ||
808 | pgd = pgd_offset(mm, address); | |
809 | if (!pgd_present(*pgd)) | |
810 | goto out; | |
811 | ||
c2febafc KS |
812 | p4d = p4d_offset(pgd, address); |
813 | if (!p4d_present(*p4d)) | |
814 | goto out; | |
815 | ||
816 | pud = pud_offset(p4d, address); | |
6219049a BL |
817 | if (!pud_present(*pud)) |
818 | goto out; | |
819 | ||
820 | pmd = pmd_offset(pud, address); | |
6219049a BL |
821 | out: |
822 | return pmd; | |
823 | } | |
824 | ||
b3ac0413 | 825 | struct folio_referenced_arg { |
8749cfea VD |
826 | int mapcount; |
827 | int referenced; | |
828 | unsigned long vm_flags; | |
829 | struct mem_cgroup *memcg; | |
830 | }; | |
1acbc3f9 | 831 | |
8749cfea | 832 | /* |
b3ac0413 | 833 | * arg: folio_referenced_arg will be passed |
8749cfea | 834 | */ |
2f031c6f MWO |
835 | static bool folio_referenced_one(struct folio *folio, |
836 | struct vm_area_struct *vma, unsigned long address, void *arg) | |
8749cfea | 837 | { |
b3ac0413 MWO |
838 | struct folio_referenced_arg *pra = arg; |
839 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); | |
8749cfea | 840 | int referenced = 0; |
1acbc3f9 | 841 | unsigned long start = address, ptes = 0; |
8749cfea | 842 | |
8eaedede KS |
843 | while (page_vma_mapped_walk(&pvmw)) { |
844 | address = pvmw.address; | |
b20ce5e0 | 845 | |
1acbc3f9 YF |
846 | if (vma->vm_flags & VM_LOCKED) { |
847 | if (!folio_test_large(folio) || !pvmw.pte) { | |
848 | /* Restore the mlock which got missed */ | |
849 | mlock_vma_folio(folio, vma); | |
850 | page_vma_mapped_walk_done(&pvmw); | |
851 | pra->vm_flags |= VM_LOCKED; | |
852 | return false; /* To break the loop */ | |
853 | } | |
854 | /* | |
855 | * A large folio that is fully mapped to the VMA will |
856 | * be handled after the pvmw loop. |
857 | * |
858 | * A large folio crossing VMA boundaries is expected |
859 | * to be picked up by page reclaim, but reclaim |
860 | * should skip references from pages that lie within |
861 | * a VM_LOCKED vma and only count references from |
862 | * pages outside that range. |
863 | * |
864 | */ | |
865 | ptes++; | |
866 | pra->mapcount--; | |
867 | continue; | |
8eaedede | 868 | } |
71e3aac0 | 869 | |
8eaedede | 870 | if (pvmw.pte) { |
c33c7948 RR |
871 | if (lru_gen_enabled() && |
872 | pte_young(ptep_get(pvmw.pte))) { | |
018ee47f YZ |
873 | lru_gen_look_around(&pvmw); |
874 | referenced++; | |
875 | } | |
876 | ||
8eaedede | 877 | if (ptep_clear_flush_young_notify(vma, address, |
8788f678 YZ |
878 | pvmw.pte)) |
879 | referenced++; | |
8eaedede KS |
880 | } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { |
881 | if (pmdp_clear_flush_young_notify(vma, address, | |
882 | pvmw.pmd)) | |
8749cfea | 883 | referenced++; |
8eaedede | 884 | } else { |
b3ac0413 | 885 | /* unexpected pmd-mapped folio? */ |
8eaedede | 886 | WARN_ON_ONCE(1); |
8749cfea | 887 | } |
8eaedede KS |
888 | |
889 | pra->mapcount--; | |
b20ce5e0 | 890 | } |
b20ce5e0 | 891 | |
1acbc3f9 YF |
892 | if ((vma->vm_flags & VM_LOCKED) && |
893 | folio_test_large(folio) && | |
894 | folio_within_vma(folio, vma)) { | |
895 | unsigned long s_align, e_align; | |
896 | ||
897 | s_align = ALIGN_DOWN(start, PMD_SIZE); | |
898 | e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE); | |
899 | ||
900 | /* folio doesn't cross page table boundary and fully mapped */ | |
901 | if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) { | |
902 | /* Restore the mlock which got missed */ | |
903 | mlock_vma_folio(folio, vma); | |
904 | pra->vm_flags |= VM_LOCKED; | |
905 | return false; /* To break the loop */ | |
906 | } | |
907 | } | |
908 | ||
33c3fc71 | 909 | if (referenced) |
b3ac0413 MWO |
910 | folio_clear_idle(folio); |
911 | if (folio_test_clear_young(folio)) | |
33c3fc71 VD |
912 | referenced++; |
913 | ||
9f32624b JK |
914 | if (referenced) { |
915 | pra->referenced++; | |
47d4f3ee | 916 | pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; |
1da177e4 | 917 | } |
34bbd704 | 918 | |
9f32624b | 919 | if (!pra->mapcount) |
e4b82222 | 920 | return false; /* To break the loop */ |
9f32624b | 921 | |
e4b82222 | 922 | return true; |
1da177e4 LT |
923 | } |
924 | ||
b3ac0413 | 925 | static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) |
1da177e4 | 926 | { |
b3ac0413 | 927 | struct folio_referenced_arg *pra = arg; |
9f32624b | 928 | struct mem_cgroup *memcg = pra->memcg; |
1da177e4 | 929 | |
8788f678 YZ |
930 | /* |
931 | * Ignore references from this mapping if it has no recency. If the | |
932 | * folio has been used in another mapping, we will catch it; if this | |
933 | * other mapping is already gone, the unmap path will have set the | |
934 | * referenced flag or activated the folio in zap_pte_range(). | |
935 | */ | |
936 | if (!vma_has_recency(vma)) | |
937 | return true; | |
938 | ||
939 | /* | |
940 | * If we are reclaiming on behalf of a cgroup, skip counting on behalf | |
941 | * of references from different cgroups. | |
942 | */ | |
943 | if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) | |
9f32624b | 944 | return true; |
1da177e4 | 945 | |
9f32624b | 946 | return false; |
1da177e4 LT |
947 | } |
948 | ||
949 | /** | |
b3ac0413 MWO |
950 | * folio_referenced() - Test if the folio was referenced. |
951 | * @folio: The folio to test. | |
952 | * @is_locked: Caller holds lock on the folio. | |
72835c86 | 953 | * @memcg: target memory cgroup |
b3ac0413 | 954 | * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. |
1da177e4 | 955 | * |
b3ac0413 MWO |
956 | * Quick test_and_clear_referenced for all mappings of a folio, |
957 | * | |
6d4675e6 MK |
958 | * Return: The number of mappings which referenced the folio. Return -1 if |
959 | * the function bailed out due to rmap lock contention. | |
1da177e4 | 960 | */ |
b3ac0413 MWO |
961 | int folio_referenced(struct folio *folio, int is_locked, |
962 | struct mem_cgroup *memcg, unsigned long *vm_flags) | |
1da177e4 | 963 | { |
5ad64688 | 964 | int we_locked = 0; |
b3ac0413 MWO |
965 | struct folio_referenced_arg pra = { |
966 | .mapcount = folio_mapcount(folio), | |
9f32624b JK |
967 | .memcg = memcg, |
968 | }; | |
969 | struct rmap_walk_control rwc = { | |
b3ac0413 | 970 | .rmap_one = folio_referenced_one, |
9f32624b | 971 | .arg = (void *)&pra, |
2f031c6f | 972 | .anon_lock = folio_lock_anon_vma_read, |
6d4675e6 | 973 | .try_lock = true, |
8788f678 | 974 | .invalid_vma = invalid_folio_referenced_vma, |
9f32624b | 975 | }; |
1da177e4 | 976 | |
6fe6b7e3 | 977 | *vm_flags = 0; |
059d8442 | 978 | if (!pra.mapcount) |
9f32624b JK |
979 | return 0; |
980 | ||
b3ac0413 | 981 | if (!folio_raw_mapping(folio)) |
9f32624b JK |
982 | return 0; |
983 | ||
b3ac0413 MWO |
984 | if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { |
985 | we_locked = folio_trylock(folio); | |
9f32624b JK |
986 | if (!we_locked) |
987 | return 1; | |
1da177e4 | 988 | } |
9f32624b | 989 | |
2f031c6f | 990 | rmap_walk(folio, &rwc); |
9f32624b JK |
991 | *vm_flags = pra.vm_flags; |
992 | ||
993 | if (we_locked) | |
b3ac0413 | 994 | folio_unlock(folio); |
9f32624b | 995 | |
6d4675e6 | 996 | return rwc.contended ? -1 : pra.referenced; |
1da177e4 LT |
997 | } |
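
A hedged sketch of how a reclaim-side caller interprets the result (loosely in the spirit of vmscan's folio_check_references(); names and policy simplified). A return of -1 means the rmap lock was contended, so the folio is kept for now:

```c
/* Hedged sketch of a reclaim-side caller; not the real vmscan policy. */
static bool folio_recently_used_sketch(struct folio *folio,
				       struct mem_cgroup *target_memcg)
{
	unsigned long vm_flags;
	int referenced;

	referenced = folio_referenced(folio, folio_test_locked(folio),
				      target_memcg, &vm_flags);
	if (referenced == -1)		/* rmap lock contended: keep folio */
		return true;
	if (vm_flags & VM_LOCKED)	/* mlocked somewhere: keep folio */
		return true;

	return referenced > 0;
}
```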
998 | ||
6a8e0596 | 999 | static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) |
d08b3851 | 1000 | { |
6a8e0596 MS |
1001 | int cleaned = 0; |
1002 | struct vm_area_struct *vma = pvmw->vma; | |
ac46d4f3 | 1003 | struct mmu_notifier_range range; |
6a8e0596 | 1004 | unsigned long address = pvmw->address; |
d08b3851 | 1005 | |
369ea824 JG |
1006 | /* |
1007 | * We have to assume the worse case ie pmd for invalidation. Note that | |
e83c09a2 | 1008 | * the folio can not be freed from this function. |
369ea824 | 1009 | */ |
7d4a8be0 AP |
1010 | mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, |
1011 | vma->vm_mm, address, vma_address_end(pvmw)); | |
ac46d4f3 | 1012 | mmu_notifier_invalidate_range_start(&range); |
369ea824 | 1013 | |
6a8e0596 | 1014 | while (page_vma_mapped_walk(pvmw)) { |
f27176cf | 1015 | int ret = 0; |
369ea824 | 1016 | |
6a8e0596 MS |
1017 | address = pvmw->address; |
1018 | if (pvmw->pte) { | |
6a8e0596 | 1019 | pte_t *pte = pvmw->pte; |
c33c7948 | 1020 | pte_t entry = ptep_get(pte); |
f27176cf | 1021 | |
c33c7948 | 1022 | if (!pte_dirty(entry) && !pte_write(entry)) |
f27176cf KS |
1023 | continue; |
1024 | ||
c33c7948 | 1025 | flush_cache_page(vma, address, pte_pfn(entry)); |
785373b4 | 1026 | entry = ptep_clear_flush(vma, address, pte); |
f27176cf KS |
1027 | entry = pte_wrprotect(entry); |
1028 | entry = pte_mkclean(entry); | |
785373b4 | 1029 | set_pte_at(vma->vm_mm, address, pte, entry); |
f27176cf KS |
1030 | ret = 1; |
1031 | } else { | |
396bcc52 | 1032 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
6a8e0596 | 1033 | pmd_t *pmd = pvmw->pmd; |
f27176cf KS |
1034 | pmd_t entry; |
1035 | ||
1036 | if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) | |
1037 | continue; | |
1038 | ||
7f9c9b60 MS |
1039 | flush_cache_range(vma, address, |
1040 | address + HPAGE_PMD_SIZE); | |
024eee0e | 1041 | entry = pmdp_invalidate(vma, address, pmd); |
f27176cf KS |
1042 | entry = pmd_wrprotect(entry); |
1043 | entry = pmd_mkclean(entry); | |
785373b4 | 1044 | set_pmd_at(vma->vm_mm, address, pmd, entry); |
f27176cf KS |
1045 | ret = 1; |
1046 | #else | |
e83c09a2 | 1047 | /* unexpected pmd-mapped folio? */ |
f27176cf KS |
1048 | WARN_ON_ONCE(1); |
1049 | #endif | |
1050 | } | |
d08b3851 | 1051 | |
0f10851e | 1052 | if (ret) |
6a8e0596 | 1053 | cleaned++; |
c2fda5fe | 1054 | } |
d08b3851 | 1055 | |
ac46d4f3 | 1056 | mmu_notifier_invalidate_range_end(&range); |
369ea824 | 1057 | |
6a8e0596 MS |
1058 | return cleaned; |
1059 | } | |
1060 | ||
1061 | static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, | |
1062 | unsigned long address, void *arg) | |
1063 | { | |
1064 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); | |
1065 | int *cleaned = arg; | |
1066 | ||
1067 | *cleaned += page_vma_mkclean_one(&pvmw); | |
1068 | ||
e4b82222 | 1069 | return true; |
d08b3851 PZ |
1070 | } |
1071 | ||
9853a407 | 1072 | static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) |
d08b3851 | 1073 | { |
9853a407 | 1074 | if (vma->vm_flags & VM_SHARED) |
871beb8c | 1075 | return false; |
d08b3851 | 1076 | |
871beb8c | 1077 | return true; |
d08b3851 PZ |
1078 | } |
1079 | ||
d9c08e22 | 1080 | int folio_mkclean(struct folio *folio) |
d08b3851 | 1081 | { |
9853a407 JK |
1082 | int cleaned = 0; |
1083 | struct address_space *mapping; | |
1084 | struct rmap_walk_control rwc = { | |
1085 | .arg = (void *)&cleaned, | |
1086 | .rmap_one = page_mkclean_one, | |
1087 | .invalid_vma = invalid_mkclean_vma, | |
1088 | }; | |
d08b3851 | 1089 | |
d9c08e22 | 1090 | BUG_ON(!folio_test_locked(folio)); |
d08b3851 | 1091 | |
d9c08e22 | 1092 | if (!folio_mapped(folio)) |
9853a407 JK |
1093 | return 0; |
1094 | ||
d9c08e22 | 1095 | mapping = folio_mapping(folio); |
9853a407 JK |
1096 | if (!mapping) |
1097 | return 0; | |
1098 | ||
2f031c6f | 1099 | rmap_walk(folio, &rwc); |
d08b3851 | 1100 | |
9853a407 | 1101 | return cleaned; |
d08b3851 | 1102 | } |
d9c08e22 | 1103 | EXPORT_SYMBOL_GPL(folio_mkclean); |
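
A hedged usage sketch: a caller that must write-protect every shared mapping of a pagecache folio before acting on its contents takes the folio lock, calls folio_mkclean(), and looks at how many entries were cleaned (hypothetical helper, not a real caller):

```c
/* Illustrative only: write-protect all shared mappings of a folio. */
static bool wrprotect_folio_mappings_sketch(struct folio *folio)
{
	int cleaned;

	folio_lock(folio);		/* folio_mkclean() requires the lock */
	cleaned = folio_mkclean(folio);
	folio_unlock(folio);

	return cleaned > 0;		/* true if any PTE/PMD was cleaned */
}
```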
d08b3851 | 1104 | |
6a8e0596 MS |
1105 | /** |
1106 | * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of | |
1107 | * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) | |
1108 | * within the @vma of shared mappings. And since clean PTEs | |
1109 | * should also be readonly, write protects them too. | |
1110 | * @pfn: start pfn. | |
1111 | * @nr_pages: number of physically contiguous pages starting with @pfn. |
1112 | * @pgoff: page offset that @pfn is mapped with. |
1113 | * @vma: vma that @pfn is mapped within. |
1114 | * | |
1115 | * Returns the number of cleaned PTEs (including PMDs). | |
1116 | */ | |
1117 | int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, | |
1118 | struct vm_area_struct *vma) | |
1119 | { | |
1120 | struct page_vma_mapped_walk pvmw = { | |
1121 | .pfn = pfn, | |
1122 | .nr_pages = nr_pages, | |
1123 | .pgoff = pgoff, | |
1124 | .vma = vma, | |
1125 | .flags = PVMW_SYNC, | |
1126 | }; | |
1127 | ||
1128 | if (invalid_mkclean_vma(vma, NULL)) | |
1129 | return 0; | |
1130 | ||
1131 | pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma); | |
1132 | VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); | |
1133 | ||
1134 | return page_vma_mkclean_one(&pvmw); | |
1135 | } | |
1136 | ||
b14224fb | 1137 | int folio_total_mapcount(struct folio *folio) |
cb67f428 | 1138 | { |
b14224fb MWO |
1139 | int mapcount = folio_entire_mapcount(folio); |
1140 | int nr_pages; | |
cb67f428 HD |
1141 | int i; |
1142 | ||
b14224fb | 1143 | /* In the common case, avoid the loop when no pages mapped by PTE */ |
eec20426 | 1144 | if (folio_nr_pages_mapped(folio) == 0) |
be5ef2d9 HD |
1145 | return mapcount; |
1146 | /* | |
b14224fb MWO |
1147 | * Add all the PTE mappings of those pages mapped by PTE. |
1148 | * Limit the loop to folio_nr_pages_mapped()? | |
be5ef2d9 HD |
1149 | * Perhaps: given all the raciness, that may be a good or a bad idea. |
1150 | */ | |
b14224fb MWO |
1151 | nr_pages = folio_nr_pages(folio); |
1152 | for (i = 0; i < nr_pages; i++) | |
1153 | mapcount += atomic_read(&folio_page(folio, i)->_mapcount); | |
be5ef2d9 HD |
1154 | |
1155 | /* But each of those _mapcounts was based on -1 */ | |
b14224fb | 1156 | mapcount += nr_pages; |
be5ef2d9 | 1157 | return mapcount; |
cb67f428 HD |
1158 | } |
1159 | ||
96fd7495 DH |
1160 | static __always_inline unsigned int __folio_add_rmap(struct folio *folio, |
1161 | struct page *page, int nr_pages, enum rmap_level level, | |
1162 | int *nr_pmdmapped) | |
1163 | { | |
1164 | atomic_t *mapped = &folio->_nr_pages_mapped; | |
1165 | int first, nr = 0; | |
1166 | ||
1167 | __folio_rmap_sanity_checks(folio, page, nr_pages, level); | |
1168 | ||
1169 | switch (level) { | |
1170 | case RMAP_LEVEL_PTE: | |
1171 | do { | |
1172 | first = atomic_inc_and_test(&page->_mapcount); | |
1173 | if (first && folio_test_large(folio)) { | |
1174 | first = atomic_inc_return_relaxed(mapped); | |
e78a13fd | 1175 | first = (first < ENTIRELY_MAPPED); |
96fd7495 DH |
1176 | } |
1177 | ||
1178 | if (first) | |
1179 | nr++; | |
1180 | } while (page++, --nr_pages > 0); | |
1181 | break; | |
1182 | case RMAP_LEVEL_PMD: | |
1183 | first = atomic_inc_and_test(&folio->_entire_mapcount); | |
1184 | if (first) { | |
e78a13fd DH |
1185 | nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); |
1186 | if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { | |
96fd7495 DH |
1187 | *nr_pmdmapped = folio_nr_pages(folio); |
1188 | nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); | |
1189 | /* Raced ahead of a remove and another add? */ | |
1190 | if (unlikely(nr < 0)) | |
1191 | nr = 0; | |
1192 | } else { | |
e78a13fd | 1193 | /* Raced ahead of a remove of ENTIRELY_MAPPED */ |
96fd7495 DH |
1194 | nr = 0; |
1195 | } | |
1196 | } | |
1197 | break; | |
1198 | } | |
1199 | return nr; | |
1200 | } | |
1201 | ||
c44b6743 | 1202 | /** |
06968625 DH |
1203 | * folio_move_anon_rmap - move a folio to our anon_vma |
1204 | * @folio: The folio to move to our anon_vma | |
1205 | * @vma: The vma the folio belongs to | |
c44b6743 | 1206 | * |
06968625 DH |
1207 | * When a folio belongs exclusively to one process after a COW event, |
1208 | * that folio can be moved into the anon_vma that belongs to just that | |
1209 | * process, so the rmap code will not search the parent or sibling processes. | |
c44b6743 | 1210 | */ |
06968625 | 1211 | void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) |
c44b6743 | 1212 | { |
595af4c9 | 1213 | void *anon_vma = vma->anon_vma; |
5a49973d | 1214 | |
595af4c9 | 1215 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
81d1b09c | 1216 | VM_BUG_ON_VMA(!anon_vma, vma); |
c44b6743 | 1217 | |
595af4c9 | 1218 | anon_vma += PAGE_MAPPING_ANON; |
414e2fb8 VD |
1219 | /* |
1220 | * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written | |
b3ac0413 MWO |
1221 | * simultaneously, so a concurrent reader (eg folio_referenced()'s |
1222 | * folio_test_anon()) will not see one without the other. | |
414e2fb8 | 1223 | */ |
595af4c9 | 1224 | WRITE_ONCE(folio->mapping, anon_vma); |
c44b6743 RR |
1225 | } |
1226 | ||
9617d95e | 1227 | /** |
c66db8c0 DH |
1228 | * __folio_set_anon - set up a new anonymous rmap for a folio |
1229 | * @folio: The folio to set up the new anonymous rmap for. | |
1230 | * @vma: VM area to add the folio to. | |
c33c7948 | 1231 | * @address: User virtual address of the mapping |
c66db8c0 | 1232 | * @exclusive: Whether the folio is exclusive to the process. |
9617d95e | 1233 | */ |
c66db8c0 DH |
1234 | static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, |
1235 | unsigned long address, bool exclusive) | |
9617d95e | 1236 | { |
e8a03feb | 1237 | struct anon_vma *anon_vma = vma->anon_vma; |
ea90002b | 1238 | |
e8a03feb | 1239 | BUG_ON(!anon_vma); |
ea90002b LT |
1240 | |
1241 | /* | |
c66db8c0 DH |
1242 | * If the folio isn't exclusive to this vma, we must use the _oldest_ |
1243 | * possible anon_vma for the folio mapping! | |
ea90002b | 1244 | */ |
4e1c1975 | 1245 | if (!exclusive) |
288468c3 | 1246 | anon_vma = anon_vma->root; |
9617d95e | 1247 | |
16f5e707 | 1248 | /* |
5b4bd90f | 1249 | * page_idle does a lockless/optimistic rmap scan on folio->mapping. |
16f5e707 AS |
1250 | * Make sure the compiler doesn't split the stores of anon_vma and |
1251 | * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code | |
1252 | * could mistake the mapping for a struct address_space and crash. | |
1253 | */ | |
9617d95e | 1254 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
5b4bd90f MWO |
1255 | WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); |
1256 | folio->index = linear_page_index(vma, address); | |
9617d95e NP |
1257 | } |
1258 | ||
c97a9e10 | 1259 | /** |
43d8eac4 | 1260 | * __page_check_anon_rmap - sanity check anonymous rmap addition |
dba438bd MWO |
1261 | * @folio: The folio containing @page. |
1262 | * @page: the page to check the mapping of | |
c97a9e10 NP |
1263 | * @vma: the vm area in which the mapping is added |
1264 | * @address: the user virtual address mapped | |
1265 | */ | |
dba438bd | 1266 | static void __page_check_anon_rmap(struct folio *folio, struct page *page, |
c97a9e10 NP |
1267 | struct vm_area_struct *vma, unsigned long address) |
1268 | { | |
c97a9e10 NP |
1269 | /* |
1270 | * The page's anon-rmap details (mapping and index) are guaranteed to | |
1271 | * be set up correctly at this point. | |
1272 | * | |
84f0169e | 1273 | * We have exclusion against folio_add_anon_rmap_*() because the caller |
90aaca85 | 1274 | * always holds the page locked. |
c97a9e10 | 1275 | * |
cb9089ba | 1276 | * We have exclusion against folio_add_new_anon_rmap because those pages |
c97a9e10 | 1277 | * are initially only visible via the pagetables, and the pte is locked |
cb9089ba | 1278 | * over the call to folio_add_new_anon_rmap. |
c97a9e10 | 1279 | */ |
e05b3453 MWO |
1280 | VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, |
1281 | folio); | |
30c46382 YS |
1282 | VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), |
1283 | page); | |
c97a9e10 NP |
1284 | } |
1285 | ||
8bd51300 DH |
1286 | static __always_inline void __folio_add_anon_rmap(struct folio *folio, |
1287 | struct page *page, int nr_pages, struct vm_area_struct *vma, | |
1288 | unsigned long address, rmap_t flags, enum rmap_level level) | |
1289 | { | |
1290 | int i, nr, nr_pmdmapped = 0; | |
cb67f428 | 1291 | |
8bd51300 | 1292 | nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); |
9bd3155e | 1293 | if (nr_pmdmapped) |
ee0800c2 | 1294 | __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); |
9bd3155e | 1295 | if (nr) |
ee0800c2 | 1296 | __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); |
5ad64688 | 1297 | |
c5c54003 DH |
1298 | if (unlikely(!folio_test_anon(folio))) { |
1299 | VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); | |
a1f34ee1 DH |
1300 | /* |
1301 | * For a PTE-mapped large folio, we only know that the single | |
1302 | * PTE is exclusive. Further, __folio_set_anon() might not get | |
1303 | * folio->index right when not given the address of the head | |
1304 | * page. | |
1305 | */ | |
8bd51300 DH |
1306 | VM_WARN_ON_FOLIO(folio_test_large(folio) && |
1307 | level != RMAP_LEVEL_PMD, folio); | |
c5c54003 DH |
1308 | __folio_set_anon(folio, vma, address, |
1309 | !!(flags & RMAP_EXCLUSIVE)); | |
1310 | } else if (likely(!folio_test_ksm(folio))) { | |
1311 | __page_check_anon_rmap(folio, page, vma, address); | |
c7c3dec1 | 1312 | } |
8bd51300 DH |
1313 | |
1314 | if (flags & RMAP_EXCLUSIVE) { | |
1315 | switch (level) { | |
1316 | case RMAP_LEVEL_PTE: | |
1317 | for (i = 0; i < nr_pages; i++) | |
1318 | SetPageAnonExclusive(page + i); | |
1319 | break; | |
1320 | case RMAP_LEVEL_PMD: | |
1321 | SetPageAnonExclusive(page); | |
1322 | break; | |
1323 | } | |
1324 | } | |
1325 | for (i = 0; i < nr_pages; i++) { | |
1326 | struct page *cur_page = page + i; | |
1327 | ||
1328 | /* While PTE-mapping a THP we have a PMD and a PTE mapping. */ | |
1329 | VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 || | |
1330 | (folio_test_large(folio) && | |
1331 | folio_entire_mapcount(folio) > 1)) && | |
1332 | PageAnonExclusive(cur_page), folio); | |
1333 | } | |
cea86fe2 | 1334 | |
1acbc3f9 YF |
1335 | /* |
1336 | * For large folio, only mlock it if it's fully mapped to VMA. It's | |
1337 | * not easy to check whether the large folio is fully mapped to VMA | |
1338 | * here. Only mlock normal 4K folio and leave page reclaim to handle | |
1339 | * large folio. | |
1340 | */ | |
1341 | if (!folio_test_large(folio)) | |
1342 | mlock_vma_folio(folio, vma); | |
1da177e4 LT |
1343 | } |
1344 | ||
8bd51300 DH |
1345 | /** |
1346 | * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio | |
1347 | * @folio: The folio to add the mappings to | |
1348 | * @page: The first page to add | |
1349 | * @nr_pages: The number of pages which will be mapped | |
1350 | * @vma: The vm area in which the mappings are added | |
1351 | * @address: The user virtual address of the first page to map | |
1352 | * @flags: The rmap flags | |
1353 | * | |
1354 | * The page range of folio is defined by [first_page, first_page + nr_pages) | |
1355 | * | |
1356 | * The caller needs to hold the page table lock, and the page must be locked in | |
1357 | * the anon_vma case: to serialize mapping/index checking after setting them, |
1358 | * and to ensure that an anon folio is not being upgraded racily to a KSM folio | |
1359 | * (but KSM folios are never downgraded). | |
1360 | */ | |
1361 | void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, | |
1362 | int nr_pages, struct vm_area_struct *vma, unsigned long address, | |
1363 | rmap_t flags) | |
1364 | { | |
1365 | __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, | |
1366 | RMAP_LEVEL_PTE); | |
1367 | } | |
1368 | ||
1369 | /** | |
1370 | * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio | |
1371 | * @folio: The folio to add the mapping to | |
1372 | * @page: The first page to add | |
1373 | * @vma: The vm area in which the mapping is added | |
1374 | * @address: The user virtual address of the first page to map | |
1375 | * @flags: The rmap flags | |
1376 | * | |
1377 | * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) |
1378 | * | |
1379 | * The caller needs to hold the page table lock, and the page must be locked in | |
1380 | * the anon_vma case: to serialize mapping/index checking after setting them. |
1381 | */ | |
1382 | void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, | |
1383 | struct vm_area_struct *vma, unsigned long address, rmap_t flags) | |
1384 | { | |
1385 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
1386 | __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, | |
1387 | RMAP_LEVEL_PMD); | |
1388 | #else | |
1389 | WARN_ON_ONCE(true); | |
1390 | #endif | |
1391 | } | |
1392 | ||
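/*
 * Illustrative sketch (editorial addition, not part of rmap.c): a minimal
 * caller of folio_add_anon_rmap_ptes() for a single PTE, in the style of a
 * fault or migration path.  The helper name and its arguments are
 * hypothetical; it only assumes what the kernel-doc above states: the
 * caller holds the page table lock and, for an existing anon folio, the
 * folio lock.
 */
static void example_map_one_anon_pte(struct folio *folio, struct page *page,
		struct vm_area_struct *vma, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	/* Account the new mapping first, then install the PTE. */
	folio_add_anon_rmap_ptes(folio, page, 1, vma, addr, RMAP_NONE);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}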
43d8eac4 | 1393 | /** |
4d510f3d MWO |
1394 | * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. |
1395 | * @folio: The folio to add the mapping to. | |
9617d95e NP |
1396 | * @vma: the vm area in which the mapping is added |
1397 | * @address: the user virtual address mapped | |
40f2bbf7 | 1398 | * |
84f0169e | 1399 | * Like folio_add_anon_rmap_*() but must only be called on *new* folios. |
9617d95e | 1400 | * This means the inc-and-test can be bypassed. |
4d510f3d MWO |
1401 | * The folio does not have to be locked. |
1402 | * | |
372cbd4d | 1403 | * If the folio is pmd-mappable, it is accounted as a THP. As the folio |
4d510f3d | 1404 | * is new, it's assumed to be mapped exclusively by a single process. |
9617d95e | 1405 | */ |
4d510f3d MWO |
1406 | void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, |
1407 | unsigned long address) | |
9617d95e | 1408 | { |
372cbd4d | 1409 | int nr = folio_nr_pages(folio); |
d281ee61 | 1410 | |
a4ea1864 | 1411 | VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); |
372cbd4d RR |
1412 | VM_BUG_ON_VMA(address < vma->vm_start || |
1413 | address + (nr << PAGE_SHIFT) > vma->vm_end, vma); | |
4d510f3d | 1414 | __folio_set_swapbacked(folio); |
372cbd4d | 1415 | __folio_set_anon(folio, vma, address, true); |
d8dd5e97 | 1416 | |
372cbd4d | 1417 | if (likely(!folio_test_large(folio))) { |
d8dd5e97 | 1418 | /* increment count (starts at -1) */ |
4d510f3d | 1419 | atomic_set(&folio->_mapcount, 0); |
372cbd4d RR |
1420 | SetPageAnonExclusive(&folio->page); |
1421 | } else if (!folio_test_pmd_mappable(folio)) { | |
1422 | int i; | |
1423 | ||
1424 | for (i = 0; i < nr; i++) { | |
1425 | struct page *page = folio_page(folio, i); | |
1426 | ||
1427 | /* increment count (starts at -1) */ | |
1428 | atomic_set(&page->_mapcount, 0); | |
1429 | SetPageAnonExclusive(page); | |
1430 | } | |
1431 | ||
1432 | atomic_set(&folio->_nr_pages_mapped, nr); | |
d8dd5e97 | 1433 | } else { |
53f9263b | 1434 | /* increment count (starts at -1) */ |
4d510f3d | 1435 | atomic_set(&folio->_entire_mapcount, 0); |
e78a13fd | 1436 | atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); |
372cbd4d | 1437 | SetPageAnonExclusive(&folio->page); |
4d510f3d | 1438 | __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); |
d281ee61 | 1439 | } |
d8dd5e97 | 1440 | |
4d510f3d | 1441 | __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); |
9617d95e NP |
1442 | } |
1443 | ||
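/*
 * Illustrative sketch (editorial addition, not part of rmap.c): how an
 * anonymous-fault style caller might use folio_add_new_anon_rmap() on a
 * freshly allocated folio.  The helper is hypothetical; PTE construction
 * and installation are left to the caller (e.g. mk_pte()/set_pte_at()
 * under the page table lock).
 */
static void example_add_new_anon_folio(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr)
{
	/* The folio is new and not yet mapped, so no folio lock is needed. */
	folio_add_new_anon_rmap(folio, vma, addr);
	folio_add_lru_vma(folio, vma);
	/* ...the caller then installs the PTE(s) mapping the folio... */
}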
68f03208 DH |
1444 | static __always_inline void __folio_add_file_rmap(struct folio *folio, |
1445 | struct page *page, int nr_pages, struct vm_area_struct *vma, | |
1446 | enum rmap_level level) | |
1da177e4 | 1447 | { |
96fd7495 | 1448 | int nr, nr_pmdmapped = 0; |
dd78fedd | 1449 | |
68f03208 | 1450 | VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); |
9bd3155e | 1451 | |
96fd7495 | 1452 | nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); |
9bd3155e | 1453 | if (nr_pmdmapped) |
eb01a2ad | 1454 | __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? |
9bd3155e | 1455 | NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); |
5d543f13 | 1456 | if (nr) |
eb01a2ad | 1457 | __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); |
cea86fe2 | 1458 | |
84f0169e | 1459 | /* See comments in folio_add_anon_rmap_*() */ |
1acbc3f9 YF |
1460 | if (!folio_test_large(folio)) |
1461 | mlock_vma_folio(folio, vma); | |
1da177e4 LT |
1462 | } |
1463 | ||
68f03208 DH |
1464 | /** |
1465 | * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio | |
1466 | * @folio: The folio to add the mappings to | |
1467 | * @page: The first page to add | |
1468 | * @nr_pages: The number of pages that will be mapped using PTEs | |
1469 | * @vma: The vm area in which the mappings are added | |
1470 | * | |
1471 | * The page range of the folio is defined by [page, page + nr_pages) | |
1472 | * | |
1473 | * The caller needs to hold the page table lock. | |
1474 | */ | |
1475 | void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, | |
1476 | int nr_pages, struct vm_area_struct *vma) | |
1477 | { | |
1478 | __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); | |
1479 | } | |
1480 | ||
1481 | /** | |
1482 | * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio | |
1483 | * @folio: The folio to add the mapping to | |
1484 | * @page: The first page to add | |
1485 | * @vma: The vm area in which the mapping is added | |
1486 | * | |
1487 | * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) | |
1488 | * | |
1489 | * The caller needs to hold the page table lock. | |
1490 | */ | |
1491 | void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, | |
1492 | struct vm_area_struct *vma) | |
1493 | { | |
1494 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
1495 | __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); | |
1496 | #else | |
1497 | WARN_ON_ONCE(true); | |
1498 | #endif | |
1499 | } | |
1500 | ||
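/*
 * Illustrative sketch (editorial addition, not part of rmap.c): a
 * filemap-fault style caller accounting a batch of PTE mappings of a
 * page-cache folio with folio_add_file_rmap_ptes() before installing the
 * PTEs.  The helper and its arguments are hypothetical; only the rmap call
 * itself is the API documented above, and the page table lock is assumed
 * to be held as the kernel-doc requires.
 */
static void example_map_file_ptes(struct folio *folio, struct page *page,
		int nr, struct vm_area_struct *vma)
{
	folio_add_file_rmap_ptes(folio, page, nr, vma);
	/* ...the caller then installs nr PTEs for page..page + nr - 1... */
}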
b06dc281 DH |
1501 | static __always_inline void __folio_remove_rmap(struct folio *folio, |
1502 | struct page *page, int nr_pages, struct vm_area_struct *vma, | |
1503 | enum rmap_level level) | |
1504 | { | |
62beb906 | 1505 | atomic_t *mapped = &folio->_nr_pages_mapped; |
b06dc281 | 1506 | int last, nr = 0, nr_pmdmapped = 0; |
62beb906 | 1507 | enum node_stat_item idx; |
dd78fedd | 1508 | |
b06dc281 DH |
1509 | __folio_rmap_sanity_checks(folio, page, nr_pages, level); |
1510 | ||
1511 | switch (level) { | |
1512 | case RMAP_LEVEL_PTE: | |
1513 | do { | |
1514 | last = atomic_add_negative(-1, &page->_mapcount); | |
1515 | if (last && folio_test_large(folio)) { | |
1516 | last = atomic_dec_return_relaxed(mapped); | |
e78a13fd | 1517 | last = (last < ENTIRELY_MAPPED); |
b06dc281 | 1518 | } |
d8dd5e97 | 1519 | |
b06dc281 DH |
1520 | if (last) |
1521 | nr++; | |
1522 | } while (page++, --nr_pages > 0); | |
1523 | break; | |
1524 | case RMAP_LEVEL_PMD: | |
62beb906 | 1525 | last = atomic_add_negative(-1, &folio->_entire_mapcount); |
9bd3155e | 1526 | if (last) { |
e78a13fd DH |
1527 | nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); |
1528 | if (likely(nr < ENTIRELY_MAPPED)) { | |
62beb906 | 1529 | nr_pmdmapped = folio_nr_pages(folio); |
eec20426 | 1530 | nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); |
6287b7da HD |
1531 | /* Raced ahead of another remove and an add? */ |
1532 | if (unlikely(nr < 0)) | |
1533 | nr = 0; | |
1534 | } else { | |
e78a13fd | 1535 | /* An add of ENTIRELY_MAPPED raced ahead */ |
6287b7da HD |
1536 | nr = 0; |
1537 | } | |
9bd3155e | 1538 | } |
b06dc281 | 1539 | break; |
dd78fedd | 1540 | } |
cb67f428 | 1541 | |
9bd3155e | 1542 | if (nr_pmdmapped) { |
62beb906 MWO |
1543 | if (folio_test_anon(folio)) |
1544 | idx = NR_ANON_THPS; | |
1545 | else if (folio_test_swapbacked(folio)) | |
1546 | idx = NR_SHMEM_PMDMAPPED; | |
1547 | else | |
1548 | idx = NR_FILE_PMDMAPPED; | |
1549 | __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); | |
9bd3155e HD |
1550 | } |
1551 | if (nr) { | |
62beb906 MWO |
1552 | idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; |
1553 | __lruvec_stat_mod_folio(folio, idx, -nr); | |
1554 | ||
f1fe80d4 | 1555 | /* |
7dc7c5ef | 1556 | * Queue anon large folio for deferred split if at least one |
62beb906 MWO |
1557 | * page of the folio is unmapped and at least one page |
1558 | * is still mapped. | |
f1fe80d4 | 1559 | */ |
7dc7c5ef | 1560 | if (folio_test_large(folio) && folio_test_anon(folio)) |
b06dc281 | 1561 | if (level == RMAP_LEVEL_PTE || nr < nr_pmdmapped) |
f158ed61 | 1562 | deferred_split_folio(folio); |
53f9263b KS |
1563 | } |
1564 | ||
b904dcfe | 1565 | /* |
672aa27d | 1566 | * It would be tidy to reset folio_test_anon mapping when fully |
84f0169e | 1567 | * unmapped, but that might overwrite a racing folio_add_anon_rmap_*() |
672aa27d MWO |
1568 | * which increments mapcount after us but sets mapping before us: |
1569 | * so leave the reset to free_pages_prepare, and remember that | |
1570 | * it's only reliable while mapped. | |
b904dcfe | 1571 | */ |
9bd3155e | 1572 | |
1acbc3f9 | 1573 | munlock_vma_folio(folio, vma); |
1da177e4 LT |
1574 | } |
1575 | ||
b06dc281 DH |
1576 | /** |
1577 | * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio | |
1578 | * @folio: The folio to remove the mappings from | |
1579 | * @page: The first page to remove | |
1580 | * @nr_pages: The number of pages that will be removed from the mapping | |
1581 | * @vma: The vm area from which the mappings are removed | |
1582 | * | |
1583 | * The page range of the folio is defined by [page, page + nr_pages) | |
1584 | * | |
1585 | * The caller needs to hold the page table lock. | |
1586 | */ | |
1587 | void folio_remove_rmap_ptes(struct folio *folio, struct page *page, | |
1588 | int nr_pages, struct vm_area_struct *vma) | |
1589 | { | |
1590 | __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); | |
1591 | } | |
1592 | ||
1593 | /** | |
1594 | * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio | |
1595 | * @folio: The folio to remove the mapping from | |
1596 | * @page: The first page to remove | |
1597 | * @vma: The vm area from which the mapping is removed | |
1598 | * | |
1599 | * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) | |
1600 | * | |
1601 | * The caller needs to hold the page table lock. | |
1602 | */ | |
1603 | void folio_remove_rmap_pmd(struct folio *folio, struct page *page, | |
1604 | struct vm_area_struct *vma) | |
1605 | { | |
1606 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
1607 | __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); | |
1608 | #else | |
1609 | WARN_ON_ONCE(true); | |
1610 | #endif | |
1611 | } | |
1612 | ||
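/*
 * Illustrative sketch (editorial addition, not part of rmap.c): a
 * zap-style caller tearing down one PTE mapping.  The helper is
 * hypothetical; it mirrors the pattern used by the unmap paths below:
 * clear the PTE under the page table lock, propagate the dirty bit,
 * drop the rmap, then drop the reference that the mapping held.
 */
static void example_zap_one_pte(struct folio *folio, struct page *page,
		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pteval = ptep_get_and_clear(vma->vm_mm, addr, ptep);

	if (pte_dirty(pteval))
		folio_mark_dirty(folio);
	folio_remove_rmap_pte(folio, page, vma);
	folio_put(folio);
}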
1da177e4 | 1613 | /* |
52629506 | 1614 | * @arg: enum ttu_flags will be passed to this argument |
1da177e4 | 1615 | */ |
2f031c6f | 1616 | static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, |
52629506 | 1617 | unsigned long address, void *arg) |
1da177e4 LT |
1618 | { |
1619 | struct mm_struct *mm = vma->vm_mm; | |
869f7ee6 | 1620 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
1da177e4 | 1621 | pte_t pteval; |
c7ab0d2f | 1622 | struct page *subpage; |
6c287605 | 1623 | bool anon_exclusive, ret = true; |
ac46d4f3 | 1624 | struct mmu_notifier_range range; |
4708f318 | 1625 | enum ttu_flags flags = (enum ttu_flags)(long)arg; |
c33c7948 | 1626 | unsigned long pfn; |
935d4f0c | 1627 | unsigned long hsz = 0; |
1da177e4 | 1628 | |
732ed558 HD |
1629 | /* |
1630 | * When racing against e.g. zap_pte_range() on another cpu, | |
ca1a0746 | 1631 | * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), |
1fb08ac6 | 1632 | * try_to_unmap() may return before page_mapped() has become false, |
732ed558 HD |
1633 | * if page table locking is skipped: use TTU_SYNC to wait for that. |
1634 | */ | |
1635 | if (flags & TTU_SYNC) | |
1636 | pvmw.flags = PVMW_SYNC; | |
1637 | ||
a98a2f0c | 1638 | if (flags & TTU_SPLIT_HUGE_PMD) |
af28a988 | 1639 | split_huge_pmd_address(vma, address, false, folio); |
fec89c10 | 1640 | |
369ea824 | 1641 | /* |
017b1660 MK |
1642 | * For THP, we have to assume the worst case, i.e., PMD, for invalidation. |
1643 | * For hugetlb, it could be much worse if we need to do pud | |
1644 | * invalidation in the case of pmd sharing. | |
1645 | * | |
869f7ee6 MWO |
1646 | * Note that the folio cannot be freed in this function, as the caller |
1647 | * of try_to_unmap() must hold a reference on the folio. |
369ea824 | 1648 | */ |
2aff7a47 | 1649 | range.end = vma_address_end(&pvmw); |
7d4a8be0 | 1650 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, |
494334e4 | 1651 | address, range.end); |
869f7ee6 | 1652 | if (folio_test_hugetlb(folio)) { |
017b1660 MK |
1653 | /* |
1654 | * If sharing is possible, start and end will be adjusted | |
1655 | * accordingly. | |
1656 | */ | |
ac46d4f3 JG |
1657 | adjust_range_if_pmd_sharing_possible(vma, &range.start, |
1658 | &range.end); | |
935d4f0c RR |
1659 | |
1660 | /* We need the huge page size for set_huge_pte_at() */ | |
1661 | hsz = huge_page_size(hstate_vma(vma)); | |
017b1660 | 1662 | } |
ac46d4f3 | 1663 | mmu_notifier_invalidate_range_start(&range); |
369ea824 | 1664 | |
c7ab0d2f | 1665 | while (page_vma_mapped_walk(&pvmw)) { |
cea86fe2 | 1666 | /* Unexpected PMD-mapped THP? */ |
869f7ee6 | 1667 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
cea86fe2 | 1668 | |
c7ab0d2f | 1669 | /* |
869f7ee6 | 1670 | * If the folio is in an mlock()d vma, we must not swap it out. |
c7ab0d2f | 1671 | */ |
efdb6720 HD |
1672 | if (!(flags & TTU_IGNORE_MLOCK) && |
1673 | (vma->vm_flags & VM_LOCKED)) { | |
cea86fe2 | 1674 | /* Restore the mlock which got missed */ |
1acbc3f9 YF |
1675 | if (!folio_test_large(folio)) |
1676 | mlock_vma_folio(folio, vma); | |
efdb6720 HD |
1677 | page_vma_mapped_walk_done(&pvmw); |
1678 | ret = false; | |
1679 | break; | |
b87537d9 | 1680 | } |
c7ab0d2f | 1681 | |
c33c7948 RR |
1682 | pfn = pte_pfn(ptep_get(pvmw.pte)); |
1683 | subpage = folio_page(folio, pfn - folio_pfn(folio)); | |
785373b4 | 1684 | address = pvmw.address; |
6c287605 DH |
1685 | anon_exclusive = folio_test_anon(folio) && |
1686 | PageAnonExclusive(subpage); | |
785373b4 | 1687 | |
dfc7ab57 | 1688 | if (folio_test_hugetlb(folio)) { |
0506c31d BW |
1689 | bool anon = folio_test_anon(folio); |
1690 | ||
a00a8759 BW |
1691 | /* |
1692 | * try_to_unmap() is only passed a hugetlb page |
1693 | * when the hugetlb page is poisoned. |
1694 | */ | |
1695 | VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); | |
54205e9c BW |
1696 | /* |
1697 | * huge_pmd_unshare may unmap an entire PMD page. | |
1698 | * There is no way of knowing exactly which PMDs may | |
1699 | * be cached for this mm, so we must flush them all. | |
1700 | * start/end were already adjusted above to cover this | |
1701 | * range. | |
1702 | */ | |
1703 | flush_cache_range(vma, range.start, range.end); | |
1704 | ||
0506c31d BW |
1705 | /* |
1706 | * To call huge_pmd_unshare, i_mmap_rwsem must be | |
1707 | * held in write mode. Caller needs to explicitly | |
1708 | * do this outside rmap routines. | |
40549ba8 MK |
1709 | * |
1710 | * We also must hold hugetlb vma_lock in write mode. | |
1711 | * Lock order dictates acquiring vma_lock BEFORE | |
1712 | * i_mmap_rwsem. We can only try lock here and fail | |
1713 | * if unsuccessful. | |
0506c31d | 1714 | */ |
40549ba8 MK |
1715 | if (!anon) { |
1716 | VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); | |
1717 | if (!hugetlb_vma_trylock_write(vma)) { | |
1718 | page_vma_mapped_walk_done(&pvmw); | |
1719 | ret = false; | |
1720 | break; | |
1721 | } | |
1722 | if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { | |
1723 | hugetlb_vma_unlock_write(vma); | |
1724 | flush_tlb_range(vma, | |
1725 | range.start, range.end); | |
40549ba8 MK |
1726 | /* |
1727 | * The ref count of the PMD page was | |
1728 | * dropped which is part of the way map | |
1729 | * counting is done for shared PMDs. | |
1730 | * Return 'true' here. When there is | |
1731 | * no other sharing, huge_pmd_unshare | |
1732 | * returns false and we will unmap the | |
1733 | * actual page and drop map count | |
1734 | * to zero. | |
1735 | */ | |
1736 | page_vma_mapped_walk_done(&pvmw); | |
1737 | break; | |
1738 | } | |
1739 | hugetlb_vma_unlock_write(vma); | |
017b1660 | 1740 | } |
a00a8759 | 1741 | pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); |
54205e9c | 1742 | } else { |
c33c7948 | 1743 | flush_cache_page(vma, address, pfn); |
088b8aa5 DH |
1744 | /* Nuke the page table entry. */ |
1745 | if (should_defer_flush(mm, flags)) { | |
a00a8759 BW |
1746 | /* |
1747 | * We clear the PTE but do not flush so potentially | |
1748 | * a remote CPU could still be writing to the folio. | |
1749 | * If the entry was previously clean then the | |
1750 | * architecture must guarantee that a clear->dirty | |
1751 | * transition on a cached TLB entry is written through | |
1752 | * and traps if the PTE is unmapped. | |
1753 | */ | |
1754 | pteval = ptep_get_and_clear(mm, address, pvmw.pte); | |
c7ab0d2f | 1755 | |
f73419bb | 1756 | set_tlb_ubc_flush_pending(mm, pteval, address); |
a00a8759 BW |
1757 | } else { |
1758 | pteval = ptep_clear_flush(vma, address, pvmw.pte); | |
1759 | } | |
c7ab0d2f | 1760 | } |
72b252ae | 1761 | |
999dad82 PX |
1762 | /* |
1763 | * Now the pte is cleared. If this pte was uffd-wp armed, | |
1764 | * we may want to replace a none pte with a marker pte if | |
1765 | * it's file-backed, so we don't lose the tracking info. | |
1766 | */ | |
1767 | pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); | |
1768 | ||
869f7ee6 | 1769 | /* Set the dirty flag on the folio now the pte is gone. */ |
c7ab0d2f | 1770 | if (pte_dirty(pteval)) |
869f7ee6 | 1771 | folio_mark_dirty(folio); |
1da177e4 | 1772 | |
c7ab0d2f KS |
1773 | /* Update high watermark before we lower rss */ |
1774 | update_hiwater_rss(mm); | |
1da177e4 | 1775 | |
6da6b1d4 | 1776 | if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { |
5fd27b8e | 1777 | pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); |
869f7ee6 MWO |
1778 | if (folio_test_hugetlb(folio)) { |
1779 | hugetlb_count_sub(folio_nr_pages(folio), mm); | |
935d4f0c RR |
1780 | set_huge_pte_at(mm, address, pvmw.pte, pteval, |
1781 | hsz); | |
c7ab0d2f | 1782 | } else { |
a23f517b | 1783 | dec_mm_counter(mm, mm_counter(folio)); |
785373b4 | 1784 | set_pte_at(mm, address, pvmw.pte, pteval); |
c7ab0d2f | 1785 | } |
365e9c87 | 1786 | |
bce73e48 | 1787 | } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { |
c7ab0d2f KS |
1788 | /* |
1789 | * The guest indicated that the page content is of no | |
1790 | * interest anymore. Simply discard the pte; vmscan |
1791 | * will take care of the rest. | |
bce73e48 CB |
1792 | * A future reference will then fault in a new zero |
1793 | * page. When userfaultfd is active, we must not drop | |
1794 | * this page though, as its main user (postcopy | |
1795 | * migration) will not expect userfaults on already | |
1796 | * copied pages. | |
c7ab0d2f | 1797 | */ |
a23f517b | 1798 | dec_mm_counter(mm, mm_counter(folio)); |
869f7ee6 | 1799 | } else if (folio_test_anon(folio)) { |
cfeed8ff | 1800 | swp_entry_t entry = page_swap_entry(subpage); |
c7ab0d2f KS |
1801 | pte_t swp_pte; |
1802 | /* | |
1803 | * Store the swap location in the pte. | |
1804 | * See handle_pte_fault() ... | |
1805 | */ | |
869f7ee6 MWO |
1806 | if (unlikely(folio_test_swapbacked(folio) != |
1807 | folio_test_swapcache(folio))) { | |
eb94a878 | 1808 | WARN_ON_ONCE(1); |
83612a94 | 1809 | ret = false; |
eb94a878 MK |
1810 | page_vma_mapped_walk_done(&pvmw); |
1811 | break; | |
1812 | } | |
c7ab0d2f | 1813 | |
802a3a92 | 1814 | /* MADV_FREE page check */ |
869f7ee6 | 1815 | if (!folio_test_swapbacked(folio)) { |
6c8e2a25 MFO |
1816 | int ref_count, map_count; |
1817 | ||
1818 | /* | |
1819 | * Synchronize with gup_pte_range(): | |
1820 | * - clear PTE; barrier; read refcount | |
1821 | * - inc refcount; barrier; read PTE | |
1822 | */ | |
1823 | smp_mb(); | |
1824 | ||
1825 | ref_count = folio_ref_count(folio); | |
1826 | map_count = folio_mapcount(folio); | |
1827 | ||
1828 | /* | |
1829 | * Order reads for page refcount and dirty flag | |
1830 | * (see comments in __remove_mapping()). | |
1831 | */ | |
1832 | smp_rmb(); | |
1833 | ||
1834 | /* | |
1835 | * The only page refs must be one from isolation | |
1836 | * plus the rmap(s) (dropped by discard:). | |
1837 | */ | |
1838 | if (ref_count == 1 + map_count && | |
1839 | !folio_test_dirty(folio)) { | |
802a3a92 SL |
1840 | dec_mm_counter(mm, MM_ANONPAGES); |
1841 | goto discard; | |
1842 | } | |
1843 | ||
1844 | /* | |
869f7ee6 | 1845 | * If the folio was redirtied, it cannot be |
802a3a92 SL |
1846 | * discarded. Remap the page to page table. |
1847 | */ | |
785373b4 | 1848 | set_pte_at(mm, address, pvmw.pte, pteval); |
869f7ee6 | 1849 | folio_set_swapbacked(folio); |
e4b82222 | 1850 | ret = false; |
802a3a92 SL |
1851 | page_vma_mapped_walk_done(&pvmw); |
1852 | break; | |
c7ab0d2f | 1853 | } |
854e9ed0 | 1854 | |
c7ab0d2f | 1855 | if (swap_duplicate(entry) < 0) { |
785373b4 | 1856 | set_pte_at(mm, address, pvmw.pte, pteval); |
e4b82222 | 1857 | ret = false; |
c7ab0d2f KS |
1858 | page_vma_mapped_walk_done(&pvmw); |
1859 | break; | |
1860 | } | |
ca827d55 | 1861 | if (arch_unmap_one(mm, vma, address, pteval) < 0) { |
322842ea | 1862 | swap_free(entry); |
ca827d55 KA |
1863 | set_pte_at(mm, address, pvmw.pte, pteval); |
1864 | ret = false; | |
1865 | page_vma_mapped_walk_done(&pvmw); | |
1866 | break; | |
1867 | } | |
088b8aa5 | 1868 | |
e3b4b137 | 1869 | /* See folio_try_share_anon_rmap(): clear PTE first. */ |
6c287605 | 1870 | if (anon_exclusive && |
e3b4b137 | 1871 | folio_try_share_anon_rmap_pte(folio, subpage)) { |
6c287605 DH |
1872 | swap_free(entry); |
1873 | set_pte_at(mm, address, pvmw.pte, pteval); | |
1874 | ret = false; | |
1875 | page_vma_mapped_walk_done(&pvmw); | |
1876 | break; | |
1877 | } | |
c7ab0d2f KS |
1878 | if (list_empty(&mm->mmlist)) { |
1879 | spin_lock(&mmlist_lock); | |
1880 | if (list_empty(&mm->mmlist)) | |
1881 | list_add(&mm->mmlist, &init_mm.mmlist); | |
1882 | spin_unlock(&mmlist_lock); | |
1883 | } | |
854e9ed0 | 1884 | dec_mm_counter(mm, MM_ANONPAGES); |
c7ab0d2f KS |
1885 | inc_mm_counter(mm, MM_SWAPENTS); |
1886 | swp_pte = swp_entry_to_pte(entry); | |
1493a191 DH |
1887 | if (anon_exclusive) |
1888 | swp_pte = pte_swp_mkexclusive(swp_pte); | |
c7ab0d2f KS |
1889 | if (pte_soft_dirty(pteval)) |
1890 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | |
f45ec5ff PX |
1891 | if (pte_uffd_wp(pteval)) |
1892 | swp_pte = pte_swp_mkuffd_wp(swp_pte); | |
785373b4 | 1893 | set_pte_at(mm, address, pvmw.pte, swp_pte); |
0f10851e JG |
1894 | } else { |
1895 | /* | |
869f7ee6 MWO |
1896 | * This is a locked file-backed folio, |
1897 | * so it cannot be removed from the page | |
1898 | * cache and replaced by a new folio before | |
1899 | * mmu_notifier_invalidate_range_end, so no | |
1900 | * concurrent thread can update its page table |
1901 | * to point at a new folio while a device is | |
1902 | * still using this folio. | |
0f10851e | 1903 | * |
ee65728e | 1904 | * See Documentation/mm/mmu_notifier.rst |
0f10851e | 1905 | */ |
6b27cc6c | 1906 | dec_mm_counter(mm, mm_counter_file(folio)); |
0f10851e | 1907 | } |
854e9ed0 | 1908 | discard: |
e135826b DH |
1909 | if (unlikely(folio_test_hugetlb(folio))) |
1910 | hugetlb_remove_rmap(folio); | |
1911 | else | |
ca1a0746 | 1912 | folio_remove_rmap_pte(folio, subpage, vma); |
b7435507 | 1913 | if (vma->vm_flags & VM_LOCKED) |
96f97c43 | 1914 | mlock_drain_local(); |
869f7ee6 | 1915 | folio_put(folio); |
c7ab0d2f | 1916 | } |
369ea824 | 1917 | |
ac46d4f3 | 1918 | mmu_notifier_invalidate_range_end(&range); |
369ea824 | 1919 | |
caed0f48 | 1920 | return ret; |
1da177e4 LT |
1921 | } |
1922 | ||
52629506 JK |
1923 | static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) |
1924 | { | |
222100ee | 1925 | return vma_is_temporary_stack(vma); |
52629506 JK |
1926 | } |
1927 | ||
f3ad032c | 1928 | static int folio_not_mapped(struct folio *folio) |
52629506 | 1929 | { |
2f031c6f | 1930 | return !folio_mapped(folio); |
2a52bcbc | 1931 | } |
52629506 | 1932 | |
1da177e4 | 1933 | /** |
869f7ee6 MWO |
1934 | * try_to_unmap - Try to remove all page table mappings to a folio. |
1935 | * @folio: The folio to unmap. | |
14fa31b8 | 1936 | * @flags: action and flags |
1da177e4 LT |
1937 | * |
1938 | * Tries to remove all the page table entries which are mapping this | |
869f7ee6 MWO |
1939 | * folio. It is the caller's responsibility to check if the folio is |
1940 | * still mapped if needed (use TTU_SYNC to prevent accounting races). | |
1da177e4 | 1941 | * |
869f7ee6 | 1942 | * Context: Caller must hold the folio lock. |
1da177e4 | 1943 | */ |
869f7ee6 | 1944 | void try_to_unmap(struct folio *folio, enum ttu_flags flags) |
1da177e4 | 1945 | { |
52629506 JK |
1946 | struct rmap_walk_control rwc = { |
1947 | .rmap_one = try_to_unmap_one, | |
802a3a92 | 1948 | .arg = (void *)flags, |
f3ad032c | 1949 | .done = folio_not_mapped, |
2f031c6f | 1950 | .anon_lock = folio_lock_anon_vma_read, |
52629506 | 1951 | }; |
1da177e4 | 1952 | |
a98a2f0c | 1953 | if (flags & TTU_RMAP_LOCKED) |
2f031c6f | 1954 | rmap_walk_locked(folio, &rwc); |
a98a2f0c | 1955 | else |
2f031c6f | 1956 | rmap_walk(folio, &rwc); |
a98a2f0c AP |
1957 | } |
1958 | ||
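/*
 * Illustrative sketch (editorial addition, not part of rmap.c):
 * reclaim-style use of try_to_unmap().  The helper name and flag choice
 * are hypothetical; the calling convention follows the kernel-doc above:
 * the folio must be locked, and the caller re-checks folio_mapped() to
 * see whether all mappings were actually removed.
 */
static bool example_unmap_folio(struct folio *folio)
{
	if (!folio_trylock(folio))
		return false;
	try_to_unmap(folio, TTU_SYNC | TTU_IGNORE_MLOCK);
	folio_unlock(folio);
	return !folio_mapped(folio);
}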
1959 | /* | |
1960 | * @arg: enum ttu_flags will be passed to this argument. | |
1961 | * | |
1962 | * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs | |
64b586d1 | 1963 | * containing migration entries. |
a98a2f0c | 1964 | */ |
2f031c6f | 1965 | static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, |
a98a2f0c AP |
1966 | unsigned long address, void *arg) |
1967 | { | |
1968 | struct mm_struct *mm = vma->vm_mm; | |
4b8554c5 | 1969 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
a98a2f0c AP |
1970 | pte_t pteval; |
1971 | struct page *subpage; | |
6c287605 | 1972 | bool anon_exclusive, ret = true; |
a98a2f0c AP |
1973 | struct mmu_notifier_range range; |
1974 | enum ttu_flags flags = (enum ttu_flags)(long)arg; | |
c33c7948 | 1975 | unsigned long pfn; |
935d4f0c | 1976 | unsigned long hsz = 0; |
a98a2f0c | 1977 | |
a98a2f0c AP |
1978 | /* |
1979 | * When racing against e.g. zap_pte_range() on another cpu, | |
ca1a0746 | 1980 | * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), |
a98a2f0c AP |
1981 | * try_to_migrate() may return before page_mapped() has become false, |
1982 | * if page table locking is skipped: use TTU_SYNC to wait for that. | |
1983 | */ | |
1984 | if (flags & TTU_SYNC) | |
1985 | pvmw.flags = PVMW_SYNC; | |
1986 | ||
1987 | /* | |
1988 | * unmap_page() in mm/huge_memory.c is the only user of migration with | |
1989 | * TTU_SPLIT_HUGE_PMD and it wants to freeze. | |
1990 | */ | |
1991 | if (flags & TTU_SPLIT_HUGE_PMD) | |
af28a988 | 1992 | split_huge_pmd_address(vma, address, true, folio); |
a98a2f0c AP |
1993 | |
1994 | /* | |
1995 | * For THP, we have to assume the worst case, i.e., PMD, for invalidation. |
1996 | * For hugetlb, it could be much worse if we need to do pud | |
1997 | * invalidation in the case of pmd sharing. | |
1998 | * | |
1999 | * Note that the page cannot be freed in this function, as the caller |
2000 | * of try_to_unmap() must hold a reference on the page. |
2001 | */ | |
2aff7a47 | 2002 | range.end = vma_address_end(&pvmw); |
7d4a8be0 | 2003 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, |
a98a2f0c | 2004 | address, range.end); |
4b8554c5 | 2005 | if (folio_test_hugetlb(folio)) { |
a98a2f0c AP |
2006 | /* |
2007 | * If sharing is possible, start and end will be adjusted | |
2008 | * accordingly. | |
2009 | */ | |
2010 | adjust_range_if_pmd_sharing_possible(vma, &range.start, | |
2011 | &range.end); | |
935d4f0c RR |
2012 | |
2013 | /* We need the huge page size for set_huge_pte_at() */ | |
2014 | hsz = huge_page_size(hstate_vma(vma)); | |
a98a2f0c AP |
2015 | } |
2016 | mmu_notifier_invalidate_range_start(&range); | |
2017 | ||
2018 | while (page_vma_mapped_walk(&pvmw)) { | |
2019 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION | |
2020 | /* PMD-mapped THP migration entry */ | |
2021 | if (!pvmw.pte) { | |
4b8554c5 MWO |
2022 | subpage = folio_page(folio, |
2023 | pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); | |
2024 | VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || | |
2025 | !folio_test_pmd_mappable(folio), folio); | |
a98a2f0c | 2026 | |
7f5abe60 DH |
2027 | if (set_pmd_migration_entry(&pvmw, subpage)) { |
2028 | ret = false; | |
2029 | page_vma_mapped_walk_done(&pvmw); | |
2030 | break; | |
2031 | } | |
a98a2f0c AP |
2032 | continue; |
2033 | } | |
2034 | #endif | |
2035 | ||
2036 | /* Unexpected PMD-mapped THP? */ | |
4b8554c5 | 2037 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
a98a2f0c | 2038 | |
c33c7948 RR |
2039 | pfn = pte_pfn(ptep_get(pvmw.pte)); |
2040 | ||
1118234e DH |
2041 | if (folio_is_zone_device(folio)) { |
2042 | /* | |
2043 | * Our PTE is a non-present device exclusive entry and | |
2044 | * calculating the subpage as for the common case would | |
2045 | * result in an invalid pointer. | |
2046 | * | |
2047 | * Since only PAGE_SIZE pages can currently be | |
2048 | * migrated, just set it to page. This will need to be | |
2049 | * changed when hugepage migrations to device private | |
2050 | * memory are supported. | |
2051 | */ | |
2052 | VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); | |
2053 | subpage = &folio->page; | |
2054 | } else { | |
c33c7948 | 2055 | subpage = folio_page(folio, pfn - folio_pfn(folio)); |
1118234e | 2056 | } |
a98a2f0c | 2057 | address = pvmw.address; |
6c287605 DH |
2058 | anon_exclusive = folio_test_anon(folio) && |
2059 | PageAnonExclusive(subpage); | |
a98a2f0c | 2060 | |
dfc7ab57 | 2061 | if (folio_test_hugetlb(folio)) { |
0506c31d BW |
2062 | bool anon = folio_test_anon(folio); |
2063 | ||
54205e9c BW |
2064 | /* |
2065 | * huge_pmd_unshare may unmap an entire PMD page. | |
2066 | * There is no way of knowing exactly which PMDs may | |
2067 | * be cached for this mm, so we must flush them all. | |
2068 | * start/end were already adjusted above to cover this | |
2069 | * range. | |
2070 | */ | |
2071 | flush_cache_range(vma, range.start, range.end); | |
2072 | ||
0506c31d BW |
2073 | /* |
2074 | * To call huge_pmd_unshare, i_mmap_rwsem must be | |
2075 | * held in write mode. Caller needs to explicitly | |
2076 | * do this outside rmap routines. | |
40549ba8 MK |
2077 | * |
2078 | * We also must hold hugetlb vma_lock in write mode. | |
2079 | * Lock order dictates acquiring vma_lock BEFORE | |
2080 | * i_mmap_rwsem. We can only try lock here and | |
2081 | * fail if unsuccessful. | |
0506c31d | 2082 | */ |
40549ba8 MK |
2083 | if (!anon) { |
2084 | VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); | |
2085 | if (!hugetlb_vma_trylock_write(vma)) { | |
2086 | page_vma_mapped_walk_done(&pvmw); | |
2087 | ret = false; | |
2088 | break; | |
2089 | } | |
2090 | if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { | |
2091 | hugetlb_vma_unlock_write(vma); | |
2092 | flush_tlb_range(vma, | |
2093 | range.start, range.end); | |
40549ba8 MK |
2094 | |
2095 | /* | |
2096 | * The ref count of the PMD page was | |
2097 | * dropped which is part of the way map | |
2098 | * counting is done for shared PMDs. | |
2099 | * Return 'true' here. When there is | |
2100 | * no other sharing, huge_pmd_unshare | |
2101 | * returns false and we will unmap the | |
2102 | * actual page and drop map count | |
2103 | * to zero. | |
2104 | */ | |
2105 | page_vma_mapped_walk_done(&pvmw); | |
2106 | break; | |
2107 | } | |
2108 | hugetlb_vma_unlock_write(vma); | |
a98a2f0c | 2109 | } |
5d4af619 BW |
2110 | /* Nuke the hugetlb page table entry */ |
2111 | pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); | |
54205e9c | 2112 | } else { |
c33c7948 | 2113 | flush_cache_page(vma, address, pfn); |
5d4af619 | 2114 | /* Nuke the page table entry. */ |
7e12beb8 YH |
2115 | if (should_defer_flush(mm, flags)) { |
2116 | /* | |
2117 | * We clear the PTE but do not flush so potentially | |
2118 | * a remote CPU could still be writing to the folio. | |
2119 | * If the entry was previously clean then the | |
2120 | * architecture must guarantee that a clear->dirty | |
2121 | * transition on a cached TLB entry is written through | |
2122 | * and traps if the PTE is unmapped. | |
2123 | */ | |
2124 | pteval = ptep_get_and_clear(mm, address, pvmw.pte); | |
2125 | ||
f73419bb | 2126 | set_tlb_ubc_flush_pending(mm, pteval, address); |
7e12beb8 YH |
2127 | } else { |
2128 | pteval = ptep_clear_flush(vma, address, pvmw.pte); | |
2129 | } | |
a98a2f0c AP |
2130 | } |
2131 | ||
4b8554c5 | 2132 | /* Set the dirty flag on the folio now the pte is gone. */ |
a98a2f0c | 2133 | if (pte_dirty(pteval)) |
4b8554c5 | 2134 | folio_mark_dirty(folio); |
a98a2f0c AP |
2135 | |
2136 | /* Update high watermark before we lower rss */ | |
2137 | update_hiwater_rss(mm); | |
2138 | ||
f25cbb7a | 2139 | if (folio_is_device_private(folio)) { |
4b8554c5 | 2140 | unsigned long pfn = folio_pfn(folio); |
a98a2f0c AP |
2141 | swp_entry_t entry; |
2142 | pte_t swp_pte; | |
2143 | ||
6c287605 | 2144 | if (anon_exclusive) |
e3b4b137 DH |
2145 | WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio, |
2146 | subpage)); | |
6c287605 | 2147 | |
a98a2f0c AP |
2148 | /* |
2149 | * Store the pfn of the page in a special migration | |
2150 | * pte. do_swap_page() will wait until the migration | |
2151 | * pte is removed and then restart fault handling. | |
2152 | */ | |
3d88705c AP |
2153 | entry = pte_to_swp_entry(pteval); |
2154 | if (is_writable_device_private_entry(entry)) | |
2155 | entry = make_writable_migration_entry(pfn); | |
6c287605 DH |
2156 | else if (anon_exclusive) |
2157 | entry = make_readable_exclusive_migration_entry(pfn); | |
3d88705c AP |
2158 | else |
2159 | entry = make_readable_migration_entry(pfn); | |
a98a2f0c AP |
2160 | swp_pte = swp_entry_to_pte(entry); |
2161 | ||
2162 | /* | |
2163 | * pteval maps a zone device page and is therefore | |
2164 | * a swap pte. | |
2165 | */ | |
2166 | if (pte_swp_soft_dirty(pteval)) | |
2167 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | |
2168 | if (pte_swp_uffd_wp(pteval)) | |
2169 | swp_pte = pte_swp_mkuffd_wp(swp_pte); | |
2170 | set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); | |
4cc79b33 | 2171 | trace_set_migration_pte(pvmw.address, pte_val(swp_pte), |
059ab7be | 2172 | folio_order(folio)); |
a98a2f0c AP |
2173 | /* |
2174 | * No need to invalidate here; it will synchronize |
2175 | * against the special swap migration pte. |
a98a2f0c | 2176 | */ |
da358d5c | 2177 | } else if (PageHWPoison(subpage)) { |
a98a2f0c | 2178 | pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); |
4b8554c5 MWO |
2179 | if (folio_test_hugetlb(folio)) { |
2180 | hugetlb_count_sub(folio_nr_pages(folio), mm); | |
935d4f0c RR |
2181 | set_huge_pte_at(mm, address, pvmw.pte, pteval, |
2182 | hsz); | |
a98a2f0c | 2183 | } else { |
a23f517b | 2184 | dec_mm_counter(mm, mm_counter(folio)); |
a98a2f0c AP |
2185 | set_pte_at(mm, address, pvmw.pte, pteval); |
2186 | } | |
2187 | ||
2188 | } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { | |
2189 | /* | |
2190 | * The guest indicated that the page content is of no | |
2191 | * interest anymore. Simply discard the pte; vmscan |
2192 | * will take care of the rest. | |
2193 | * A future reference will then fault in a new zero | |
2194 | * page. When userfaultfd is active, we must not drop | |
2195 | * this page though, as its main user (postcopy | |
2196 | * migration) will not expect userfaults on already | |
2197 | * copied pages. | |
2198 | */ | |
a23f517b | 2199 | dec_mm_counter(mm, mm_counter(folio)); |
a98a2f0c AP |
2200 | } else { |
2201 | swp_entry_t entry; | |
2202 | pte_t swp_pte; | |
2203 | ||
2204 | if (arch_unmap_one(mm, vma, address, pteval) < 0) { | |
5d4af619 | 2205 | if (folio_test_hugetlb(folio)) |
935d4f0c RR |
2206 | set_huge_pte_at(mm, address, pvmw.pte, |
2207 | pteval, hsz); | |
5d4af619 BW |
2208 | else |
2209 | set_pte_at(mm, address, pvmw.pte, pteval); | |
a98a2f0c AP |
2210 | ret = false; |
2211 | page_vma_mapped_walk_done(&pvmw); | |
2212 | break; | |
2213 | } | |
6c287605 DH |
2214 | VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && |
2215 | !anon_exclusive, subpage); | |
088b8aa5 | 2216 | |
e3b4b137 | 2217 | /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ |
0c2ec32b DH |
2218 | if (folio_test_hugetlb(folio)) { |
2219 | if (anon_exclusive && | |
2220 | hugetlb_try_share_anon_rmap(folio)) { | |
935d4f0c RR |
2221 | set_huge_pte_at(mm, address, pvmw.pte, |
2222 | pteval, hsz); | |
0c2ec32b DH |
2223 | ret = false; |
2224 | page_vma_mapped_walk_done(&pvmw); | |
2225 | break; | |
2226 | } | |
2227 | } else if (anon_exclusive && | |
e3b4b137 | 2228 | folio_try_share_anon_rmap_pte(folio, subpage)) { |
0c2ec32b | 2229 | set_pte_at(mm, address, pvmw.pte, pteval); |
6c287605 DH |
2230 | ret = false; |
2231 | page_vma_mapped_walk_done(&pvmw); | |
2232 | break; | |
2233 | } | |
a98a2f0c AP |
2234 | |
2235 | /* | |
2236 | * Store the pfn of the page in a special migration | |
2237 | * pte. do_swap_page() will wait until the migration | |
2238 | * pte is removed and then restart fault handling. | |
2239 | */ | |
2240 | if (pte_write(pteval)) | |
2241 | entry = make_writable_migration_entry( | |
2242 | page_to_pfn(subpage)); | |
6c287605 DH |
2243 | else if (anon_exclusive) |
2244 | entry = make_readable_exclusive_migration_entry( | |
2245 | page_to_pfn(subpage)); | |
a98a2f0c AP |
2246 | else |
2247 | entry = make_readable_migration_entry( | |
2248 | page_to_pfn(subpage)); | |
2e346877 PX |
2249 | if (pte_young(pteval)) |
2250 | entry = make_migration_entry_young(entry); | |
2251 | if (pte_dirty(pteval)) | |
2252 | entry = make_migration_entry_dirty(entry); | |
a98a2f0c AP |
2253 | swp_pte = swp_entry_to_pte(entry); |
2254 | if (pte_soft_dirty(pteval)) | |
2255 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | |
2256 | if (pte_uffd_wp(pteval)) | |
2257 | swp_pte = pte_swp_mkuffd_wp(swp_pte); | |
5d4af619 | 2258 | if (folio_test_hugetlb(folio)) |
935d4f0c RR |
2259 | set_huge_pte_at(mm, address, pvmw.pte, swp_pte, |
2260 | hsz); | |
5d4af619 BW |
2261 | else |
2262 | set_pte_at(mm, address, pvmw.pte, swp_pte); | |
4cc79b33 | 2263 | trace_set_migration_pte(address, pte_val(swp_pte), |
059ab7be | 2264 | folio_order(folio)); |
a98a2f0c AP |
2265 | /* |
2266 | * No need to invalidate here; it will synchronize |
2267 | * against the special swap migration pte. |
2268 | */ | |
2269 | } | |
2270 | ||
e135826b DH |
2271 | if (unlikely(folio_test_hugetlb(folio))) |
2272 | hugetlb_remove_rmap(folio); | |
2273 | else | |
ca1a0746 | 2274 | folio_remove_rmap_pte(folio, subpage, vma); |
b7435507 | 2275 | if (vma->vm_flags & VM_LOCKED) |
96f97c43 | 2276 | mlock_drain_local(); |
4b8554c5 | 2277 | folio_put(folio); |
a98a2f0c AP |
2278 | } |
2279 | ||
2280 | mmu_notifier_invalidate_range_end(&range); | |
2281 | ||
2282 | return ret; | |
2283 | } | |
2284 | ||
2285 | /** | |
2286 | * try_to_migrate - try to replace all page table mappings with swap entries | |
4b8554c5 | 2287 | * @folio: the folio to replace page table entries for |
a98a2f0c AP |
2288 | * @flags: action and flags |
2289 | * | |
4b8554c5 MWO |
2290 | * Tries to remove all the page table entries which are mapping this folio and |
2291 | * replace them with special swap entries. Caller must hold the folio lock. | |
a98a2f0c | 2292 | */ |
4b8554c5 | 2293 | void try_to_migrate(struct folio *folio, enum ttu_flags flags) |
a98a2f0c AP |
2294 | { |
2295 | struct rmap_walk_control rwc = { | |
2296 | .rmap_one = try_to_migrate_one, | |
2297 | .arg = (void *)flags, | |
f3ad032c | 2298 | .done = folio_not_mapped, |
2f031c6f | 2299 | .anon_lock = folio_lock_anon_vma_read, |
a98a2f0c AP |
2300 | }; |
2301 | ||
2302 | /* | |
2303 | * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and | |
7e12beb8 | 2304 | * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags. |
a98a2f0c AP |
2305 | */ |
2306 | if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | | |
7e12beb8 | 2307 | TTU_SYNC | TTU_BATCH_FLUSH))) |
a98a2f0c AP |
2308 | return; |
2309 | ||
f25cbb7a AS |
2310 | if (folio_is_zone_device(folio) && |
2311 | (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) | |
6c855fce HD |
2312 | return; |
2313 | ||
52629506 JK |
2314 | /* |
2315 | * During exec, a temporary VMA is setup and later moved. | |
2316 | * The VMA is moved under the anon_vma lock but not the | |
2317 | * page tables leading to a race where migration cannot | |
2318 | * find the migration ptes. Rather than increasing the | |
2319 | * locking requirements of exec(), migration skips | |
2320 | * temporary VMAs until after exec() completes. | |
2321 | */ | |
4b8554c5 | 2322 | if (!folio_test_ksm(folio) && folio_test_anon(folio)) |
52629506 JK |
2323 | rwc.invalid_vma = invalid_migration_vma; |
2324 | ||
2a52bcbc | 2325 | if (flags & TTU_RMAP_LOCKED) |
2f031c6f | 2326 | rmap_walk_locked(folio, &rwc); |
2a52bcbc | 2327 | else |
2f031c6f | 2328 | rmap_walk(folio, &rwc); |
b291f000 | 2329 | } |
e9995ef9 | 2330 | |
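/*
 * Illustrative sketch (editorial addition, not part of rmap.c):
 * migration-style use of try_to_migrate().  The helper is hypothetical;
 * it follows the pattern used by the migration core: replace all mappings
 * of the locked source folio with migration entries, and only proceed if
 * none remain.
 */
static bool example_freeze_folio_for_migration(struct folio *src)
{
	/* Caller holds the folio lock, as required by try_to_migrate(). */
	try_to_migrate(src, 0);
	/*
	 * If mappings remain (e.g. a racing mapping was added), the caller
	 * restores them with remove_migration_ptes() and fails this
	 * migration attempt.
	 */
	return !folio_mapped(src);
}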
b756a3b5 AP |
2331 | #ifdef CONFIG_DEVICE_PRIVATE |
2332 | struct make_exclusive_args { | |
2333 | struct mm_struct *mm; | |
2334 | unsigned long address; | |
2335 | void *owner; | |
2336 | bool valid; | |
2337 | }; | |
2338 | ||
2f031c6f | 2339 | static bool page_make_device_exclusive_one(struct folio *folio, |
b756a3b5 AP |
2340 | struct vm_area_struct *vma, unsigned long address, void *priv) |
2341 | { | |
2342 | struct mm_struct *mm = vma->vm_mm; | |
0d251485 | 2343 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
b756a3b5 AP |
2344 | struct make_exclusive_args *args = priv; |
2345 | pte_t pteval; | |
2346 | struct page *subpage; | |
2347 | bool ret = true; | |
2348 | struct mmu_notifier_range range; | |
2349 | swp_entry_t entry; | |
2350 | pte_t swp_pte; | |
c33c7948 | 2351 | pte_t ptent; |
b756a3b5 | 2352 | |
7d4a8be0 | 2353 | mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, |
b756a3b5 | 2354 | vma->vm_mm, address, min(vma->vm_end, |
0d251485 MWO |
2355 | address + folio_size(folio)), |
2356 | args->owner); | |
b756a3b5 AP |
2357 | mmu_notifier_invalidate_range_start(&range); |
2358 | ||
2359 | while (page_vma_mapped_walk(&pvmw)) { | |
2360 | /* Unexpected PMD-mapped THP? */ | |
0d251485 | 2361 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
b756a3b5 | 2362 | |
c33c7948 RR |
2363 | ptent = ptep_get(pvmw.pte); |
2364 | if (!pte_present(ptent)) { | |
b756a3b5 AP |
2365 | ret = false; |
2366 | page_vma_mapped_walk_done(&pvmw); | |
2367 | break; | |
2368 | } | |
2369 | ||
0d251485 | 2370 | subpage = folio_page(folio, |
c33c7948 | 2371 | pte_pfn(ptent) - folio_pfn(folio)); |
b756a3b5 AP |
2372 | address = pvmw.address; |
2373 | ||
2374 | /* Nuke the page table entry. */ | |
c33c7948 | 2375 | flush_cache_page(vma, address, pte_pfn(ptent)); |
b756a3b5 AP |
2376 | pteval = ptep_clear_flush(vma, address, pvmw.pte); |
2377 | ||
0d251485 | 2378 | /* Set the dirty flag on the folio now the pte is gone. */ |
b756a3b5 | 2379 | if (pte_dirty(pteval)) |
0d251485 | 2380 | folio_mark_dirty(folio); |
b756a3b5 AP |
2381 | |
2382 | /* | |
2383 | * Check that our target page is still mapped at the expected | |
2384 | * address. | |
2385 | */ | |
2386 | if (args->mm == mm && args->address == address && | |
2387 | pte_write(pteval)) | |
2388 | args->valid = true; | |
2389 | ||
2390 | /* | |
2391 | * Store the pfn of the page in a special migration | |
2392 | * pte. do_swap_page() will wait until the migration | |
2393 | * pte is removed and then restart fault handling. | |
2394 | */ | |
2395 | if (pte_write(pteval)) | |
2396 | entry = make_writable_device_exclusive_entry( | |
2397 | page_to_pfn(subpage)); | |
2398 | else | |
2399 | entry = make_readable_device_exclusive_entry( | |
2400 | page_to_pfn(subpage)); | |
2401 | swp_pte = swp_entry_to_pte(entry); | |
2402 | if (pte_soft_dirty(pteval)) | |
2403 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | |
2404 | if (pte_uffd_wp(pteval)) | |
2405 | swp_pte = pte_swp_mkuffd_wp(swp_pte); | |
2406 | ||
2407 | set_pte_at(mm, address, pvmw.pte, swp_pte); | |
2408 | ||
2409 | /* | |
2410 | * There is a reference on the page for the swap entry which has | |
2411 | * been removed, so we shouldn't take another. |
2412 | */ | |
ca1a0746 | 2413 | folio_remove_rmap_pte(folio, subpage, vma); |
b756a3b5 AP |
2414 | } |
2415 | ||
2416 | mmu_notifier_invalidate_range_end(&range); | |
2417 | ||
2418 | return ret; | |
2419 | } | |
2420 | ||
2421 | /** | |
0d251485 MWO |
2422 | * folio_make_device_exclusive - Mark the folio exclusively owned by a device. |
2423 | * @folio: The folio to replace page table entries for. | |
2424 | * @mm: The mm_struct where the folio is expected to be mapped. | |
2425 | * @address: Address where the folio is expected to be mapped. | |
b756a3b5 AP |
2426 | * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks |
2427 | * | |
0d251485 MWO |
2428 | * Tries to remove all the page table entries which are mapping this |
2429 | * folio and replace them with special device exclusive swap entries to | |
2430 | * grant a device exclusive access to the folio. | |
b756a3b5 | 2431 | * |
0d251485 MWO |
2432 | * Context: Caller must hold the folio lock. |
2433 | * Return: false if the page is still mapped, or if it could not be unmapped | |
b756a3b5 AP |
2434 | * from the expected address. Otherwise returns true (success). |
2435 | */ | |
0d251485 MWO |
2436 | static bool folio_make_device_exclusive(struct folio *folio, |
2437 | struct mm_struct *mm, unsigned long address, void *owner) | |
b756a3b5 AP |
2438 | { |
2439 | struct make_exclusive_args args = { | |
2440 | .mm = mm, | |
2441 | .address = address, | |
2442 | .owner = owner, | |
2443 | .valid = false, | |
2444 | }; | |
2445 | struct rmap_walk_control rwc = { | |
2446 | .rmap_one = page_make_device_exclusive_one, | |
f3ad032c | 2447 | .done = folio_not_mapped, |
2f031c6f | 2448 | .anon_lock = folio_lock_anon_vma_read, |
b756a3b5 AP |
2449 | .arg = &args, |
2450 | }; | |
2451 | ||
2452 | /* | |
0d251485 MWO |
2453 | * Restrict to anonymous folios for now to avoid potential writeback |
2454 | * issues. | |
b756a3b5 | 2455 | */ |
0d251485 | 2456 | if (!folio_test_anon(folio)) |
b756a3b5 AP |
2457 | return false; |
2458 | ||
2f031c6f | 2459 | rmap_walk(folio, &rwc); |
b756a3b5 | 2460 | |
0d251485 | 2461 | return args.valid && !folio_mapcount(folio); |
b756a3b5 AP |
2462 | } |
2463 | ||
2464 | /** | |
2465 | * make_device_exclusive_range() - Mark a range for exclusive use by a device | |
dd062302 | 2466 | * @mm: mm_struct of associated target process |
b756a3b5 AP |
2467 | * @start: start of the region to mark for exclusive device access |
2468 | * @end: end address of region | |
2469 | * @pages: returns the pages which were successfully marked for exclusive access | |
2470 | * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering | |
2471 | * | |
2472 | * Returns: number of pages found in the range by GUP. A page is marked for | |
2473 | * exclusive access only if the page pointer is non-NULL. | |
2474 | * | |
2475 | * This function finds ptes mapping page(s) in the given address range, locks |
2476 | * them and replaces mappings with special swap entries preventing userspace CPU | |
2477 | * access. On fault these entries are replaced with the original mapping after | |
2478 | * calling MMU notifiers. | |
2479 | * | |
2480 | * A driver using this to program access from a device must use an mmu notifier |
2481 | * critical section to hold a device-specific lock during programming. Once |
2482 | * programming is complete it should drop the page lock and reference, after |
2483 | * which point CPU access to the page will revoke the exclusive access. |
2484 | */ | |
2485 | int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, | |
2486 | unsigned long end, struct page **pages, | |
2487 | void *owner) | |
2488 | { | |
2489 | long npages = (end - start) >> PAGE_SHIFT; | |
2490 | long i; | |
2491 | ||
2492 | npages = get_user_pages_remote(mm, start, npages, | |
2493 | FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, | |
ca5e8632 | 2494 | pages, NULL); |
b756a3b5 AP |
2495 | if (npages < 0) |
2496 | return npages; | |
2497 | ||
2498 | for (i = 0; i < npages; i++, start += PAGE_SIZE) { | |
0d251485 MWO |
2499 | struct folio *folio = page_folio(pages[i]); |
2500 | if (PageTail(pages[i]) || !folio_trylock(folio)) { | |
2501 | folio_put(folio); | |
b756a3b5 AP |
2502 | pages[i] = NULL; |
2503 | continue; | |
2504 | } | |
2505 | ||
0d251485 MWO |
2506 | if (!folio_make_device_exclusive(folio, mm, start, owner)) { |
2507 | folio_unlock(folio); | |
2508 | folio_put(folio); | |
b756a3b5 AP |
2509 | pages[i] = NULL; |
2510 | } | |
2511 | } | |
2512 | ||
2513 | return npages; | |
2514 | } | |
2515 | EXPORT_SYMBOL_GPL(make_device_exclusive_range); | |
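/*
 * Illustrative sketch (editorial addition, not part of rmap.c): driver-style
 * use of make_device_exclusive_range() for a single page, as described in
 * the kernel-doc above (and, like that API, only meaningful with
 * CONFIG_DEVICE_PRIVATE).  The owner cookie and the device-programming step
 * are hypothetical; dropping the page lock and reference mirrors what the
 * documentation requires once programming is complete.
 */
static int example_make_page_device_exclusive(struct mm_struct *mm,
		unsigned long addr, void *owner)
{
	struct page *page = NULL;
	int ret;

	ret = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					  &page, owner);
	if (ret < 0)
		return ret;
	if (!page)
		return -EBUSY;

	/* ...program the device mapping under an MMU notifier lock... */

	unlock_page(page);
	put_page(page);
	return 0;
}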
2516 | #endif | |
2517 | ||
01d8b20d | 2518 | void __put_anon_vma(struct anon_vma *anon_vma) |
76545066 | 2519 | { |
01d8b20d | 2520 | struct anon_vma *root = anon_vma->root; |
76545066 | 2521 | |
624483f3 | 2522 | anon_vma_free(anon_vma); |
01d8b20d PZ |
2523 | if (root != anon_vma && atomic_dec_and_test(&root->refcount)) |
2524 | anon_vma_free(root); | |
76545066 | 2525 | } |
76545066 | 2526 | |
2f031c6f | 2527 | static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, |
6d4675e6 | 2528 | struct rmap_walk_control *rwc) |
faecd8dd JK |
2529 | { |
2530 | struct anon_vma *anon_vma; | |
2531 | ||
0dd1c7bb | 2532 | if (rwc->anon_lock) |
6d4675e6 | 2533 | return rwc->anon_lock(folio, rwc); |
0dd1c7bb | 2534 | |
faecd8dd | 2535 | /* |
2f031c6f | 2536 | * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() |
faecd8dd | 2537 | * because that depends on page_mapped(); but not all its usages |
c1e8d7c6 | 2538 | * are holding mmap_lock. Users without mmap_lock are required to |
faecd8dd JK |
2539 | * take a reference count to prevent the anon_vma from disappearing. |
2540 | */ | |
e05b3453 | 2541 | anon_vma = folio_anon_vma(folio); |
faecd8dd JK |
2542 | if (!anon_vma) |
2543 | return NULL; | |
2544 | ||
6d4675e6 MK |
2545 | if (anon_vma_trylock_read(anon_vma)) |
2546 | goto out; | |
2547 | ||
2548 | if (rwc->try_lock) { | |
2549 | anon_vma = NULL; | |
2550 | rwc->contended = true; | |
2551 | goto out; | |
2552 | } | |
2553 | ||
faecd8dd | 2554 | anon_vma_lock_read(anon_vma); |
6d4675e6 | 2555 | out: |
faecd8dd JK |
2556 | return anon_vma; |
2557 | } | |
2558 | ||
e9995ef9 | 2559 | /* |
e8351ac9 JK |
2560 | * rmap_walk_anon - do something to an anonymous page using the object-based |
2561 | * rmap method | |
89be82b4 | 2562 | * @folio: the folio to be handled |
e8351ac9 | 2563 | * @rwc: control variable according to each walk type |
89be82b4 | 2564 | * @locked: caller holds relevant rmap lock |
e8351ac9 | 2565 | * |
89be82b4 KS |
2566 | * Find all the mappings of a folio using the mapping pointer and the vma |
2567 | * chains contained in the anon_vma struct it points to. | |
e9995ef9 | 2568 | */ |
84fbbe21 | 2569 | static void rmap_walk_anon(struct folio *folio, |
6d4675e6 | 2570 | struct rmap_walk_control *rwc, bool locked) |
e9995ef9 HD |
2571 | { |
2572 | struct anon_vma *anon_vma; | |
a8fa41ad | 2573 | pgoff_t pgoff_start, pgoff_end; |
5beb4930 | 2574 | struct anon_vma_chain *avc; |
e9995ef9 | 2575 | |
b9773199 | 2576 | if (locked) { |
e05b3453 | 2577 | anon_vma = folio_anon_vma(folio); |
b9773199 | 2578 | /* anon_vma disappear under us? */ |
e05b3453 | 2579 | VM_BUG_ON_FOLIO(!anon_vma, folio); |
b9773199 | 2580 | } else { |
2f031c6f | 2581 | anon_vma = rmap_walk_anon_lock(folio, rwc); |
b9773199 | 2582 | } |
e9995ef9 | 2583 | if (!anon_vma) |
1df631ae | 2584 | return; |
faecd8dd | 2585 | |
2f031c6f MWO |
2586 | pgoff_start = folio_pgoff(folio); |
2587 | pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; | |
a8fa41ad KS |
2588 | anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, |
2589 | pgoff_start, pgoff_end) { | |
5beb4930 | 2590 | struct vm_area_struct *vma = avc->vma; |
2f031c6f | 2591 | unsigned long address = vma_address(&folio->page, vma); |
0dd1c7bb | 2592 | |
494334e4 | 2593 | VM_BUG_ON_VMA(address == -EFAULT, vma); |
ad12695f AA |
2594 | cond_resched(); |
2595 | ||
0dd1c7bb JK |
2596 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
2597 | continue; | |
2598 | ||
2f031c6f | 2599 | if (!rwc->rmap_one(folio, vma, address, rwc->arg)) |
e9995ef9 | 2600 | break; |
2f031c6f | 2601 | if (rwc->done && rwc->done(folio)) |
0dd1c7bb | 2602 | break; |
e9995ef9 | 2603 | } |
b9773199 KS |
2604 | |
2605 | if (!locked) | |
2606 | anon_vma_unlock_read(anon_vma); | |
e9995ef9 HD |
2607 | } |
2608 | ||
e8351ac9 JK |
2609 | /* |
2610 | * rmap_walk_file - do something to a file page using the object-based rmap method |
89be82b4 | 2611 | * @folio: the folio to be handled |
e8351ac9 | 2612 | * @rwc: control variable according to each walk type |
89be82b4 | 2613 | * @locked: caller holds relevant rmap lock |
e8351ac9 | 2614 | * |
89be82b4 | 2615 | * Find all the mappings of a folio using the mapping pointer and the vma chains |
e8351ac9 | 2616 | * contained in the address_space struct it points to. |
e8351ac9 | 2617 | */ |
84fbbe21 | 2618 | static void rmap_walk_file(struct folio *folio, |
6d4675e6 | 2619 | struct rmap_walk_control *rwc, bool locked) |
e9995ef9 | 2620 | { |
2f031c6f | 2621 | struct address_space *mapping = folio_mapping(folio); |
a8fa41ad | 2622 | pgoff_t pgoff_start, pgoff_end; |
e9995ef9 | 2623 | struct vm_area_struct *vma; |
e9995ef9 | 2624 | |
9f32624b JK |
2625 | /* |
2626 | * The page lock not only makes sure that page->mapping cannot | |
2627 | * suddenly be NULLified by truncation; it also makes sure that the |
2628 | * structure at mapping cannot be freed and reused yet, | |
c8c06efa | 2629 | * so we can safely take mapping->i_mmap_rwsem. |
9f32624b | 2630 | */ |
2f031c6f | 2631 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
9f32624b | 2632 | |
e9995ef9 | 2633 | if (!mapping) |
1df631ae | 2634 | return; |
3dec0ba0 | 2635 | |
2f031c6f MWO |
2636 | pgoff_start = folio_pgoff(folio); |
2637 | pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; | |
6d4675e6 MK |
2638 | if (!locked) { |
2639 | if (i_mmap_trylock_read(mapping)) | |
2640 | goto lookup; | |
2641 | ||
2642 | if (rwc->try_lock) { | |
2643 | rwc->contended = true; | |
2644 | return; | |
2645 | } | |
2646 | ||
b9773199 | 2647 | i_mmap_lock_read(mapping); |
6d4675e6 MK |
2648 | } |
2649 | lookup: | |
a8fa41ad KS |
2650 | vma_interval_tree_foreach(vma, &mapping->i_mmap, |
2651 | pgoff_start, pgoff_end) { | |
2f031c6f | 2652 | unsigned long address = vma_address(&folio->page, vma); |
0dd1c7bb | 2653 | |
494334e4 | 2654 | VM_BUG_ON_VMA(address == -EFAULT, vma); |
ad12695f AA |
2655 | cond_resched(); |
2656 | ||
0dd1c7bb JK |
2657 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
2658 | continue; | |
2659 | ||
2f031c6f | 2660 | if (!rwc->rmap_one(folio, vma, address, rwc->arg)) |
0dd1c7bb | 2661 | goto done; |
2f031c6f | 2662 | if (rwc->done && rwc->done(folio)) |
0dd1c7bb | 2663 | goto done; |
e9995ef9 | 2664 | } |
0dd1c7bb | 2665 | |
0dd1c7bb | 2666 | done: |
b9773199 KS |
2667 | if (!locked) |
2668 | i_mmap_unlock_read(mapping); | |
e9995ef9 HD |
2669 | } |
2670 | ||
6d4675e6 | 2671 | void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) |
e9995ef9 | 2672 | { |
2f031c6f MWO |
2673 | if (unlikely(folio_test_ksm(folio))) |
2674 | rmap_walk_ksm(folio, rwc); | |
2675 | else if (folio_test_anon(folio)) | |
2676 | rmap_walk_anon(folio, rwc, false); | |
b9773199 | 2677 | else |
2f031c6f | 2678 | rmap_walk_file(folio, rwc, false); |
b9773199 KS |
2679 | } |
2680 | ||
2681 | /* Like rmap_walk, but caller holds relevant rmap lock */ | |
6d4675e6 | 2682 | void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) |
b9773199 KS |
2683 | { |
2684 | /* no ksm support for now */ | |
2f031c6f MWO |
2685 | VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); |
2686 | if (folio_test_anon(folio)) | |
2687 | rmap_walk_anon(folio, rwc, true); | |
e9995ef9 | 2688 | else |
2f031c6f | 2689 | rmap_walk_file(folio, rwc, true); |
e9995ef9 | 2690 | } |
0fe6e20b | 2691 | |
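/*
 * Illustrative sketch (editorial addition, not part of rmap.c): a minimal
 * custom rmap walk that counts the VMAs currently mapping a folio.  The
 * helper names are hypothetical; the rmap_walk_control usage matches
 * try_to_unmap() and try_to_migrate() above, and the folio must be locked
 * by the caller, as rmap_walk_file() asserts.
 */
static bool example_count_one(struct folio *folio, struct vm_area_struct *vma,
		unsigned long address, void *arg)
{
	(*(int *)arg)++;
	return true;	/* keep walking */
}

static int example_count_mappings(struct folio *folio)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = example_count_one,
		.arg = &count,
	};

	rmap_walk(folio, &rwc);
	return count;
}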
e3390f67 | 2692 | #ifdef CONFIG_HUGETLB_PAGE |
0fe6e20b | 2693 | /* |
451b9514 | 2694 | * The following two functions are for anonymous (private mapped) hugepages. |
0fe6e20b NH |
2695 | * Unlike common anonymous pages, anonymous hugepages have no accounting code |
2696 | * and no lru code, because we handle hugepages differently from common pages. | |
2697 | */ | |
9d5fafd5 DH |
2698 | void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, |
2699 | unsigned long address, rmap_t flags) | |
0fe6e20b | 2700 | { |
a4ea1864 | 2701 | VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); |
c5c54003 DH |
2702 | VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); |
2703 | ||
132b180f | 2704 | atomic_inc(&folio->_entire_mapcount); |
c66db8c0 | 2705 | if (flags & RMAP_EXCLUSIVE) |
09c55050 | 2706 | SetPageAnonExclusive(&folio->page); |
132b180f | 2707 | VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && |
09c55050 | 2708 | PageAnonExclusive(&folio->page), folio); |
0fe6e20b NH |
2709 | } |
2710 | ||
9d5fafd5 DH |
2711 | void hugetlb_add_new_anon_rmap(struct folio *folio, |
2712 | struct vm_area_struct *vma, unsigned long address) | |
0fe6e20b | 2713 | { |
a4ea1864 DH |
2714 | VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); |
2715 | ||
0fe6e20b | 2716 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
cb67f428 | 2717 | /* increment count (starts at -1) */ |
db4e5dbd MWO |
2718 | atomic_set(&folio->_entire_mapcount, 0); |
2719 | folio_clear_hugetlb_restore_reserve(folio); | |
c66db8c0 DH |
2720 | __folio_set_anon(folio, vma, address, true); |
2721 | SetPageAnonExclusive(&folio->page); | |
0fe6e20b | 2722 | } |
e3390f67 | 2723 | #endif /* CONFIG_HUGETLB_PAGE */ |