#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>
#include <linux/rmap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	/* Limit batching if we have delayed rmaps pending */
	if (tlb->delayed_rmap && tlb->active != &tlb->local)
		return false;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}
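
/*
 * Not part of the original file: a sketch of the list this function grows.
 * tlb->local is the batch embedded in the mmu_gather itself (backed by
 * tlb->__pages[]); every further batch is a single GFP_NOWAIT page:
 *
 *	tlb->local --> batch --> batch --> NULL
 *	                ^
 *	          tlb->active (the batch currently being filled)
 *
 * If the allocation fails or MAX_GATHER_BATCH_COUNT is reached, this returns
 * false and the caller is expected to flush via tlb_flush_mmu() and retry.
 */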

#ifdef CONFIG_SMP
static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma)
{
	struct encoded_page **pages = batch->encoded_pages;

	for (int i = 0; i < batch->nr; i++) {
		struct encoded_page *enc = pages[i];

		if (encoded_page_flags(enc) & ENCODED_PAGE_BIT_DELAY_RMAP) {
			struct page *page = encoded_page_ptr(enc);
			unsigned int nr_pages = 1;

			if (unlikely(encoded_page_flags(enc) &
				     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
				nr_pages = encoded_nr_pages(pages[++i]);

			folio_remove_rmap_ptes(page_folio(page), page, nr_pages,
					       vma);
		}
	}
}

/**
 * tlb_flush_rmaps - do pending rmap removals after we have flushed the TLB
 * @tlb: the current mmu_gather
 * @vma: The memory area from which the pages are being removed.
 *
 * Note that because of how tlb_next_batch() above works, we will
 * never start multiple new batches with pending delayed rmaps, so
 * we only need to walk through the current active batch and the
 * original local one.
 */
void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->delayed_rmap)
		return;

	tlb_flush_rmap_batch(&tlb->local, vma);
	if (tlb->active != &tlb->local)
		tlb_flush_rmap_batch(tlb->active, vma);
	tlb->delayed_rmap = 0;
}
#endif
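
/*
 * Not part of the original file: a hedged sketch of the calling order the
 * delayed-rmap machinery above assumes; the three calls stand in for the
 * real PTE-zapping callers rather than quoting their code:
 *
 *	__tlb_remove_folio_pages(tlb, page, nr, true);	// gather, delay rmap
 *	...
 *	tlb_flush_mmu_tlbonly(tlb);	// no CPU can dirty the pages anymore
 *	tlb_flush_rmaps(tlb, vma);	// now it is safe to drop the rmaps
 */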

/*
 * We might end up freeing a lot of pages. Reschedule on a regular
 * basis to avoid soft lockups in configurations without full
 * preemption enabled. The magic number of 512 folios seems to work.
 */
#define MAX_NR_FOLIOS_PER_FREE		512

static void __tlb_batch_free_encoded_pages(struct mmu_gather_batch *batch)
{
	struct encoded_page **pages = batch->encoded_pages;
	unsigned int nr, nr_pages;

	while (batch->nr) {
		if (!page_poisoning_enabled_static() && !want_init_on_free()) {
			nr = min(MAX_NR_FOLIOS_PER_FREE, batch->nr);

			/*
			 * Make sure we cover page + nr_pages, and don't leave
			 * nr_pages behind when capping the number of entries.
			 */
			if (unlikely(encoded_page_flags(pages[nr - 1]) &
				     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
				nr++;
		} else {
			/*
			 * With page poisoning and init_on_free, the time it
			 * takes to free memory grows proportionally with the
			 * actual memory size. Therefore, limit based on the
			 * actual memory size and not the number of involved
			 * folios.
			 */
			for (nr = 0, nr_pages = 0;
			     nr < batch->nr && nr_pages < MAX_NR_FOLIOS_PER_FREE;
			     nr++) {
				if (unlikely(encoded_page_flags(pages[nr]) &
					     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
					nr_pages += encoded_nr_pages(pages[++nr]);
				else
					nr_pages++;
			}
		}

		free_pages_and_swap_cache(pages, nr);
		pages += nr;
		batch->nr -= nr;

		cond_resched();
	}
}
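
/*
 * Not part of the original file: the two-slot encoding the capping logic
 * above has to keep together. A run of nr_pages pages starting at "page" is
 * stored as (see __tlb_remove_folio_pages_size() below):
 *
 *	encoded_pages[i]     = encode_page(page, flags | ENCODED_PAGE_BIT_NR_PAGES_NEXT);
 *	encoded_pages[i + 1] = encode_nr_pages(nr_pages);
 *
 * which is why "nr" is bumped past a trailing NR_PAGES_NEXT entry instead of
 * splitting the pair across two free_pages_and_swap_cache() calls.
 */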

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next)
		__tlb_batch_free_encoded_pages(batch);
	tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

static bool __tlb_remove_folio_pages_size(struct mmu_gather *tlb,
		struct page *page, unsigned int nr_pages, bool delay_rmap,
		int page_size)
{
	int flags = delay_rmap ? ENCODED_PAGE_BIT_DELAY_RMAP : 0;
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
	VM_WARN_ON_ONCE(nr_pages != 1 && page_size != PAGE_SIZE);
	VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	if (likely(nr_pages == 1)) {
		batch->encoded_pages[batch->nr++] = encode_page(page, flags);
	} else {
		flags |= ENCODED_PAGE_BIT_NR_PAGES_NEXT;
		batch->encoded_pages[batch->nr++] = encode_page(page, flags);
		batch->encoded_pages[batch->nr++] = encode_nr_pages(nr_pages);
	}
	/*
	 * Make sure that we can always add another "page" + "nr_pages",
	 * requiring two entries instead of only a single one.
	 */
	if (batch->nr >= batch->max - 1) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max - 1, page);

	return false;
}

bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
		unsigned int nr_pages, bool delay_rmap)
{
	return __tlb_remove_folio_pages_size(tlb, page, nr_pages, delay_rmap,
					     PAGE_SIZE);
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
		bool delay_rmap, int page_size)
{
	return __tlb_remove_folio_pages_size(tlb, page, 1, delay_rmap, page_size);
}
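
/*
 * Not part of the original file: a hedged sketch of how the unmap path is
 * expected to react to the return value; real callers typically finish the
 * current PTE run and drop the page-table lock before flushing:
 *
 *	if (__tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE)) {
 *		// gather is full and no new batch could be added:
 *		// flush the TLB and free what has been collected
 *		tlb_flush_mmu(tlb);
 *	}
 */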

#endif /* MMU_GATHER_NO_GATHER */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void tlb_remove_table_sync_one(void)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}
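
/*
 * Not part of the original file: a hedged sketch of the lockless-walker side
 * that the semi-RCU scheme above pairs with (gup_fast()-style, simplified):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// blocks the IPI sync *and* holds off
 *					// the sched-flavoured grace period
 *	// ... walk pgd/p4d/pud/pmd/pte without taking locks ...
 *	local_irq_restore(flags);
 *
 * Either way, a table page cannot be freed while such a walk is in flight.
 */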
196d9d8b | 289 | |
0d6e24d4 | 290 | #else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */ |
196d9d8b | 291 | |
0d6e24d4 PZ |
292 | static void tlb_remove_table_free(struct mmu_table_batch *batch) |
293 | { | |
294 | __tlb_remove_table_free(batch); | |
295 | } | |
296 | ||
297 | #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */ | |
298 | ||
299 | /* | |
300 | * If we want tlb_remove_table() to imply TLB invalidates. | |
301 | */ | |
302 | static inline void tlb_table_invalidate(struct mmu_gather *tlb) | |
303 | { | |
304 | if (tlb_needs_table_invalidate()) { | |
305 | /* | |
306 | * Invalidate page-table caches used by hardware walkers. Then | |
307 | * we still need to RCU-sched wait while freeing the pages | |
308 | * because software walkers can still be in-flight. | |
309 | */ | |
310 | tlb_flush_mmu_tlbonly(tlb); | |
311 | } | |
312 | } | |
313 | ||
314 | static void tlb_remove_table_one(void *table) | |
315 | { | |
316 | tlb_remove_table_sync_one(); | |
317 | __tlb_remove_table(table); | |
196d9d8b PZ |
318 | } |
319 | ||
0a8caf21 | 320 | static void tlb_table_flush(struct mmu_gather *tlb) |
196d9d8b PZ |
321 | { |
322 | struct mmu_table_batch **batch = &tlb->batch; | |
323 | ||
324 | if (*batch) { | |
325 | tlb_table_invalidate(tlb); | |
0d6e24d4 | 326 | tlb_remove_table_free(*batch); |
196d9d8b PZ |
327 | *batch = NULL; |
328 | } | |
329 | } | |
330 | ||
331 | void tlb_remove_table(struct mmu_gather *tlb, void *table) | |
332 | { | |
333 | struct mmu_table_batch **batch = &tlb->batch; | |
334 | ||
335 | if (*batch == NULL) { | |
336 | *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); | |
337 | if (*batch == NULL) { | |
338 | tlb_table_invalidate(tlb); | |
339 | tlb_remove_table_one(table); | |
340 | return; | |
341 | } | |
342 | (*batch)->nr = 0; | |
343 | } | |
344 | ||
345 | (*batch)->tables[(*batch)->nr++] = table; | |
346 | if ((*batch)->nr == MAX_TABLE_BATCH) | |
347 | tlb_table_flush(tlb); | |
348 | } | |
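
/*
 * Not part of the original file: a hedged, hypothetical example of the kind
 * of arch hook that feeds this function. Hook names and the cookie passed
 * (struct page, ptdesc, or an arch-private value) vary per architecture;
 * __tlb_remove_table() is what eventually consumes it:
 *
 *	static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 *					  unsigned long address)
 *	{
 *		tlb_remove_table(tlb, pte);
 *	}
 */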

static inline void tlb_table_init(struct mmu_gather *tlb)
{
	tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			     bool fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
	tlb->active     = &tlb->local;
	tlb->batch_count = 0;
#endif
	tlb->delayed_rmap = 0;

	tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, false);
}
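
/*
 * Not part of the original file: a hedged sketch of the canonical lifecycle.
 * The middle comment stands in for the real callers (munmap, exit_mmap, ...),
 * which pair the two calls around their page-table walk:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	// zap PTEs (gathering pages) and free page tables
 *	// (queueing them via tlb_remove_table())
 *	tlb_finish_mmu(&tlb);	// flush the TLB, then free everything gathered
 */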

/**
 * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * In this case, @mm is without users and we're going to destroy the
 * full address space (exit/execve).
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, true);
}

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb)
{
	/*
	 * If there are parallel threads doing PTE changes on the same range
	 * under a non-exclusive lock (e.g., mmap_lock read-side) but deferring
	 * the TLB flush by batching, one thread may end up seeing inconsistent
	 * PTEs and wind up with stale TLB entries. So flush the TLB forcefully
	 * if we detect parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force-flush everything in the given range. Otherwise we may
	 * end up with stale TLB entries on some architectures, e.g. aarch64,
	 * which can specify at what level to flush the TLB.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by avoiding
		 * multiple CPUs spamming TLBI messages at the same time.
		 *
		 * On x86, non-fullmm doesn't yield a significant difference
		 * against fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}
196d9d8b | 464 | |
1808d65b PZ |
465 | tlb_flush_mmu(tlb); |
466 | ||
580a586c | 467 | #ifndef CONFIG_MMU_GATHER_NO_GATHER |
1808d65b PZ |
468 | tlb_batch_list_free(tlb); |
469 | #endif | |
196d9d8b PZ |
470 | dec_tlb_flush_pending(tlb->mm); |
471 | } |