// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, [email protected]
 *          Gerhard Wichert, Siemens AG, [email protected]
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <[email protected]>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
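
/*
 * Worked example (illustrative): kmap_high() of a page with no current
 * mapping takes the count of a slot from 0 to 2 (1 for "mapped since the
 * last TLB flush" plus 1 for the new user); kunmap_high() drops it back
 * to 1, and flush_all_zero_pkmaps() resets it to 0 once the stale TLB
 * entries have been flushed, making the slot usable again.
 */
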
#ifdef CONFIG_HIGHMEM

/*
 * An architecture with an aliasing data cache may define the following
 * family of helper functions in its asm/highmem.h to control the cache
 * color of the virtual addresses where physical memory pages are mapped
 * by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine the color of the virtual address where the page should be
 * mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get the next index for mapping inside the PKMAP region for a page with
 * the given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine if the page index inside the PKMAP region (pkmap_nr) of the
 * given color has wrapped around the PKMAP region end. When this happens,
 * an attempt to flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get the head of the wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by the freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif
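
/*
 * Illustrative sketch (not taken from any real architecture): a port with
 * a two-color virtually indexed data cache could provide, among the other
 * helpers of this family, something like the following in its
 * asm/highmem.h:
 *
 *	static inline unsigned int get_pkmap_color(struct page *page)
 *	{
 *		return page_to_pfn(page) & 1;	// even/odd cache color
 *	}
 *	#define get_pkmap_color get_pkmap_color
 */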
102 | ||
ca79b0c2 AK |
103 | atomic_long_t _totalhigh_pages __read_mostly; |
104 | EXPORT_SYMBOL(_totalhigh_pages); | |
3e4d3af5 | 105 | |
9727688d | 106 | unsigned int __nr_free_highpages(void) |
c1f60a5a | 107 | { |
33499bfe | 108 | struct zone *zone; |
c1f60a5a CL |
109 | unsigned int pages = 0; |
110 | ||
33499bfe JK |
111 | for_each_populated_zone(zone) { |
112 | if (is_highmem(zone)) | |
113 | pages += zone_page_state(zone, NR_FREE_PAGES); | |
2a1e274a | 114 | } |
c1f60a5a CL |
115 | |
116 | return pages; | |
117 | } | |
118 | ||
1da177e4 | 119 | static int pkmap_count[LAST_PKMAP]; |
1da177e4 LT |
120 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); |
121 | ||
9727688d | 122 | pte_t *pkmap_page_table; |
1da177e4 | 123 | |
3297e760 NP |
124 | /* |
125 | * Most architectures have no use for kmap_high_get(), so let's abstract | |
126 | * the disabling of IRQ out of the locking in that case to save on a | |
127 | * potential useless overhead. | |
128 | */ | |
129 | #ifdef ARCH_NEEDS_KMAP_HIGH_GET | |
130 | #define lock_kmap() spin_lock_irq(&kmap_lock) | |
131 | #define unlock_kmap() spin_unlock_irq(&kmap_lock) | |
132 | #define lock_kmap_any(flags) spin_lock_irqsave(&kmap_lock, flags) | |
133 | #define unlock_kmap_any(flags) spin_unlock_irqrestore(&kmap_lock, flags) | |
134 | #else | |
135 | #define lock_kmap() spin_lock(&kmap_lock) | |
136 | #define unlock_kmap() spin_unlock(&kmap_lock) | |
137 | #define lock_kmap_any(flags) \ | |
138 | do { spin_lock(&kmap_lock); (void)(flags); } while (0) | |
139 | #define unlock_kmap_any(flags) \ | |
140 | do { spin_unlock(&kmap_lock); (void)(flags); } while (0) | |
141 | #endif | |
142 | ||
13f876ba | 143 | struct page *__kmap_to_page(void *vaddr) |
5a178119 MG |
144 | { |
145 | unsigned long addr = (unsigned long)vaddr; | |
146 | ||
498c2280 | 147 | if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) { |
4de22c05 | 148 | int i = PKMAP_NR(addr); |
9727688d | 149 | |
5a178119 MG |
150 | return pte_page(pkmap_page_table[i]); |
151 | } | |
152 | ||
153 | return virt_to_page(addr); | |
154 | } | |
13f876ba | 155 | EXPORT_SYMBOL(__kmap_to_page); |
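
/*
 * Example usage (illustrative sketch): the kmap_to_page() wrapper around
 * __kmap_to_page() recovers the page backing a kernel virtual address,
 * whether that address lives in the PKMAP area or in lowmem:
 *
 *	void *vaddr = kmap(page);
 *
 *	BUG_ON(kmap_to_page(vaddr) != page);
 *	kunmap(page);
 */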

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped.
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

void __kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep until somebody else unmaps their entries.
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_high);

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned then a matching
 * call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *) vaddr;
}
#endif
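
/*
 * Example (illustrative sketch) of the pin-if-mapped pattern that
 * kmap_high_get() enables, usable even from atomic context:
 *
 *	void *vaddr = kmap_high_get(page);	// pins an existing mapping
 *
 *	if (vaddr) {
 *		// ... operate on the already-mapped address ...
 *		kunmap_high(page);		// drop the extra reference
 *	}
 */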
310 | ||
77f6078a | 311 | /** |
4e9dc5df | 312 | * kunmap_high - unmap a highmem page into memory |
77f6078a | 313 | * @page: &struct page to unmap |
3297e760 NP |
314 | * |
315 | * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called | |
316 | * only from user context. | |
77f6078a | 317 | */ |
920c7a5d | 318 | void kunmap_high(struct page *page) |
1da177e4 LT |
319 | { |
320 | unsigned long vaddr; | |
321 | unsigned long nr; | |
3297e760 | 322 | unsigned long flags; |
1da177e4 | 323 | int need_wakeup; |
15de36a4 MF |
324 | unsigned int color = get_pkmap_color(page); |
325 | wait_queue_head_t *pkmap_map_wait; | |
1da177e4 | 326 | |
3297e760 | 327 | lock_kmap_any(flags); |
1da177e4 | 328 | vaddr = (unsigned long)page_address(page); |
75babcac | 329 | BUG_ON(!vaddr); |
1da177e4 LT |
330 | nr = PKMAP_NR(vaddr); |
331 | ||
332 | /* | |
333 | * A count must never go down to zero | |
334 | * without a TLB flush! | |
335 | */ | |
336 | need_wakeup = 0; | |
337 | switch (--pkmap_count[nr]) { | |
338 | case 0: | |
339 | BUG(); | |
340 | case 1: | |
341 | /* | |
342 | * Avoid an unnecessary wake_up() function call. | |
343 | * The common case is pkmap_count[] == 1, but | |
344 | * no waiters. | |
345 | * The tasks queued in the wait-queue are guarded | |
346 | * by both the lock in the wait-queue-head and by | |
347 | * the kmap_lock. As the kmap_lock is held here, | |
348 | * no need for the wait-queue-head's lock. Simply | |
349 | * test if the queue is empty. | |
350 | */ | |
15de36a4 MF |
351 | pkmap_map_wait = get_pkmap_wait_queue_head(color); |
352 | need_wakeup = waitqueue_active(pkmap_map_wait); | |
1da177e4 | 353 | } |
3297e760 | 354 | unlock_kmap_any(flags); |
1da177e4 LT |
355 | |
356 | /* do wake-up, if needed, race-free outside of the spin lock */ | |
357 | if (need_wakeup) | |
15de36a4 | 358 | wake_up(pkmap_map_wait); |
1da177e4 | 359 | } |
1da177e4 | 360 | EXPORT_SYMBOL(kunmap_high); |
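
/*
 * Example usage (illustrative sketch): kmap_high()/kunmap_high() are
 * normally reached through the kmap()/kunmap() wrappers from
 * <linux/highmem.h>; "copy_from_page" is a made-up helper:
 *
 *	static void copy_from_page(struct page *page, void *dst, size_t len)
 *	{
 *		void *vaddr = kmap(page);	// may sleep, not for IRQ context
 *
 *		memcpy(dst, vaddr, len);
 *		kunmap(page);
 *	}
 */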

void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (start1 >= end1)
		start1 = end1 = 0;
	if (start2 >= end2)
		start2 = end2 = 0;

	for (i = 0; i < compound_nr(page); i++) {
		void *kaddr = NULL;

		if (start1 >= PAGE_SIZE) {
			start1 -= PAGE_SIZE;
			end1 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);

			if (end1 > start1) {
				kaddr = kmap_local_page(page + i);
				memset(kaddr + start1, 0, this_end - start1);
			}
			end1 -= this_end;
			start1 = 0;
		}

		if (start2 >= PAGE_SIZE) {
			start2 -= PAGE_SIZE;
			end2 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);

			if (end2 > start2) {
				if (!kaddr)
					kaddr = kmap_local_page(page + i);
				memset(kaddr + start2, 0, this_end - start2);
			}
			end2 -= this_end;
			start2 = 0;
		}

		if (kaddr) {
			kunmap_local(kaddr);
			flush_dcache_page(page + i);
		}

		if (!end1 && !end2)
			break;
	}

	BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
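
/*
 * Example usage (illustrative sketch): a filesystem doing a sub-page
 * write covering bytes [from, to) can zero everything around it with:
 *
 *	zero_user_segments(page, 0, from, to, PAGE_SIZE);
 */
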
#endif /* CONFIG_HIGHMEM */

#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

/*
 * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
 * slot is left unused and acts as a guard page.
 */
#ifdef CONFIG_DEBUG_KMAP_LOCAL
# define KM_INCR	2
#else
# define KM_INCR	1
#endif

static inline int kmap_local_idx_push(void)
{
	WARN_ON_ONCE(in_hardirq() && !irqs_disabled());
	current->kmap_ctrl.idx += KM_INCR;
	BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
	return current->kmap_ctrl.idx - 1;
}

static inline int kmap_local_idx(void)
{
	return current->kmap_ctrl.idx - 1;
}

static inline void kmap_local_idx_pop(void)
{
	current->kmap_ctrl.idx -= KM_INCR;
	BUG_ON(current->kmap_ctrl.idx < 0);
}

#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif

#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
static inline void *arch_kmap_local_high_get(struct page *page)
{
	return NULL;
}
#endif

#ifndef arch_kmap_local_set_pte
#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
	set_pte_at(mm, vaddr, ptep, ptev)
#endif

/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
		return true;
	}
#endif
	return false;
}

static inline int kmap_local_calc_idx(int idx)
{
	return idx + KM_MAX_IDX * smp_processor_id();
}

static pte_t *__kmap_pte;

static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
{
	if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
		/*
		 * Set by the arch if __kmap_pte[-idx] does not produce
		 * the correct entry.
		 */
		return virt_to_kpte(vaddr);
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return &__kmap_pte[-idx];
}

void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte;
	unsigned long vaddr;
	int idx;

	/*
	 * Disable migration so the resulting virtual address is stable
	 * across preemption.
	 */
	migrate_disable();
	preempt_disable();
	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	kmap_pte = kmap_get_pte(vaddr, idx);
	BUG_ON(!pte_none(*kmap_pte));
	pteval = pfn_pte(pfn, prot);
	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
	arch_kmap_local_post_map(vaddr, pteval);
	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
	preempt_enable();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);

void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	void *kmap;

	/*
	 * To broaden the usage of the actual kmap_local() machinery, always
	 * map pages when debugging is enabled and the architecture has no
	 * problems with alias mappings.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
		return page_address(page);

	/* Try kmap_high_get() if the architecture has it enabled */
	kmap = arch_kmap_local_high_get(page);
	if (kmap)
		return kmap;

	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
}
EXPORT_SYMBOL(__kmap_local_page_prot);

void kunmap_local_indexed(void *vaddr)
{
	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
	pte_t *kmap_pte;
	int idx;

	if (addr < __fix_to_virt(FIX_KMAP_END) ||
	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) {
			/* This _should_ never happen! See above. */
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Handle mappings which were obtained by kmap_high_get()
		 * first, as the virtual address of such mappings is below
		 * PAGE_OFFSET. Warn for all other addresses which are in
		 * the user space part of the virtual address space.
		 */
		if (!kmap_high_unmap_local(addr))
			WARN_ON_ONCE(addr < PAGE_OFFSET);
		return;
	}

	preempt_disable();
	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	kmap_pte = kmap_get_pte(addr, idx);
	arch_kmap_local_pre_unmap(addr);
	pte_clear(&init_mm, addr, kmap_pte);
	arch_kmap_local_post_unmap(addr);
	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
	kmap_local_idx_pop();
	preempt_enable();
	migrate_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);
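
/*
 * Example usage (illustrative sketch): the kmap_local_page()/kunmap_local()
 * wrappers funnel into the functions above. The mappings are stack based,
 * so nested mappings must be released in reverse order:
 *
 *	void *src = kmap_local_page(src_page);
 *	void *dst = kmap_local_page(dst_page);
 *
 *	memcpy(dst, src, PAGE_SIZE);
 *	kunmap_local(dst);
 *	kunmap_local(src);
 */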

/*
 * Invoked before switch_to(). This is safe even when, during or after
 * clearing the maps, an interrupt which needs a kmap_local happens,
 * because task::kmap_ctrl.idx is not modified by the unmapping code, so a
 * nested kmap_local will use the next unused index and restore the index
 * on unmap. The already cleared kmaps of the outgoing task are irrelevant
 * because the interrupt context does not know about them. The same applies
 * when scheduling back in for an interrupt which happens before the
 * restore is complete.
 */
void __kmap_local_sched_out(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte;
	int i;

	/* Clear kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guards */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/*
		 * This is a horrible hack for XTENSA to calculate the
		 * coloured PTE index. Uses the PFN encoded into the pteval
		 * and the map index calculation because the actual mapped
		 * virtual address is not stored in task::kmap_ctrl.
		 * For any sane architecture this is optimized out.
		 */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));

		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		kmap_pte = kmap_get_pte(addr, idx);
		arch_kmap_local_pre_unmap(addr);
		pte_clear(&init_mm, addr, kmap_pte);
		arch_kmap_local_post_unmap(addr);
	}
}

void __kmap_local_sched_in(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte;
	int i;

	/* Restore kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guards */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/* See comment in __kmap_local_sched_out() */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		kmap_pte = kmap_get_pte(addr, idx);
		set_pte_at(&init_mm, addr, kmap_pte, pteval);
		arch_kmap_local_post_map(addr, pteval);
	}
}

void kmap_local_fork(struct task_struct *tsk)
{
	if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
		memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
}

#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}
EXPORT_SYMBOL(page_address);
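
/*
 * Example (illustrative sketch): page_address() returns NULL for a highmem
 * page that currently has no kmap, and the result is only stable while the
 * caller holds a pinning reference:
 *
 *	void *vaddr = kmap(page);	// guarantees page_address(page) != NULL
 *	...
 *	kunmap(page);
 */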
748 | ||
77f6078a RD |
749 | /** |
750 | * set_page_address - set a page's virtual address | |
751 | * @page: &struct page to set | |
752 | * @virtual: virtual address to use | |
753 | */ | |
1da177e4 LT |
754 | void set_page_address(struct page *page, void *virtual) |
755 | { | |
756 | unsigned long flags; | |
757 | struct page_address_slot *pas; | |
758 | struct page_address_map *pam; | |
759 | ||
760 | BUG_ON(!PageHighMem(page)); | |
761 | ||
762 | pas = page_slot(page); | |
763 | if (virtual) { /* Add */ | |
a354e2c8 | 764 | pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)]; |
1da177e4 LT |
765 | pam->page = page; |
766 | pam->virtual = virtual; | |
767 | ||
768 | spin_lock_irqsave(&pas->lock, flags); | |
769 | list_add_tail(&pam->list, &pas->lh); | |
770 | spin_unlock_irqrestore(&pas->lock, flags); | |
771 | } else { /* Remove */ | |
772 | spin_lock_irqsave(&pas->lock, flags); | |
773 | list_for_each_entry(pam, &pas->lh, list) { | |
774 | if (pam->page == page) { | |
775 | list_del(&pam->list); | |
776 | spin_unlock_irqrestore(&pas->lock, flags); | |
1da177e4 LT |
777 | goto done; |
778 | } | |
779 | } | |
780 | spin_unlock_irqrestore(&pas->lock, flags); | |
781 | } | |
782 | done: | |
783 | return; | |
784 | } | |
785 | ||
1da177e4 LT |
786 | void __init page_address_init(void) |
787 | { | |
788 | int i; | |
789 | ||
1da177e4 LT |
790 | for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) { |
791 | INIT_LIST_HEAD(&page_address_htable[i].lh); | |
792 | spin_lock_init(&page_address_htable[i].lock); | |
793 | } | |
1da177e4 LT |
794 | } |
795 | ||
955cc774 | 796 | #endif /* defined(HASHED_PAGE_VIRTUAL) */ |