// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>

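/*
 * Editorial gloss on the variable below: __kmap_atomic_idx counts, per
 * CPU, how many atomic kmap (fixmap) slots are currently in use.
 * kmap_atomic() increments it to claim the next slot and
 * kunmap_atomic() decrements it, which is why atomic mappings must be
 * released in strict LIFO order.
 */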

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif

/*
 * Virtual_count is not a pure "count".
 * 0 means that it is not mapped, and has not been mapped
 * since a TLB flush - it is usable.
 * 1 means that there are no users, but it has been mapped
 * since the last TLB flush - so we can't use it.
 * n means that there are (n-1) current users of it.
 */
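/*
 * Worked example of the scheme above (an illustrative sketch, assuming
 * slot i starts out unused):
 *
 *	pkmap_count[i] == 0	slot free, PTE clear since the last flush
 *	kmap_high()		maps the page, count becomes 2 (1 user)
 *	kmap_high() again	count becomes 3 (2 users)
 *	kunmap_high() twice	count drops back to 1: no users, but a
 *				stale TLB entry keeps the slot unusable
 *	flush_all_zero_pkmaps()	clears the PTE, flushes the TLB range and
 *				resets the count to 0
 */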
#ifdef CONFIG_HIGHMEM

/*
 * An architecture with an aliasing data cache may define the following
 * family of helper functions in its asm/highmem.h to control the cache
 * color of the virtual addresses where physical memory pages are mapped
 * by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine the color of the virtual address where the page should be
 * mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get the next index for mapping inside the PKMAP region for a page with
 * the given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine if the page index inside the PKMAP region (pkmap_nr) of the
 * given color has wrapped around the PKMAP region end. When this happens
 * an attempt to flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get the head of the wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by the freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif
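/*
 * Sketch of an override (hypothetical; not taken from any real port): an
 * architecture whose data cache aliases on bit 0 of the pfn could color
 * its mappings like this in asm/highmem.h:
 *
 *	static inline unsigned int get_pkmap_color(struct page *page)
 *	{
 *		return page_to_pfn(page) & 1;
 *	}
 *	#define get_pkmap_color get_pkmap_color
 *
 * and would then provide matching get_next_pkmap_nr()/no_more_pkmaps()
 * variants that step through slots of one color only.
 */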

unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);

EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);

unsigned int nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the IRQ disabling out of the locking in that case to avoid potentially
 * useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()             spin_lock_irq(&kmap_lock)
#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()             spin_lock(&kmap_lock)
#define unlock_kmap()           spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)    \
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)  \
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

struct page *kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);
		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(kmap_to_page);

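/*
 * Illustrative round trip (a sketch, not part of the original file):
 * kmap_to_page() inverts kmap() for both lowmem addresses (via
 * virt_to_page()) and pkmap addresses (via the PKMAP page table), so:
 *
 *	void *va = kmap(page);
 *	BUG_ON(kmap_to_page(va) != page);
 *	kunmap(page);
 */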
static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped.
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

/**
 * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
 */
void kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep until somebody else unmaps one of their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);

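/*
 * Typical caller pattern (an illustrative sketch; callers normally go
 * through the kmap()/kunmap() wrappers from <linux/highmem.h>, which on
 * most architectures short-cut to page_address() for lowmem pages):
 *
 *	void *va = kmap(page);	(may sleep waiting for a free slot)
 *	memcpy(va, data, len);
 *	kunmap(page);
 */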
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned, a matching call
 * to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
#endif
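/*
 * Pin pattern (an illustrative sketch): unlike kmap_high(), this never
 * creates a new mapping, so the caller must handle the NULL case:
 *
 *	void *va = kmap_high_get(page);
 *	if (va) {
 *		... operate on the existing mapping ...
 *		kunmap_high(page);
 *	}
 */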

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock. As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock. Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}
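/*
 * Sizing example (illustrative): with PA_HASH_ORDER == 7 the table has
 * 128 buckets, and at most LAST_PKMAP (typically 512 or 1024 on x86)
 * associations can be live at once, so hash_ptr() keeps each bucket's
 * list, and hence the linear search in page_address(), only a few
 * entries deep.
 */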

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */