Commit | Line | Data |
---|---|---|
1da177e4 | 1 | #include <linux/highmem.h> |
4b599fed | 2 | #include <linux/export.h> |
867c5b52 | 3 | #include <linux/swap.h> /* for totalram_pages */ |
7b4b2a0d | 4 | #include <linux/bootmem.h> |
1da177e4 LT | 5 | |
1da177e4 | 6 | void *kmap(struct page *page) |
1da177e4 | 7 | { |
1da177e4 | 8 | might_sleep(); |
1da177e4 | 9 | if (!PageHighMem(page)) |
1da177e4 | 10 | return page_address(page); |
1da177e4 | 11 | return kmap_high(page); |
1da177e4 | 12 | } |
3e4d3af5 | 13 | EXPORT_SYMBOL(kmap); |
1da177e4 LT | 14 | |
1da177e4 | 15 | void kunmap(struct page *page) |
1da177e4 | 16 | { |
1da177e4 | 17 | if (in_interrupt()) |
1da177e4 | 18 | BUG(); |
1da177e4 | 19 | if (!PageHighMem(page)) |
1da177e4 | 20 | return; |
1da177e4 | 21 | kunmap_high(page); |
1da177e4 | 22 | } |
3e4d3af5 | 23 | EXPORT_SYMBOL(kunmap); |
1da177e4 LT | 24 | |
1da177e4 | 25 | /* |
1da177e4 | 26 | * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because |
1da177e4 | 27 | * no global lock is needed and because the kmap code must perform a global TLB |
1da177e4 | 28 | * invalidation when the kmap pool wraps. |
1da177e4 | 29 | * |
565b0c1f | 30 | * However when holding an atomic kmap it is not legal to sleep, so atomic |
1da177e4 LT | 31 | * kmaps are appropriate for short, tight code paths only. |
1da177e4 | 32 | */ |
3e4d3af5 | 33 | void *kmap_atomic_prot(struct page *page, pgprot_t prot) |
1da177e4 | 34 | { |
1da177e4 | 35 | unsigned long vaddr; |
3e4d3af5 | 36 | int idx, type; |
022eb434 | 37 | |
2cb7c9cb | 38 | preempt_disable(); |
a866374a | 39 | pagefault_disable(); |
656dad31 | 40 | |
1da177e4 LT | 41 | if (!PageHighMem(page)) |
1da177e4 | 42 | return page_address(page); |
1da177e4 | 43 | |
3e4d3af5 | 44 | type = kmap_atomic_idx_push(); |
4150d3f5 | 45 | idx = type + KM_TYPE_NR*smp_processor_id(); |
1da177e4 | 46 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
4150d3f5 | 47 | BUG_ON(!pte_none(*(kmap_pte-idx))); |
ce6234b5 | 48 | set_pte(kmap_pte-idx, mk_pte(page, prot)); |
2cd1c8d4 | 49 | arch_flush_lazy_mmu_mode(); |
1da177e4 | 50 | |
4150d3f5 | 51 | return (void *)vaddr; |
1da177e4 | 52 | } |
3e4d3af5 PZ | 53 | EXPORT_SYMBOL(kmap_atomic_prot); |
3e4d3af5 | 54 | |
a24401bc | 55 | void *kmap_atomic(struct page *page) |
3e4d3af5 PZ | 56 | { |
3e4d3af5 | 57 | return kmap_atomic_prot(page, kmap_prot); |
3e4d3af5 | 58 | } |
a24401bc | 59 | EXPORT_SYMBOL(kmap_atomic); |
1da177e4 | 60 | |
3e4d3af5 PZ | 61 | /* |
3e4d3af5 | 62 | * This is the same as kmap_atomic() but can map memory that doesn't |
3e4d3af5 | 63 | * have a struct page associated with it. |
3e4d3af5 | 64 | */ |
3e4d3af5 | 65 | void *kmap_atomic_pfn(unsigned long pfn) |
ce6234b5 | 66 | { |
3e4d3af5 | 67 | return kmap_atomic_prot_pfn(pfn, kmap_prot); |
ce6234b5 | 68 | } |
3e4d3af5 | 69 | EXPORT_SYMBOL_GPL(kmap_atomic_pfn); |
ce6234b5 | 70 | |
3e4d3af5 | 71 | void __kunmap_atomic(void *kvaddr) |
1da177e4 | 72 | { |
1da177e4 | 73 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; |
3e4d3af5 PZ | 74 | |
3e4d3af5 | 75 | if (vaddr >= __fix_to_virt(FIX_KMAP_END) && |
3e4d3af5 | 76 | vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) { |
3e4d3af5 | 77 | int idx, type; |
3e4d3af5 | 78 | |
20273941 | 79 | type = kmap_atomic_idx(); |
3e4d3af5 PZ | 80 | idx = type + KM_TYPE_NR * smp_processor_id(); |
3e4d3af5 | 81 | |
3e4d3af5 | 82 | #ifdef CONFIG_DEBUG_HIGHMEM |
3e4d3af5 | 83 | WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); |
3e4d3af5 | 84 | #endif |
3e4d3af5 | 85 | /* |
3e4d3af5 | 86 | * Force other mappings to Oops if they try to access this |
3e4d3af5 | 87 | * pte without first remapping it. Keeping stale mappings around |
3e4d3af5 | 88 | * is also a bad idea, in case the page changes cacheability |
3e4d3af5 | 89 | * attributes or becomes a protected page in a hypervisor. |
3e4d3af5 | 90 | */ |
3b17979b | 91 | kpte_clear_flush(kmap_pte-idx, vaddr); |
20273941 | 92 | kmap_atomic_idx_pop(); |
2cd1c8d4 | 93 | arch_flush_lazy_mmu_mode(); |
3e4d3af5 | 94 | } |
3b17979b | 95 | #ifdef CONFIG_DEBUG_HIGHMEM |
3e4d3af5 | 96 | else { |
3b17979b JF | 97 | BUG_ON(vaddr < PAGE_OFFSET); |
3b17979b | 98 | BUG_ON(vaddr >= (unsigned long)high_memory); |
3b17979b | 99 | } |
3e4d3af5 | 100 | #endif |
1da177e4 | 101 | |
a866374a | 102 | pagefault_enable(); |
2cb7c9cb | 103 | preempt_enable(); |
1da177e4 | 104 | } |
3e4d3af5 | 105 | EXPORT_SYMBOL(__kunmap_atomic); |
60e64d46 | 106 | |
867c5b52 PE | 107 | void __init set_highmem_pages_init(void) |
867c5b52 | 108 | { |
867c5b52 | 109 | struct zone *zone; |
867c5b52 | 110 | int nid; |
867c5b52 | 111 | |
7b4b2a0d JL | 112 | /* |
7b4b2a0d | 113 | * Explicitly reset zone->managed_pages because set_highmem_pages_init() |
c6ffc5ca | 114 | * is invoked before memblock_free_all() |
7b4b2a0d JL | 115 | */ |
7b4b2a0d | 116 | reset_all_zones_managed_pages(); |
867c5b52 PE | 117 | for_each_zone(zone) { |
867c5b52 | 118 | unsigned long zone_start_pfn, zone_end_pfn; |
867c5b52 | 119 | |
867c5b52 | 120 | if (!is_highmem(zone)) |
867c5b52 | 121 | continue; |
867c5b52 | 122 | |
867c5b52 | 123 | zone_start_pfn = zone->zone_start_pfn; |
867c5b52 | 124 | zone_end_pfn = zone_start_pfn + zone->spanned_pages; |
867c5b52 | 125 | |
867c5b52 | 126 | nid = zone_to_nid(zone); |
867c5b52 | 127 | printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n", |
867c5b52 | 128 | zone->name, nid, zone_start_pfn, zone_end_pfn); |
867c5b52 | 129 | |
867c5b52 | 130 | add_highpages_with_active_regions(nid, zone_start_pfn, |
867c5b52 | 131 | zone_end_pfn); |
867c5b52 | 132 | } |
867c5b52 | 133 | } |
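
The comment at lines 25-32 explains the trade-off between kmap()/kunmap() and kmap_atomic()/kunmap_atomic(): the atomic variants skip the global kmap lock and TLB flush, but the mapping must not sleep and should cover only a short code path. Below is a minimal usage sketch of that contract, assuming a generic kernel-module context; the helper names (`copy_highmem_page_atomic`, `copy_highmem_page_sleepable`) are hypothetical and not part of this file.

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helpers illustrating the contract documented above. */

/* Short, tight path: no sleeping between map and unmap. */
static void copy_highmem_page_atomic(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap_atomic(page);   /* disables pagefaults/preemption */

	memcpy(dst, vaddr, len);
	kunmap_atomic(vaddr);              /* wrapper around __kunmap_atomic() */
}

/* Sleepable path: kmap() may block, as the might_sleep() at line 8 documents. */
static void copy_highmem_page_sleepable(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap(page);

	memcpy(dst, vaddr, len);
	kunmap(page);                      /* note: takes the page, not the vaddr */
}
```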
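The index arithmetic at lines 44-46 and 79-80 gives every CPU its own window of KM_TYPE_NR fixmap slots, so nested atomic kmaps on one CPU and concurrent kmaps on other CPUs never collide. A worked example follows, assuming KM_TYPE_NR == 20 (a common x86 value) and purely illustrative numbers.

```c
/*
 * Illustrative arithmetic only (assumes KM_TYPE_NR == 20):
 * on CPU 2, with two atomic kmaps already pushed, kmap_atomic_idx_push()
 * returns type == 2, so
 *
 *   idx   = type + KM_TYPE_NR * smp_processor_id()
 *         = 2    + 20 * 2
 *         = 42
 *   vaddr = __fix_to_virt(FIX_KMAP_BEGIN + 42)
 *
 * __kunmap_atomic() recomputes the same idx from kmap_atomic_idx() and
 * smp_processor_id(), which is why the WARN_ON_ONCE at line 83 can check
 * that the address being unmapped matches the expected slot.
 */
```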
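The comment at lines 61-64 notes that kmap_atomic_pfn() maps memory that has no struct page behind it. A hedged sketch of a caller, assuming the pfn was obtained elsewhere (for example from a page-table walk) and that offset stays below PAGE_SIZE; the function name `peek_pfn_byte` is hypothetical:

```c
#include <linux/highmem.h>
#include <linux/types.h>

/* Illustrative only: read one byte from an arbitrary page frame. */
static u8 peek_pfn_byte(unsigned long pfn, unsigned int offset)
{
	u8 *vaddr = kmap_atomic_pfn(pfn);  /* same no-sleep rules as kmap_atomic() */
	u8 val = vaddr[offset];

	kunmap_atomic(vaddr);
	return val;
}
```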