#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
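
/*
 * Illustrative sketch, not part of the original file: the sleeping
 * kmap()/kunmap() pair is for process context, where the mapping may be
 * held across a sleep. The helper name is hypothetical; the real zeroing
 * helpers in <linux/highmem.h> follow the same pattern.
 */
static inline void example_zero_page_sleeping(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep until a pool slot frees up */

	clear_page(vaddr);
	kunmap(page);			/* note: takes the page, not vaddr */
}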

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);
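
/*
 * Illustrative sketch, not part of the original file: kmap_atomic_prot()
 * lets the caller choose the pte protection for the temporary slot. The
 * read-only mapping below is a hypothetical use; paravirt guests rely on
 * this pattern for pages they must not write, such as live page tables.
 */
static inline const void *example_map_readonly(struct page *page)
{
	return kmap_atomic_prot(page, PAGE_KERNEL_RO);	/* pair with __kunmap_atomic() */
}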

void *__kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(__kmap_atomic);
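
/*
 * Illustrative sketch, not part of the original file: a typical atomic-kmap
 * copy. Pagefaults stay disabled between map and unmap, so nothing in
 * between may sleep; unmap in LIFO order, since the per-CPU slots form a
 * small stack (kmap_atomic_idx_push/pop above).
 */
static inline void example_copy_highpage(struct page *dst, struct page *src)
{
	void *vto = __kmap_atomic(dst);
	void *vfrom = __kmap_atomic(src);

	copy_page(vto, vfrom);
	__kunmap_atomic(vfrom);		/* reverse of mapping order */
	__kunmap_atomic(vto);
}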

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
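
/*
 * Illustrative sketch, not part of the original file: kmap_atomic_pfn()
 * serves frames with no struct page behind them, e.g. device apertures.
 * The pfn and helper name here are hypothetical; io_mapping_map_atomic_wc()
 * in <linux/io-mapping.h> is a real consumer of this interface.
 */
static inline u32 example_peek_pfn(unsigned long pfn)
{
	u32 *vaddr = kmap_atomic_pfn(pfn);
	u32 first_word = *vaddr;

	__kunmap_atomic(vaddr);
	return first_word;
}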

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	/* fixmap indices map top-down: FIX_KMAP_END is the lower address */
	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);
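
/*
 * Illustrative sketch, not part of the original file: recovering the struct
 * page behind an atomic mapping when a caller only has the kernel virtual
 * address. Works for both fixmap slots and lowmem addresses, per the two
 * branches above.
 */
static inline void example_kvaddr_roundtrip(struct page *page)
{
	void *vaddr = __kmap_atomic(page);

	BUG_ON(kmap_atomic_to_page(vaddr) != page);	/* sanity check only */
	__kunmap_atomic(vaddr);
}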

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}
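
/*
 * Illustrative note, not part of the original file: this runs once during
 * boot from the x86-32 mem_init() path, handing every highmem zone's pages
 * to the buddy allocator and folding them into the RAM totals above.
 */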
139 | } |