]>
Commit | Line | Data |
---|---|---|
b99fbc10 | 1 | #include <linux/compiler.h> |
1da177e4 LT |
2 | #include <linux/module.h> |
3 | #include <linux/highmem.h> | |
52ab320a | 4 | #include <linux/sched.h> |
631330f5 | 5 | #include <linux/smp.h> |
bb86bf28 | 6 | #include <asm/fixmap.h> |
1da177e4 LT |
7 | #include <asm/tlbflush.h> |
8 | ||
/* pte backing the first kmap fixmap slot; cached by kmap_init(). */
static pte_t *kmap_pte;

/*
 * pfn bounds of the highmem region.  Not assigned anywhere in this file —
 * presumably set up during platform memory initialization; verify against
 * the arch setup code.
 */
unsigned long highstart_pfn, highend_pfn;
/*
 * Map a page into kernel virtual address space for long-lived access.
 * May sleep, so it must be called from process context.  Lowmem pages
 * are already permanently mapped and are returned directly; highmem
 * pages go through the kmap pool and get a TLB flush for the new
 * mapping.
 */
void *kmap(struct page *page)
{
	void *vaddr;

	might_sleep();
	if (PageHighMem(page)) {
		vaddr = kmap_high(page);
		flush_tlb_one((unsigned long)vaddr);
	} else {
		vaddr = page_address(page);
	}

	return vaddr;
}
EXPORT_SYMBOL(kmap);
1da177e4 | 26 | |
/*
 * Release a mapping obtained with kmap().  Lowmem pages were never
 * actually mapped, so only highmem pages need their kmap-pool entry
 * dropped.  Must not be called from interrupt context.
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (PageHighMem(page))
		kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
1da177e4 LT |
35 | |
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
44 | ||
/*
 * Map a (possibly highmem) page for short-lived atomic access.  Lowmem
 * pages are returned via page_address() without touching the fixmap;
 * highmem pages get a per-CPU fixmap slot.  The caller must not sleep
 * until the matching __kunmap_atomic().
 */
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* Grab the next free atomic-kmap slot for this CPU. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* Slot must have been cleared by the previous kunmap (debug builds). */
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	/* pte for slot idx lives at kmap_pte - idx: fixmap ptes apparently
	 * run downward from the FIX_KMAP_BEGIN pte cached in kmap_init(). */
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	/* Slot is per-CPU, so flushing the local TLB entry suffices. */
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
1da177e4 | 67 | |
/*
 * Tear down a mapping created by kmap_atomic() / kmap_atomic_pfn() and
 * re-enable pagefaults.  In debug builds the pte is also cleared and the
 * local TLB entry invalidated so stale accesses fault immediately.
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type __maybe_unused;

	/*
	 * Addresses below the fixmap came straight from page_address() in
	 * kmap_atomic(): nothing was mapped, so nothing to tear down.
	 */
	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		/* The address being unmapped must match the top-of-stack slot. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
1da177e4 | 97 | |
60080265 RB |
98 | /* |
99 | * This is the same as kmap_atomic() but can map memory that doesn't | |
100 | * have a struct page associated with it. | |
101 | */ | |
3e4d3af5 | 102 | void *kmap_atomic_pfn(unsigned long pfn) |
60080265 | 103 | { |
60080265 | 104 | unsigned long vaddr; |
3e4d3af5 | 105 | int idx, type; |
60080265 | 106 | |
a866374a | 107 | pagefault_disable(); |
60080265 | 108 | |
3e4d3af5 | 109 | type = kmap_atomic_idx_push(); |
60080265 RB |
110 | idx = type + KM_TYPE_NR*smp_processor_id(); |
111 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | |
bb86bf28 | 112 | set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL)); |
60080265 RB |
113 | flush_tlb_one(vaddr); |
114 | ||
115 | return (void*) vaddr; | |
116 | } | |
117 | ||
3e4d3af5 | 118 | struct page *kmap_atomic_to_page(void *ptr) |
1da177e4 LT |
119 | { |
120 | unsigned long idx, vaddr = (unsigned long)ptr; | |
121 | pte_t *pte; | |
122 | ||
123 | if (vaddr < FIXADDR_START) | |
124 | return virt_to_page(ptr); | |
125 | ||
126 | idx = virt_to_fix(vaddr); | |
127 | pte = kmap_pte - (idx - FIX_KMAP_BEGIN); | |
128 | return pte_page(*pte); | |
129 | } | |
130 | ||
bb86bf28 RB |
131 | void __init kmap_init(void) |
132 | { | |
133 | unsigned long kmap_vstart; | |
134 | ||
135 | /* cache the first kmap pte */ | |
136 | kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); | |
137 | kmap_pte = kmap_get_fixmap_pte(kmap_vstart); | |
138 | } |