#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <[email protected]>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *		tlb_start_vma(tlb, vma);
 *		for each page-table-entry PTE that needs to be removed do {
 *			tlb_remove_tlb_entry(tlb, pte, address);
 *			if (pte refers to a normal page) {
 *				tlb_remove_page(tlb, page);
 *			}
 *		}
 *		tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
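/*
 * For illustration, roughly how the removal of a single (normal) page maps onto the
 * four steps above -- a simplified sketch, not a verbatim copy of the generic unmap
 * code; the surrounding loop, locking, and huge-page handling are omitted:
 *
 *	pte = ptep_get_and_clear(ptep);			// step (2): clear the PTE
 *	tlb_remove_tlb_entry(tlb, ptep, address);	// remember ADDRESS for step (3)
 *	if (pte_present(pte))
 *		tlb_remove_page(tlb, pte_page(pte));	// queue the page for step (4)
 *
 * Steps (3) and (4) are then carried out by tlb_finish_mmu() -- or earlier, whenever
 * the gather buffer fills up (see tlb_remove_page() below).
 */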
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

#ifdef CONFIG_SMP
# define FREE_PTE_NR		2048
# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
#else
# define FREE_PTE_NR		0
# define tlb_fast_mode(tlb)	(1)
#endif

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* == ~0U => fast mode */
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		*pages[FREE_PTE_NR];
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned int nr;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

	/* lastly, release the freed pages */
	nr = tlb->nr;
	if (!tlb_fast_mode(tlb)) {
		unsigned long i;
		tlb->nr = 0;
		tlb->start_addr = ~0UL;
		for (i = 0; i < nr; ++i)
			free_page_and_swap_cache(tlb->pages[i]);
	}
}

/*
 * Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	/*
	 * Use fast mode if only 1 CPU is online.
	 *
	 * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
	 * doesn't work because of speculative accesses and software prefetching: the page
	 * table of "mm" may be (and usually is) the currently active page table and even
	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
	 * compiler might use speculation or lfetch.fault on what happens to be a valid
	 * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
	 * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
	 * problems.  (We could make fast-mode work by switching the current task to a
	 * different "mm" during the shootdown.) --davidm 08/02/2002
	 */
	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
	tlb->fullmm = full_mm_flush;
	tlb->start_addr = ~0UL;
	return tlb;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline void
tlb_remove_page (struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

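/*
 * ia64 does all of its flushing from ia64_tlb_flush_mmu() over the gathered address
 * range, and virtual caches need no flushing here (step (1) above is a no-op), so the
 * per-vma hooks have nothing left to do.
 */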
#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define pte_free_tlb(tlb, ptep)				\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep);			\
} while (0)

#define pmd_free_tlb(tlb, ptep)				\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep);			\
} while (0)

#define pud_free_tlb(tlb, pudp)				\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp);			\
} while (0)

#endif /* _ASM_IA64_TLB_H */