/*
 *  linux/include/asm-arm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <asm/arch/vmalloc.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif

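/*
 * Worked example of the VMALLOC_START calculation above (illustrative
 * only; the real value of high_memory depends on the platform's RAM size
 * and PAGE_OFFSET).  Assuming a hypothetical high_memory of 0xc8000000:
 *
 *	(0xc8000000 + 0x00800000) & ~0x007fffff == 0xc8800000
 *
 * i.e. high_memory + VMALLOC_OFFSET rounded down to an 8MB boundary,
 * which leaves an 8MB "hole" between the end of physical memory and the
 * start of the vmalloc area in this case.
 */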
/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_set_access_flags() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
 * up to date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21

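/*
 * To spell the arithmetic out: with PGDIR_SHIFT == 21 each pgd entry maps
 * 1 << 21 == 2MB, so 2048 entries cover the full 4GB address space, and
 * each 2MB region contains 512 4kB pages, each needing one PTE, which
 * matches PTRS_PER_PTE above.
 */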
#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE
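/*
 * CPUs without the "high vectors" feature map the exception vector page
 * at virtual address 0, so user space must never be given a mapping in
 * the first page; FIRST_USER_ADDRESS therefore starts one page in.
 */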

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		20
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT	24
#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))

/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware and the linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 1024 bytes below.
 */
#define L_PTE_PRESENT		(1 << 0)
#define L_PTE_FILE		(1 << 1)	/* only when !PRESENT */
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_BUFFERABLE	(1 << 2)	/* matches PTE */
#define L_PTE_CACHEABLE		(1 << 3)	/* matches PTE */
#define L_PTE_USER		(1 << 4)
#define L_PTE_WRITE		(1 << 5)
#define L_PTE_EXEC		(1 << 6)
#define L_PTE_DIRTY		(1 << 7)
#define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */

#ifndef __ASSEMBLY__

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
#define _L_PTE_READ	L_PTE_USER | L_PTE_EXEC

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define PAGE_NONE	pgprot_user
#define PAGE_COPY	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
#define PAGE_SHARED	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ | \
				 L_PTE_WRITE)
#define PAGE_READONLY	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
#define PAGE_KERNEL	pgprot_kernel

#define __PAGE_NONE	__pgprot(_L_PTE_DEFAULT)
#define __PAGE_COPY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define __PAGE_SHARED	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define __PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY
#define __P101  __PAGE_READONLY
#define __P110  __PAGE_COPY
#define __P111  __PAGE_COPY

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY
#define __S101  __PAGE_READONLY
#define __S110  __PAGE_SHARED
#define __S111  __PAGE_SHARED
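
/*
 * In the __Pxwr/__Sxwr names above the three digits are the exec, write
 * and read bits of the mapping's protection; __P* entries are used for
 * private (copy-on-write) mappings and __S* entries for shared ones.
 * Every writable private combination therefore resolves to __PAGE_COPY,
 * and since write implies read on ARM there is no write-only entry.
 */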

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte_ext(ptep,pte,ext)	cpu_set_pte_ext(ptep,pte,ext)

#define set_pte_at(mm,addr,ptep,pteval) do { \
	set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
 } while (0)
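/*
 * Mappings below TASK_SIZE belong to a particular process, so they get
 * the not-global (nG) bit and their TLB entries are tagged with the
 * current ASID on CPUs that support it; kernel mappings at or above
 * TASK_SIZE stay global so they survive address space switches.
 */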

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)

/*
 * The following only works if pte_present() is not true.
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 2)
#define pgoff_to_pte(x)		__pte(((x) << 2) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	30

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
	ptr += PTRS_PER_PTE * sizeof(void *);

	return __va(ptr);
}
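/*
 * pmd entries point at the hardware PTE tables (h/w pt 0 at +0, h/w pt 1
 * at +1024 in the layout above).  Masking the value down to a
 * PTRS_PER_PTE * sizeof(void *) == 2048 byte boundary and then adding
 * 2048 skips over both hardware tables to the "Linux pt" copies that
 * hold the software PTE bits.
 */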

#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))

/*
 * Permanent address of a page. We never have highmem, so this is trivial.
 */
#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
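/*
 * Only the EXEC, WRITE and USER bits are taken from the new protection;
 * PRESENT, YOUNG, DIRTY and the cache/buffer bits are preserved, so a
 * protection change cannot disturb the emulated young/dirty state
 * described at the top of this file.
 */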

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define __swp_type(x)		(((x).val >> 2) & 0x7f)
#define __swp_offset(x)		((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

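/*
 * Spelling the 32GB figure out: bits 1:0 of a swap PTE stay clear (so it
 * is neither present nor a file PTE), the swap type lives in bits 8:2
 * (7 bits) and the page offset in bits 31:9 (23 bits), giving
 * 2^23 * 4kB == 32GB per swap area.
 */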
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
		remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */