/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <spaces.h>
#include <linux/const.h>
#include <linux/kernel.h>
#include <asm/mipsregs.h>

/*
 * PAGE_SHIFT determines the page size
 */
#define PAGE_SHIFT CONFIG_PAGE_SHIFT
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))

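/*
 * For illustration (the exact values depend on CONFIG_PAGE_SHIFT): with
 * 4 KiB pages, PAGE_SHIFT == 12 and PAGE_SIZE == 0x1000, and PAGE_MASK
 * clears the low 12 bits, so "addr & PAGE_MASK" rounds an address down to
 * its page boundary.
 */
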
/*
 * This is used for calculating the real page sizes
 * for FTLB or VTLB + FTLB configurations.
 */
static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
{
	switch (mmuextdef) {
	case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
		if (PAGE_SIZE == (1 << 30))
			return 5;
		if (PAGE_SIZE == (1llu << 32))
			return 6;
		if (PAGE_SIZE > (256 << 10))
			return 7; /* reserved */
		fallthrough;
	case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
		return (PAGE_SHIFT - 10) / 2;
	default:
		panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n",
		      mmuextdef >> 14);
	}
}

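/*
 * Illustrative values for the VTLBSIZEEXT formula above: (PAGE_SHIFT - 10) / 2
 * yields 1 for 4 KiB pages (shift 12), 2 for 16 KiB (shift 14) and 3 for
 * 64 KiB (shift 16), i.e. one encoding step per factor-of-four increase in
 * page size.
 */
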
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
#define HPAGE_SHIFT ({BUILD_BUG(); 0; })
#define HPAGE_SIZE ({BUILD_BUG(); 0; })
#define HPAGE_MASK ({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER ({BUILD_BUG(); 0; })
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

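/*
 * For illustration: a huge page covers the range mapped by one page of
 * 8-byte PTEs, hence PAGE_SHIFT + (PAGE_SHIFT - 3).  With 4 KiB base pages
 * this gives HPAGE_SHIFT == 21 (2 MiB huge pages, HUGETLB_PAGE_ORDER == 9);
 * with 16 KiB base pages, HPAGE_SHIFT == 25 (32 MiB, order 11).
 */
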
#include <linux/pfn.h>

extern void build_clear_page(void);
extern void build_copy_page(void);

/*
 * It's normally defined only for FLATMEM config but it's
 * used in our early mem init code for all memory models.
 * So always define it.
 */
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
extern unsigned long ARCH_PFN_OFFSET;
# define ARCH_PFN_OFFSET ARCH_PFN_OFFSET
#else
# define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
#endif

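/*
 * Example (illustrative): with a fixed PHYS_OFFSET of 0x20000000 and 4 KiB
 * pages, PFN_UP(PHYS_OFFSET) yields an ARCH_PFN_OFFSET of 0x20000, the page
 * frame number of the first byte of RAM the kernel accounts for.
 */
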
extern void clear_page(void * page);
extern void copy_page(void * to, void * from);

extern unsigned long shm_align_mask;

static inline unsigned long pages_do_alias(unsigned long addr1,
	unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

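/*
 * Illustrative example, assuming shm_align_mask == 0x7fff (a 32 KiB
 * virtually indexed cache way): mappings at 0x1000 and 0x5000 differ within
 * the mask, so pages_do_alias() is non-zero and they land in different cache
 * colours; mappings at 0x1000 and 0x9000 share a colour and do not alias.
 */
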
struct page;

static inline void clear_user_page(void *addr, unsigned long vaddr,
	struct page *page)
{
	extern void (*flush_data_cache_page)(unsigned long addr);

	clear_page(addr);
	if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)addr);
}

struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking.
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#ifdef CONFIG_CPU_MIPS32
typedef struct { unsigned long pte_low, pte_high; } pte_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#else
typedef struct { unsigned long long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
#endif
#else
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
#endif
typedef struct page *pgtable_t;

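/*
 * Illustrative use of the wrappers above: because pte_t is a struct type,
 * a raw integer cannot silently be used where a pte is expected.
 *
 *	pte_t pte = __pte(0x1000);		wrap a raw PTE value
 *	unsigned long raw = pte_val(pte);	unwrap it again
 *
 * Assigning "pte_t pte = 0x1000;" directly would fail to compile.
 */
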
/*
 * Right now we don't support 4-level pagetables, so all pud-related
 * definitions come from <asm-generic/pgtable-nopud.h>.
 */

/*
 * Finally the top of the hierarchy, the pgd
 */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x) ((x).pgd)
#define __pgd(x) ((pgd_t) { (x) } )

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
#define pte_pgprot(x) __pgprot(pte_val(x) & ~_PFN_MASK)

/*
 * On R4000-style MMUs where a TLB entry is mapping an adjacent even / odd
 * pair of pages we only have a single global bit per pair of pages. When
 * writing to the TLB make sure we always have the bit set for both pages
 * or none. This macro is used to access the `buddy' of the pte we're just
 * about to modify, so both entries of the pair can be kept consistent.
 */
#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))

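/*
 * Worked example (illustrative): with sizeof(pte_t) == 8, a pte at offset
 * 0x10 within its page table has its buddy at 0x18 and vice versa, because
 * XOR-ing the address with the entry size flips between the even and odd
 * slot of the pair without any conditional.
 */
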
/*
 * __pa()/__va() should be used only during mem init.
 */
static inline unsigned long ___pa(unsigned long x)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * For MIPS64 the virtual address may either be in one of
		 * the compatibility segments ckseg0 or ckseg1, or it may
		 * be in XKPHYS.
		 */
		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
	}

	if (!IS_ENABLED(CONFIG_EVA)) {
		/*
		 * We're using the standard MIPS32 legacy memory map, i.e.
		 * the address x is going to be in kseg0 or kseg1. We can
		 * handle either case by masking out the desired bits using
		 * CPHYSADDR().
		 */
		return CPHYSADDR(x);
	}

	/*
	 * EVA is in use so the memory map could be anything, making it not
	 * safe to just mask out bits.
	 */
	return x - PAGE_OFFSET + PHYS_OFFSET;
}
#define __pa(x) ___pa((unsigned long)(x))
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))

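/*
 * Illustrative example for the non-EVA 32-bit case: a kseg0 address such as
 * 0x80001000 translates to physical 0x00001000 (CPHYSADDR() masks off the
 * segment bits), and, assuming PHYS_OFFSET is 0, __va(0x00001000) maps it
 * back to 0x80001000 in the unmapped cached segment.
 */
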
/*
 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
 * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
 * discussion can be found in the lkml threads referenced by those commits.
 *
 * It is unclear if the miscompilations mentioned there also affect MIPS,
 * so we keep this one until GCC 3.x has been retired before we can apply
 * https://patchwork.linux-mips.org/patch/1541/
 */
#define __pa_symbol_nodebug(x) __pa(RELOC_HIDE((unsigned long)(x), 0))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
#endif

#define __pa_symbol(x) __phys_addr_symbol((unsigned long)(x))

#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)

#define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))

extern bool __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr) \
	__virt_addr_valid((const volatile void *) (kaddr))

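/*
 * Illustrative round trip, assuming a kseg0 kernel address, 4 KiB pages and
 * PHYS_OFFSET == 0: virt_to_pfn(0x80004000) yields pfn 4, virt_to_page()
 * returns the corresponding struct page, and pfn_to_kaddr(4) maps back to
 * 0x80004000.
 */
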
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC

extern unsigned long __kaslr_offset;
static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_PAGE_H */