/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H
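
/*
 * pte_update() is declared ahead of the sub-arch headers included below,
 * which may reference it before its definition later in this file.
 */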
#ifndef __ASSEMBLY__
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge);
#endif

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * a change of protection (see pte_modify()).
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

#ifndef __ASSEMBLY__
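
/*
 * Set by pte_update() below when an executable user mapping is modified
 * on 44x, so that the virtually tagged instruction cache can be flushed.
 */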
extern int icache_44x_need_flush;

#ifndef pte_huge_size
static inline unsigned long pte_huge_size(pte_t pte)
{
	return PAGE_SIZE;
}
#endif

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	unsigned long sz, pdsize;
	int i;

	if (huge)
		sz = pte_huge_size(__pte(old));
	else
		sz = PAGE_SIZE;

	/* Pick the page-directory level whose entries back this mapping */
	if (sz < PMD_SIZE)
		pdsize = PAGE_SIZE;
	else if (sz < PUD_SIZE)
		pdsize = PMD_SIZE;
	else if (sz < P4D_SIZE)
		pdsize = PUD_SIZE;
	else if (sz < PGDIR_SIZE)
		pdsize = P4D_SIZE;
	else
		pdsize = PGDIR_SIZE;

	/* Write every entry covering the mapping, stepping the RPN each time */
	for (i = 0; i < sz / pdsize; i++, p++) {
		*p = __pte(new);
		if (new)
			new += (unsigned long long)(pdsize / PAGE_SIZE) << PTE_RPN_SHIFT;
	}

	if (IS_ENABLED(CONFIG_44x) && !is_kernel_addr(addr) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;

	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	return old;
}
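
/*
 * The helpers below are thin wrappers around pte_update():
 * ptep_test_and_clear_young() clears _PAGE_ACCESSED, ptep_set_wrprotect()
 * clears _PAGE_WRITE, and ptep_get_and_clear()/pte_clear() clear the
 * whole entry.
 */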
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	old = pte_update(vma->vm_mm, addr, ptep, _PAGE_ACCESSED, 0, 0);

	return (old & _PAGE_ACCESSED) != 0;
}
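
/*
 * The __HAVE_ARCH_* defines tell the generic mm code that the architecture
 * provides these helpers, so the asm-generic fallbacks are not used.
 */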
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG

#ifndef ptep_set_wrprotect
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}
#endif
#define __HAVE_ARCH_PTEP_SET_WRPROTECT

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

/* Set the dirty and/or accessed bits atomically in a linux PTE */
#ifndef __ptep_set_access_flags
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					    pte_t *ptep, pte_t entry,
					    unsigned long address,
					    int psize)
{
	unsigned long set = pte_val(entry) &
			    (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, 0, set, huge);

	flush_tlb_page(vma, address);
}
#endif

/* Generic accessors to PTE bits */
#ifndef pte_mkwrite_novma
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	/*
	 * write implies read, hence set both
	 */
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte) { return false; }
static inline bool pte_ci(pte_t pte) { return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/*
 * Don't just check for any non-zero bits in _PAGE_READ, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_READ. Need to explicitly match the _PAGE_BAP_UR bit in that case too.
 */
static inline bool pte_read(pte_t pte)
{
	return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
}

/*
 * We only find page table entries at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_READ bit.
	 * We have _PAGE_READ set for WRITE too.
	 */
	if (!pte_present(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | pgprot_val(pgprot));
}
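
/*
 * As pfn_pte() shows, the physical page number lives in the bits at and
 * above PTE_RPN_SHIFT, while the low bits carry the protection and status
 * flags taken from the pgprot.
 */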

/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
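
/*
 * The generic nohash pte_mkhuge() below is a no-op: it returns the PTE
 * unchanged.
 */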
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
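
/*
 * pte_modify() applies a new protection while keeping the PFN and the
 * _PAGE_CHG_MASK bits (dirty, accessed, special) of the old entry.
 */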
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
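
/*
 * The pte_swp_* helpers operate on swap (non-present) entries;
 * _PAGE_SWP_EXCLUSIVE is the swap-exclusive marker used by the generic
 * mm swap code.
 */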
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between.
	 * In the percpu case, we also fall back to the simple update.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		/* Store the half without the valid bit first, then the one with it */
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			mbar\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}

	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
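
	/* On 8xx with 16K pages a pte_t is made of four 32-bit entries, so
	 * the same value is written into each of them below.
	 */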
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte3 = ptep->pte2 = ptep->pte1 = ptep->pte = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote. Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					 _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					 _PAGE_NO_CACHE))

#define pgprot_cached(prot)		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					 _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					 _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
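
/*
 * Create or remove a kernel mapping of a single page at virtual address va.
 */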
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */