/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 */
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * page.
 */
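
/*
 * In short, for the aliasing configuration below:
 *
 * - flush_dcache_folio(): page not mapped to user space yet -> set
 *   PG_arch_1 and defer the flush.
 * - update_mmu_cache_range(): PG_arch_1 set -> flush the aliases and
 *   clear PG_arch_1.
 */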

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

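/*
 * When a d-cache way is larger than a page, two virtual addresses that
 * map the same physical page can occupy different cache lines (aliases).
 * The helpers below therefore operate through a temporary mapping in the
 * TLBTEMP window whose cache color matches the relevant virtual address.
 */
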
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);
			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);
			preempt_disable();
			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
			preempt_enable();
		}
	}
}
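
/*
 * Return a kernel virtual address inside the TLBTEMP window at 'base'
 * that has the same cache color as the user address 'vaddr', and report
 * the page's physical address through 'paddr'.
 */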
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	*paddr = page_to_phys(page);
	return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
}
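
/*
 * clear_user_highpage() and copy_user_highpage() write the page through
 * an alias of the user color and set PG_arch_1 ('dirty' in the aliasing
 * configuration) so that update_mmu_cache_range() flushes the page
 * before it becomes visible to user space.
 */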
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	struct folio *folio = page_folio(page);
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
	set_bit(PG_arch_1, folio_flags(folio, 0));
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);

void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(dst);
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, folio_flags(folio, 0));
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * Any time the kernel writes to a user page cache page, or is about to
 * read from a page cache page, this routine is called.
 */
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping = folio_flush_mapping(folio);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache_range().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &folio->flags))
			set_bit(PG_arch_1, &folio->flags);
		return;
	} else {
		unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
		unsigned long temp = folio_pos(folio);
		unsigned int i, nr = folio_nr_pages(folio);
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		preempt_disable();
		for (i = 0; i < nr; i++) {
			virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
			__flush_invalidate_dcache_page_alias(virt, phys);

			virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

			if (alias)
				__flush_invalidate_dcache_page_alias(virt,
								     phys);
			if (mapping)
				__invalidate_icache_page_alias(virt, phys);

			phys += PAGE_SIZE;
			temp += PAGE_SIZE;
		}
		preempt_enable();
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_folio);
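
/*
 * Note: the generic flush_dcache_page() is expected to resolve to the
 * folio version above via page_folio().
 */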

/*
 * For now, flush the whole cache. FIXME??
 */

void local_flush_cache_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
			    unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit. */
	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	preempt_disable();
	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

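/*
 * update_mmu_cache_range() runs once PTEs for the folio have been
 * installed. With d-cache aliasing this is where a flush deferred by
 * flush_dcache_folio() (PG_arch_1 set) is finally performed; without
 * aliasing, executable pages get a one-time D$ writeback and I$
 * invalidation, after which PG_arch_1 marks the folio as clean.
 */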
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep, unsigned int nr)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct folio *folio;
	unsigned int i;

	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));

	/* Invalidate old entries in TLBs */
	for (i = 0; i < nr; i++)
		flush_tlb_page(vma, addr + i * PAGE_SIZE);
	nr = folio_nr_pages(folio);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
		unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
		unsigned long tmp;

		preempt_disable();
		for (i = 0; i < nr; i++) {
			tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
			__flush_invalidate_dcache_page_alias(tmp, phys);
			tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
			__flush_invalidate_dcache_page_alias(tmp, phys);
			__invalidate_icache_page_alias(tmp, phys);
			phys += PAGE_SIZE;
		}
		preempt_enable();

		clear_bit(PG_arch_1, &folio->flags);
	}
#else
	if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		for (i = 0; i < nr; i++) {
			void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);

			__flush_dcache_page((unsigned long)paddr);
			__invalidate_icache_page((unsigned long)paddr);
			kunmap_local(paddr);
		}
		set_bit(PG_arch_1, &folio->flags);
	}
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

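/*
 * copy_to_user_page() modifies a page that may be mapped at 'vaddr' in
 * another process (e.g. a ptrace poke): dirty user-colored lines are
 * flushed before the write, and D$/I$ are re-synchronized afterwards
 * for executable mappings.
 */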
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	/* Copy data. */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_range((unsigned long)dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);
		preempt_enable();
	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long)dst, len);
		__invalidate_icache_range((unsigned long)dst, len);
	}
}
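
/*
 * copy_from_user_page() only reads through the kernel mapping, so it is
 * enough to write back any dirty user-colored lines first; no I-cache
 * maintenance is needed for a read.
 */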
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient.)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	memcpy(dst, src, len);
}

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */