// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

/* L1 cache geometry (line size, number of sets, aliasing mask), indexed by ICACHE/DCACHE. */
extern struct cache_info L1_cache_info[2];
#ifndef CONFIG_CPU_CACHE_ALIASING
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(*pte);
	unsigned long flags;

	if (!pfn_valid(pfn))
		return;

	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}
	page = pfn_to_page(pfn);

	/* Flush if the page was dirtied while unmapped or may be executed. */
	if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
	    (vma->vm_flags & VM_EXEC)) {
		if (!PageHighMem(page)) {
			cpu_cache_wbinval_page((unsigned long)
					       page_address(page),
					       vma->vm_flags & VM_EXEC);
		} else {
			unsigned long kaddr = (unsigned long)kmap_atomic(page);
			cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
			kunmap_atomic((void *)kaddr);
		}
	}
}
#else
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
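/*
 * When CONFIG_CPU_CACHE_ALIASING is set, one physical page can be cached
 * under several "colours" (SHMLBA-sized slots).  kremap0()/kremap1() install
 * a locked TLB entry that maps a page at a fixed kernel window whose colour
 * matches the given user address, so copies and cache maintenance hit the
 * same cache lines the user mapping uses; kunmap01() unlocks and invalidates
 * that entry again.
 */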
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
	return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}
static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
	kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}
static inline void kunmap01(unsigned long kaddr)
{
	__nds32__tlbop_unlk(kaddr);
	__nds32__tlbop_inv(kaddr);
	__nds32__isb();
}
static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
	kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}
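/*
 * Every user below keeps interrupts disabled while a kremap0()/kremap1()
 * window is live: the windows sit at fixed virtual addresses (BASE_ADDR0/
 * BASE_ADDR1), so only one mapping per window can exist at a time.
 */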
void flush_cache_mm(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
	local_irq_restore(flags);
}
void flush_cache_dup_mm(struct mm_struct *mm)
{
}
void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* For large ranges a full flush is cheaper than walking page by page. */
	if ((end - start) > 8 * PAGE_SIZE) {
		cpu_dcache_wbinval_all();
		if (vma->vm_flags & VM_EXEC)
			cpu_icache_inval_all();
		return;
	}
	local_irq_save(flags);
	while (start < end) {
		if (va_present(vma->vm_mm, start))
			cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
		start += PAGE_SIZE;
	}
	local_irq_restore(flags);
}
void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long addr, unsigned long pfn)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(addr, pfn << PAGE_SHIFT);
	cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
	kunmap01(vto);
	local_irq_restore(flags);
}
void flush_cache_vmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;

	kto = ((unsigned long)page_address(to) & PAGE_MASK);
	kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
	pto = page_to_phys(to);
	pfrom = page_to_phys(from);

	/* If a kernel mapping aliases the user address, write back the source
	 * lines and drop stale destination lines before copying through the
	 * colour-matched windows. */
	if (aliasing(vaddr, (unsigned long)kfrom))
		cpu_dcache_wb_page((unsigned long)kfrom);
	if (aliasing(vaddr, (unsigned long)kto))
		cpu_dcache_inval_page((unsigned long)kto);
	local_irq_save(flags);
	vto = kremap0(vaddr, pto);
	vfrom = kremap1(vaddr, pfrom);
	copy_page((void *)vto, (void *)vfrom);
	kunmap01(vfrom);
	kunmap01(vto);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(copy_user_highpage);
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long vto, flags, kto;

	kto = ((unsigned long)page_address(page) & PAGE_MASK);

	local_irq_save(flags);
	if (aliasing(kto, vaddr) && kto != 0) {
		cpu_dcache_inval_page(kto);
		cpu_icache_inval_page(kto);
	}
	vto = kremap0(vaddr, page_to_phys(page));
	clear_page((void *)vto);
	kunmap01(vto);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(clear_user_highpage);
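/*
 * flush_dcache_page() either defers the flush by marking the page
 * PG_dcache_dirty (no user mappings yet; update_mmu_cache() clears the bit
 * and flushes when the page is finally mapped) or writes back and
 * invalidates every cache colour the page may occupy right away.
 */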
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		int i, pc;
		unsigned long vto, kaddr, flags;

		kaddr = (unsigned long)page_address(page);
		cpu_dcache_wbinval_page(kaddr);
		/* Number of colours: cache way size divided by page size. */
		pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
		local_irq_save(flags);
		for (i = 0; i < pc; i++) {
			vto = kremap0(kaddr + i * PAGE_SIZE,
				      page_to_phys(page));
			cpu_dcache_wbinval_page(vto);
			kunmap01(vto);
		}
		local_irq_restore(flags);
	}
}
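/*
 * copy_to_user_page() is used when the kernel writes into a user page on
 * another task's behalf (e.g. ptrace inserting a breakpoint).  The write
 * goes through a colour-matched kernel window, and the touched range is
 * written back and invalidated when the VMA is executable so the I-cache
 * sees the new code.
 */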
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long line_size, start, end, vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		line_size = L1_cache_info[DCACHE].line_size;
		start = (unsigned long)dst & ~(line_size - 1);
		end = ((unsigned long)dst + len + line_size - 1) &
		      ~(line_size - 1);
		cpu_cache_wbinval_range(start, end, 1);
	}
	kunmap01(vto);
	local_irq_restore(flags);
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	kunmap01(vto);
	local_irq_restore(flags);
}
void flush_anon_page(struct vm_area_struct *vma,
		     struct page *page, unsigned long vaddr)
{
	unsigned long flags;

	if (!PageAnon(page))
		return;

	if (vma->vm_mm != current->active_mm)
		return;

	local_irq_save(flags);
	if (vma->vm_flags & VM_EXEC)
		cpu_icache_inval_page(vaddr & PAGE_MASK);
	cpu_dcache_wbinval_page((unsigned long)page_address(page));
	local_irq_restore(flags);
}
void flush_kernel_dcache_page(struct page *page)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_page((unsigned long)page_address(page));
	local_irq_restore(flags);
}
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long line_size, flags;

	line_size = L1_cache_info[DCACHE].line_size;
	start = start & ~(line_size - 1);
	end = (end + line_size - 1) & ~(line_size - 1);
	local_irq_save(flags);
	cpu_cache_wbinval_range(start, end, 1);
	local_irq_restore(flags);
}
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_cache_wbinval_page((unsigned long)page_address(page),
			       vma->vm_flags & VM_EXEC);
	local_irq_restore(flags);
}
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *pte)
{
	struct page *page;
	unsigned long flags;
	unsigned long pfn = pte_pfn(*pte);

	if (!pfn_valid(pfn))
		return;

	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}

	page = pfn_to_page(pfn);
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
	    (vma->vm_flags & VM_EXEC)) {
		local_irq_save(flags);
		cpu_dcache_wbinval_page((unsigned long)page_address(page));
		local_irq_restore(flags);
	}
}
#endif