// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];

#ifndef CONFIG_CPU_CACHE_ALIASING
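/*
 * Non-aliasing D-cache: no colouring to worry about.  Preload the TLB
 * entry for the faulting address, then write back and invalidate the
 * page's cache lines if flush_dcache_page() left it marked
 * PG_dcache_dirty or the mapping is executable, so the I-cache sees
 * up-to-date instructions.
 */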
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(*pte);
        unsigned long flags;

        if (!pfn_valid(pfn))
                return;

        if (vma->vm_mm == current->active_mm) {
                local_irq_save(flags);
                __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
                __nds32__tlbop_rwr(*pte);
                __nds32__isb();
                local_irq_restore(flags);
        }
        page = pfn_to_page(pfn);

        if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
            (vma->vm_flags & VM_EXEC)) {
                if (!PageHighMem(page)) {
                        cpu_cache_wbinval_page((unsigned long)page_address(page),
                                               vma->vm_flags & VM_EXEC);
                } else {
                        unsigned long kaddr = (unsigned long)kmap_atomic(page);

                        cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
                        kunmap_atomic((void *)kaddr);
                }
        }
}
#else
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

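/*
 * Non-zero when @addr and @page select different cache colours within
 * the SHMLBA window, i.e. the two mappings of the same physical page
 * would alias in the D-cache.
 */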
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
        return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

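/*
 * kremap0()/kremap1() install a temporary, locked TLB entry mapping a
 * physical page at a fixed kernel window (BASE_ADDR0/BASE_ADDR1) whose
 * low bits match the cache colour of the user address, so kernel
 * accesses hit the same cache lines as the user mapping would.
 * kunmap01() unlocks and invalidates such an entry again.
 */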
static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
        kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}

static inline void kunmap01(unsigned long kaddr)
{
        __nds32__tlbop_unlk(kaddr);
        __nds32__tlbop_inv(kaddr);
        __nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
        kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}

void flush_cache_mm(struct mm_struct *mm)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
        local_irq_restore(flags);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
}

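/*
 * For ranges larger than eight pages, fall back to writing back and
 * invalidating the whole D-cache (plus the I-cache for executable
 * VMAs); smaller ranges are flushed one present page at a time.
 */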
void flush_cache_range(struct vm_area_struct *vma,
                       unsigned long start, unsigned long end)
{
        unsigned long flags;

        if ((end - start) > 8 * PAGE_SIZE) {
                cpu_dcache_wbinval_all();
                if (vma->vm_flags & VM_EXEC)
                        cpu_icache_inval_all();
                return;
        }
        local_irq_save(flags);
        while (start < end) {
                if (va_present(vma->vm_mm, start))
                        cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
                start += PAGE_SIZE;
        }
        local_irq_restore(flags);
}

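/*
 * Flush a single user page through a colour-matched temporary kernel
 * mapping so the write-back/invalidate hits the lines the user mapping
 * may have dirtied.
 */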
void flush_cache_page(struct vm_area_struct *vma,
                      unsigned long addr, unsigned long pfn)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(addr, pfn << PAGE_SHIFT);
        cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
        kunmap01(vto);
        local_irq_restore(flags);
}

void flush_cache_vmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}

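/*
 * Before copying, write back the source page and invalidate the
 * destination page through their kernel mappings if those mappings
 * alias with the user address; then copy via two colour-matched
 * temporary mappings so the user view of the destination is consistent.
 */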
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;

        kto = ((unsigned long)page_address(to) & PAGE_MASK);
        kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
        pto = page_to_phys(to);
        pfrom = page_to_phys(from);

        if (aliasing(vaddr, (unsigned long)kfrom))
                cpu_dcache_wb_page((unsigned long)kfrom);
        if (aliasing(vaddr, (unsigned long)kto))
                cpu_dcache_inval_page((unsigned long)kto);
        local_irq_save(flags);
        vto = kremap0(vaddr, pto);
        vfrom = kremap1(vaddr, pfrom);
        copy_page((void *)vto, (void *)vfrom);
        kunmap01(vfrom);
        kunmap01(vto);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(copy_user_highpage);

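/*
 * If the page's kernel mapping aliases with the user address,
 * invalidate its D- and I-cache lines first, then clear the page
 * through a colour-matched temporary mapping.
 */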
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        unsigned long vto, flags, kto;

        kto = ((unsigned long)page_address(page) & PAGE_MASK);

        local_irq_save(flags);
        if (aliasing(kto, vaddr) && kto != 0) {
                cpu_dcache_inval_page(kto);
                cpu_icache_inval_page(kto);
        }
        vto = kremap0(vaddr, page_to_phys(page));
        clear_page((void *)vto);
        kunmap01(vto);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(clear_user_highpage);

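/*
 * If the page belongs to a mapping that has no user mappings yet, just
 * mark it PG_dcache_dirty and let update_mmu_cache() do the flush
 * later; otherwise write back and invalidate the kernel alias and every
 * possible user colour of the page.
 */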
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        mapping = page_mapping(page);
        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
        } else {
                int i, pc;
                unsigned long vto, kaddr, flags;

                kaddr = (unsigned long)page_address(page);
                cpu_dcache_wbinval_page(kaddr);
                pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
                local_irq_save(flags);
                for (i = 0; i < pc; i++) {
                        vto = kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page));
                        cpu_dcache_wbinval_page(vto);
                        kunmap01(vto);
                }
                local_irq_restore(flags);
        }
}

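/*
 * copy_to_user_page()/copy_from_user_page() access the user page
 * through a colour-matched temporary mapping so no stale alias is left
 * behind; after writing into an executable VMA the touched lines are
 * written back and invalidated so the I-cache picks up the new code.
 */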
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long line_size, start, end, vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        if (vma->vm_flags & VM_EXEC) {
                line_size = L1_cache_info[DCACHE].line_size;
                start = (unsigned long)dst & ~(line_size - 1);
                end = ((unsigned long)dst + len + line_size - 1) &
                      ~(line_size - 1);
                cpu_cache_wbinval_range(start, end, 1);
        }
        kunmap01(vto);
        local_irq_restore(flags);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        kunmap01(vto);
        local_irq_restore(flags);
}

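/*
 * Anonymous pages are reachable both through the user mapping and the
 * kernel linear mapping; write back and invalidate the kernel alias
 * (and invalidate the I-cache line of the user address for executable
 * VMAs) before the page is accessed through the other mapping.
 */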
void flush_anon_page(struct vm_area_struct *vma,
                     struct page *page, unsigned long vaddr)
{
        unsigned long flags;

        if (!PageAnon(page))
                return;

        if (vma->vm_mm != current->active_mm)
                return;

        local_irq_save(flags);
        if (vma->vm_flags & VM_EXEC)
                cpu_icache_inval_page(vaddr & PAGE_MASK);
        cpu_dcache_wbinval_page((unsigned long)page_address(page));
        local_irq_restore(flags);
}

void flush_kernel_dcache_page(struct page *page)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wbinval_page((unsigned long)page_address(page));
        local_irq_restore(flags);
}

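/*
 * Align the range to cache lines, then write back the D-cache and
 * invalidate the I-cache over it so newly written instructions become
 * visible to instruction fetch.
 */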
void flush_icache_range(unsigned long start, unsigned long end)
{
        unsigned long line_size, flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);
        local_irq_save(flags);
        cpu_cache_wbinval_range(start, end, 1);
        local_irq_restore(flags);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_cache_wbinval_page((unsigned long)page_address(page),
                               vma->vm_flags & VM_EXEC);
        local_irq_restore(flags);
}

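/*
 * Aliasing variant of update_mmu_cache(): preload the TLB entry for the
 * faulting address, then write back and invalidate the page's kernel
 * alias if it was marked PG_dcache_dirty or the VMA is executable.
 */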
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *pte)
{
        struct page *page;
        unsigned long flags;
        unsigned long pfn = pte_pfn(*pte);

        if (!pfn_valid(pfn))
                return;

        if (vma->vm_mm == current->active_mm) {
                local_irq_save(flags);
                __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
                __nds32__tlbop_rwr(*pte);
                __nds32__isb();
                local_irq_restore(flags);
        }

        page = pfn_to_page(pfn);
        if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
            (vma->vm_flags & VM_EXEC)) {
                local_irq_save(flags);
                cpu_dcache_wbinval_page((unsigned long)page_address(page));
                local_irq_restore(flags);
        }
}
#endif