// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];

#ifndef CONFIG_CPU_CACHE_ALIASING
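/*
 * Called after the PTE for @addr has been updated.  Preload the new
 * translation into the TLB for the current mm, then write back and
 * invalidate the page's cache lines if it was marked dirty by
 * flush_dcache_page() (PG_dcache_dirty) or may be executed from.
 */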
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(*pte);
        unsigned long flags;

        if (!pfn_valid(pfn))
                return;

        if (vma->vm_mm == current->active_mm) {
                local_irq_save(flags);
                __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
                __nds32__tlbop_rwr(*pte);
                __nds32__isb();
                local_irq_restore(flags);
        }
        page = pfn_to_page(pfn);

        if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
            (vma->vm_flags & VM_EXEC)) {
                if (!PageHighMem(page)) {
                        cpu_cache_wbinval_page((unsigned long)page_address(page),
                                               vma->vm_flags & VM_EXEC);
                } else {
                        unsigned long kaddr = (unsigned long)kmap_atomic(page);

                        cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
                        kunmap_atomic((void *)kaddr);
                }
        }
}
#else
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

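/*
 * Return nonzero if @addr and @page map to different cache colours,
 * i.e. the virtually-indexed D-cache could hold two distinct lines for
 * the same physical data.
 */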
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
        return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

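/*
 * kremap0()/kremap1() install temporary, locked kernel TLB mappings in
 * two fixed windows whose cache colour matches @uaddr, so the kernel
 * can access the page through the same alias userspace uses.
 * kunmap01() unlocks and invalidates either window.
 */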
static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
        kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}

static inline void kunmap01(unsigned long kaddr)
{
        __nds32__tlbop_unlk(kaddr);
        __nds32__tlbop_inv(kaddr);
        __nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
        kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}

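/*
 * There is no cheap way to find every alias an mm may hold, so an
 * mm-wide flush simply flushes everything.
 */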
void flush_cache_mm(struct mm_struct *mm)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
        local_irq_restore(flags);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
}

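/*
 * For ranges of more than eight pages, a full flush is presumed
 * cheaper than walking the range page by page.
 */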
void flush_cache_range(struct vm_area_struct *vma,
                       unsigned long start, unsigned long end)
{
        unsigned long flags;

        if ((end - start) > 8 * PAGE_SIZE) {
                cpu_dcache_wbinval_all();
                if (vma->vm_flags & VM_EXEC)
                        cpu_icache_inval_all();
                return;
        }
        local_irq_save(flags);
        while (start < end) {
                if (va_present(vma->vm_mm, start))
                        cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
                start += PAGE_SIZE;
        }
        local_irq_restore(flags);
}

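/*
 * Flush a single user page through a temporary kernel window with the
 * same cache colour as @addr.
 */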
void flush_cache_page(struct vm_area_struct *vma,
                      unsigned long addr, unsigned long pfn)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(addr, pfn << PAGE_SHIFT);
        cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
        kunmap01(vto);
        local_irq_restore(flags);
}

void flush_cache_vmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}

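/*
 * copy_user_page()/clear_user_page() write through kernel aliases, so
 * the user alias (@vaddr) is written back and invalidated first and
 * the kernel alias is cleaned afterwards.
 */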
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *to)
{
        cpu_dcache_wbinval_page(vaddr);
        cpu_icache_inval_page(vaddr);
        copy_page(vto, vfrom);
        cpu_dcache_wbinval_page((unsigned long)vto);
        cpu_icache_inval_page((unsigned long)vto);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
        cpu_dcache_wbinval_page(vaddr);
        cpu_icache_inval_page(vaddr);
        clear_page(addr);
        cpu_dcache_wbinval_page((unsigned long)addr);
        cpu_icache_inval_page((unsigned long)addr);
}

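/*
 * Copy a highmem page on behalf of userspace: clean the source's
 * kernel alias, invalidate the destination's, then copy through two
 * temporary windows coloured like @vaddr so no stale alias survives.
 */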
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;

        kto = ((unsigned long)page_address(to) & PAGE_MASK);
        kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
        pto = page_to_phys(to);
        pfrom = page_to_phys(from);

        if (aliasing(vaddr, kfrom))
                cpu_dcache_wb_page(kfrom);
        if (aliasing(vaddr, kto))
                cpu_dcache_inval_page(kto);
        local_irq_save(flags);
        vto = kremap0(vaddr, pto);
        vfrom = kremap1(vaddr, pfrom);
        copy_page((void *)vto, (void *)vfrom);
        kunmap01(vfrom);
        kunmap01(vto);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        unsigned long vto, flags, kto;

        kto = ((unsigned long)page_address(page) & PAGE_MASK);

        local_irq_save(flags);
        if (kto != 0 && aliasing(kto, vaddr)) {
                cpu_dcache_inval_page(kto);
                cpu_icache_inval_page(kto);
        }
        vto = kremap0(vaddr, page_to_phys(page));
        clear_page((void *)vto);
        kunmap01(vto);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(clear_user_highpage);

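/*
 * If the page is not currently mapped into userspace, defer the flush:
 * mark it PG_dcache_dirty and let update_mmu_cache() clean it when a
 * user mapping is installed.  Otherwise write back and invalidate
 * every possible colour of the page right away.
 */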
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        mapping = page_mapping(page);
        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
        } else {
                int i, pc;
                unsigned long vto, kaddr, flags;

                kaddr = (unsigned long)page_address(page);
                cpu_dcache_wbinval_page(kaddr);
                pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
                local_irq_save(flags);
                for (i = 0; i < pc; i++) {
                        vto = kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page));
                        cpu_dcache_wbinval_page(vto);
                        kunmap01(vto);
                }
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(flush_dcache_page);

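/*
 * Write @len bytes into a user page through a kernel window of the
 * user's cache colour; if the VMA is executable, also clean the
 * touched cache lines so the I-cache will see the new instructions.
 */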
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long line_size, start, end, vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        if (vma->vm_flags & VM_EXEC) {
                line_size = L1_cache_info[DCACHE].line_size;
                start = (unsigned long)dst & ~(line_size - 1);
                end = ((unsigned long)dst + len + line_size - 1) &
                      ~(line_size - 1);
                cpu_cache_wbinval_range(start, end, 1);
        }
        kunmap01(vto);
        local_irq_restore(flags);
}

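/* Read from a user page through a kernel window of the user's colour. */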
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        kunmap01(vto);
        local_irq_restore(flags);
}

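/*
 * Anonymous pages have no address_space for flush_dcache_page() to
 * consult, so flush them here: invalidate the I-cache at the user
 * alias when the VMA is executable, and write back and invalidate the
 * kernel alias.
 */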
void flush_anon_page(struct vm_area_struct *vma,
                     struct page *page, unsigned long vaddr)
{
        unsigned long flags;

        if (!PageAnon(page))
                return;

        if (vma->vm_mm != current->active_mm)
                return;

        local_irq_save(flags);
        if (vma->vm_flags & VM_EXEC)
                cpu_icache_inval_page(vaddr & PAGE_MASK);
        cpu_dcache_wbinval_page((unsigned long)page_address(page));
        local_irq_restore(flags);
}

void flush_kernel_dcache_page(struct page *page)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wbinval_page((unsigned long)page_address(page));
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

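/*
 * vmap ranges are already kernel virtual addresses, so a write-back
 * (or invalidate, on the read side) over the range itself suffices.
 */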
void flush_kernel_vmap_range(void *addr, int size)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *addr, int size)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

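/*
 * Align @start and @end to D-cache line boundaries, then write back
 * the D-cache and invalidate the I-cache over the whole range.
 */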
void flush_icache_range(unsigned long start, unsigned long end)
{
        unsigned long line_size, flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);
        local_irq_save(flags);
        cpu_cache_wbinval_range(start, end, 1);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);

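/* Synchronize both caches for one page that is about to be executed. */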
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_cache_wbinval_page((unsigned long)page_address(page),
                               vma->vm_flags & VM_EXEC);
        local_irq_restore(flags);
}

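/*
 * Aliasing variant of update_mmu_cache(): preload the TLB entry and
 * perform any write-back deferred by flush_dcache_page() through
 * PG_dcache_dirty.
 */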
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *pte)
{
        struct page *page;
        unsigned long flags;
        unsigned long pfn = pte_pfn(*pte);

        if (!pfn_valid(pfn))
                return;

        if (vma->vm_mm == current->active_mm) {
                local_irq_save(flags);
                __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
                __nds32__tlbop_rwr(*pte);
                __nds32__isb();
                local_irq_restore(flags);
        }

        page = pfn_to_page(pfn);
        if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
            (vma->vm_flags & VM_EXEC)) {
                local_irq_save(flags);
                cpu_dcache_wbinval_page((unsigned long)page_address(page));
                local_irq_restore(flags);
        }
}
#endif