// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
#include <nds32_intrinsic.h>

#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];
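
/*
 * Page table walkers used by the fault and cache paths below:
 * va_kernel_present() probes the kernel page tables, va_present()
 * probes a user mm. Both return the PTE value when the address is
 * mapped and 0 otherwise.
 */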
int va_kernel_present(unsigned long addr)
{
        pmd_t *pmd;
        pte_t *ptep, pte;

        pmd = pmd_offset(pgd_offset_k(addr), addr);
        if (!pmd_none(*pmd)) {
                ptep = pte_offset_map(pmd, addr);
                pte = *ptep;
                if (pte_present(pte))
                        return pte;
        }
        return 0;
}

pte_t va_present(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd)) {
                                ptep = pte_offset_map(pmd, addr);
                                pte = *ptep;
                                if (pte_present(pte))
                                        return pte;
                        }
                }
        }
        return 0;
}
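
/*
 * Permission probes used by the fault handlers: an address is readable
 * (writable) only when it is present and its PTE grants the
 * corresponding permission.
 */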
int va_readable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_read(pte))
                        ret = 1;
        } else {
                /*
                 * Superuser mode is always readable, so we only need
                 * to check whether the address is present.
                 */
                return !!va_kernel_present(addr);
        }
        return ret;
}

int va_writable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_write(pte))
                        ret = 1;
        } else {
                /* superuser mode */
                pte = va_kernel_present(addr);
                if (pte && pte_kernel_write(pte))
                        ret = 1;
        }
        return ret;
}
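
/*
 * Whole-cache operations. The I-cache is invalidated line by line by
 * index (sets * ways * line_size), unrolled four CCTL operations per
 * iteration; the D-cache has a dedicated invalidate-all intrinsic.
 */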
void cpu_icache_inval_all(void)
{
        unsigned long end, line_size;

        line_size = L1_cache_info[ICACHE].line_size;
        end =
            line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
        } while (end > 0);
}

void cpu_dcache_inval_all(void)
{
        __nds32__cctl_l1d_invalall();
}
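
/*
 * Write back every data cache level (L1 and L2). Each L2 command is
 * issued through the L2C command register only after the previous one
 * has drained (L2_CMD_RDY).
 */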
#ifdef CONFIG_CACHE_L2
void dcache_wb_all_level(void)
{
        unsigned long flags, cmd;

        local_irq_save(flags);
        __nds32__cctl_l1d_wball_alvl();
        /* Section 1: ensure sections 2 & 3 execute only after the writeback */
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);

        /* Section 2: confirm the all-level writeback is done in CPU and L2C */
        cmd = CCTL_CMD_L2_SYNC;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();

        /* Section 3: write back the whole L2 cache */
        cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();
        __nds32__msync_all();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dcache_wb_all_level);
#endif

void cpu_dcache_wb_all(void)
{
        __nds32__cctl_l1d_wball_one_lvl();
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_dcache_wbinval_all(void)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wb_all();
#endif
        cpu_dcache_inval_all();
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        local_irq_restore(flags);
#endif
}
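
/*
 * Page operations: each routine walks one PAGE_SIZE region from the
 * end downwards, four cache lines per loop iteration.
 */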
void cpu_icache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[ICACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
        } while (end != start);
}

void cpu_dcache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
}

void cpu_dcache_wb_page(unsigned long start)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_page(unsigned long page, int flushi)
{
        cpu_dcache_wbinval_page(page);
        if (flushi)
                cpu_icache_inval_page(page);
}
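
/*
 * Range operations: [start, end) is walked one cache line per CCTL
 * operation; cpu_cache_wbinval_range() line-aligns the boundaries on
 * behalf of its caller.
 */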
void cpu_icache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[ICACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
                start += line_size;
        }
}

void cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
}

void cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
{
        unsigned long line_size, align_start, align_end;

        line_size = L1_cache_info[DCACHE].line_size;
        align_start = start & ~(line_size - 1);
        align_end = (end + line_size - 1) & ~(line_size - 1);
        cpu_dcache_wbinval_range(align_start, align_end);

        if (flushi) {
                line_size = L1_cache_info[ICACHE].line_size;
                align_start = start & ~(line_size - 1);
                align_end = (end + line_size - 1) & ~(line_size - 1);
                cpu_icache_inval_range(align_start, align_end);
        }
}
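
/*
 * Write back / invalidate a user address range, skipping pages that
 * are not mapped: ranges above 8 pages fall back to whole-cache
 * operations; otherwise the partial head and tail plus every full page
 * in between are checked with va_present() before being touched.
 */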
void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   bool flushi, bool wbd)
{
        unsigned long line_size, t_start, t_end;

        if (!flushi && !wbd)
                return;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);

        if ((end - start) > (8 * PAGE_SIZE)) {
                if (wbd)
                        cpu_dcache_wbinval_all();
                if (flushi)
                        cpu_icache_inval_all();
                return;
        }

        t_start = (start + PAGE_SIZE) & PAGE_MASK;
        t_end = ((end - 1) & PAGE_MASK);

        if ((start & PAGE_MASK) == t_end) {
                if (va_present(vma->vm_mm, start)) {
                        if (wbd)
                                cpu_dcache_wbinval_range(start, end);
                        if (flushi)
                                cpu_icache_inval_range(start, end);
                }
                return;
        }

        if (va_present(vma->vm_mm, start)) {
                if (wbd)
                        cpu_dcache_wbinval_range(start, t_start);
                if (flushi)
                        cpu_icache_inval_range(start, t_start);
        }

        if (va_present(vma->vm_mm, end - 1)) {
                if (wbd)
                        cpu_dcache_wbinval_range(t_end, end);
                if (flushi)
                        cpu_icache_inval_range(t_end, end);
        }

        while (t_start < t_end) {
                if (va_present(vma->vm_mm, t_start)) {
                        if (wbd)
                                cpu_dcache_wbinval_page(t_start);
                        if (flushi)
                                cpu_icache_inval_page(t_start);
                }
                t_start += PAGE_SIZE;
        }
}
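
/*
 * L2 maintenance operates on physical addresses: one CCTL_SINGLE_CMD
 * per L2 line, followed by CCTL_CMD_L2_SYNC to drain the L2C command
 * queue.
 */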
#ifdef CONFIG_CACHE_L2
static inline void cpu_l2cache_op(unsigned long start, unsigned long end,
                                  unsigned long op)
{
        unsigned long p_start = __pa(start);
        unsigned long p_end = __pa(end);
        unsigned long cmd;
        unsigned long line_size;

        /* TODO: use PAGE mode to optimize when the range exceeds PAGE_SIZE */
        line_size = L2_CACHE_LINE_SIZE();
        p_start = p_start & (~(line_size - 1));
        p_end = (p_end + line_size - 1) & (~(line_size - 1));
        cmd =
            (p_start & ~(line_size - 1)) | op |
            CCTL_SINGLE_CMD;
        do {
                L2_CMD_RDY();
                L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                cmd += line_size;
                p_start += line_size;
        } while (p_end > p_start);
        cmd = CCTL_CMD_L2_SYNC;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();
}
#else
#define cpu_l2cache_op(start, end, op) do { } while (0)
#endif
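
/*
 * DMA maintenance: write a buffer back before a device reads it,
 * invalidate it before the CPU reads data a device wrote. Both L1 and
 * L2 are maintained with interrupts disabled, ending with a full
 * memory sync.
 */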
void cpu_dma_wb_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wb_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_dma_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long old_start = start;
        unsigned long old_end = end;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;
        local_irq_save(flags);
        if (start != old_start) {
                /* Unaligned head: write the partial line back before
                 * invalidating so unrelated data sharing it survives. */
                cpu_dcache_wbinval_range(start, start + line_size);
                cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
        }
        if (end != old_end) {
                /* Unaligned tail: same treatment as the head. */
                cpu_dcache_wbinval_range(end - line_size, end);
                cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
        }
        cpu_dcache_inval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wbinval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}
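
/*
 * Processor control. cpu_reset() disables both L1 caches after a final
 * writeback/invalidate and jumps to the reset address with address
 * translation off (jr.toff); cpu_switch_mm() installs the new context
 * ID and the physical address of the incoming page directory.
 */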
void cpu_proc_init(void)
{
}

void cpu_proc_fin(void)
{
}

void cpu_do_idle(void)
{
        __nds32__standby_no_wake_grant();
}

void cpu_reset(unsigned long reset)
{
        u32 tmp;

        GIE_DISABLE();
        tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
        tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
        __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();

        __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
}

void cpu_switch_mm(struct mm_struct *mm)
{
        unsigned long cid;

        cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
        cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
        __nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
        __nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
}