arch/nds32/mm/proc.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
#include <nds32_intrinsic.h>

#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];

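/*
 * Walk the kernel page tables for 'addr' and return the PTE value if the
 * mapping is present, or 0 otherwise.
 */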
int va_kernel_present(unsigned long addr)
{
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        p4d = p4d_offset(pgd_offset_k(addr), addr);
        pud = pud_offset(p4d, addr);
        pmd = pmd_offset(pud, addr);
        if (!pmd_none(*pmd)) {
                ptep = pte_offset_map(pmd, addr);
                pte = *ptep;
                if (pte_present(pte))
                        return pte;
        }
        return 0;
}

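/*
 * Walk the page tables of 'mm' for the user address 'addr' and return the
 * PTE value if every level of the mapping is present, or 0 otherwise.
 */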
pte_t va_present(struct mm_struct * mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                p4d = p4d_offset(pgd, addr);
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, addr);
                        if (!pud_none(*pud)) {
                                pmd = pmd_offset(pud, addr);
                                if (!pmd_none(*pmd)) {
                                        ptep = pte_offset_map(pmd, addr);
                                        pte = *ptep;
                                        if (pte_present(pte))
                                                return pte;
                                }
                        }
                }
        }
        return 0;

}

int va_readable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (!pte && pte_read(pte))
                        ret = 1;
        } else {
                /* superuser mode is always readable, so we only
                 * need to check whether it is present or not */
                return (!!va_kernel_present(addr));
        }
        return ret;
}

int va_writable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (!pte && pte_write(pte))
                        ret = 1;
        } else {
                /* superuser mode */
                pte = va_kernel_present(addr);
                if (!pte && pte_kernel_write(pte))
                        ret = 1;
        }
        return ret;
}

/*
 * All
 */
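/*
 * Invalidate the whole L1 instruction cache by index, walking ways * sets
 * cache lines, four lines per loop iteration.
 */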
void cpu_icache_inval_all(void)
{
        unsigned long end, line_size;

        line_size = L1_cache_info[ICACHE].line_size;
        end =
            line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
        } while (end > 0);
        __nds32__isb();
}

void cpu_dcache_inval_all(void)
{
        __nds32__cctl_l1d_invalall();
}

#ifdef CONFIG_CACHE_L2
void dcache_wb_all_level(void)
{
        unsigned long flags, cmd;
        local_irq_save(flags);
        __nds32__cctl_l1d_wball_alvl();
        /* Section 1: Ensure the section 2 & 3 program code execution after */
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);

        /* Section 2: Confirm the writeback all level is done in CPU and L2C */
        cmd = CCTL_CMD_L2_SYNC;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();

        /* Section 3: Writeback whole L2 cache */
        cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();
        __nds32__msync_all();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dcache_wb_all_level);
#endif

void cpu_dcache_wb_all(void)
{
        __nds32__cctl_l1d_wball_one_lvl();
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
}

void cpu_dcache_wbinval_all(void)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long flags;
        local_irq_save(flags);
#endif
        cpu_dcache_wb_all();
        cpu_dcache_inval_all();
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        local_irq_restore(flags);
#endif
}

/*
 * Page
 */
void cpu_icache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[ICACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__isb();
}

void cpu_dcache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
}

void cpu_dcache_wb_page(unsigned long start)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
#endif
}

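/*
 * Write back (skipped for a write-through D-cache) and invalidate every
 * D-cache line of one page by virtual address, four lines per iteration.
 */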
void cpu_dcache_wbinval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
}

void cpu_cache_wbinval_page(unsigned long page, int flushi)
{
        cpu_dcache_wbinval_page(page);
        if (flushi)
                cpu_icache_inval_page(page);
}

/*
 * Range
 */
void cpu_icache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[ICACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__isb();
}

void cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
}

void cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
#endif
}

void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
}

void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
{
        unsigned long line_size, align_start, align_end;

        line_size = L1_cache_info[DCACHE].line_size;
        align_start = start & ~(line_size - 1);
        align_end = (end + line_size - 1) & ~(line_size - 1);
        cpu_dcache_wbinval_range(align_start, align_end);

        if (flushi) {
                line_size = L1_cache_info[ICACHE].line_size;
                align_start = start & ~(line_size - 1);
                align_end = (end + line_size - 1) & ~(line_size - 1);
                cpu_icache_inval_range(align_start, align_end);
        }
}

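/*
 * Write back/invalidate the caches for [start, end) in a user VMA, touching
 * only addresses whose mapping is present.  Ranges larger than eight pages
 * fall back to whole-cache operations; otherwise the partial first and last
 * pages are handled by range and the whole pages in between page by page.
 */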
void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   bool flushi, bool wbd)
{
        unsigned long line_size, t_start, t_end;

        if (!flushi && !wbd)
                return;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);

        if ((end - start) > (8 * PAGE_SIZE)) {
                if (wbd)
                        cpu_dcache_wbinval_all();
                if (flushi)
                        cpu_icache_inval_all();
                return;
        }

        t_start = (start + PAGE_SIZE) & PAGE_MASK;
        t_end = ((end - 1) & PAGE_MASK);

        if ((start & PAGE_MASK) == t_end) {
                if (va_present(vma->vm_mm, start)) {
                        if (wbd)
                                cpu_dcache_wbinval_range(start, end);
                        if (flushi)
                                cpu_icache_inval_range(start, end);
                }
                return;
        }

        if (va_present(vma->vm_mm, start)) {
                if (wbd)
                        cpu_dcache_wbinval_range(start, t_start);
                if (flushi)
                        cpu_icache_inval_range(start, t_start);
        }

        if (va_present(vma->vm_mm, end - 1)) {
                if (wbd)
                        cpu_dcache_wbinval_range(t_end, end);
                if (flushi)
                        cpu_icache_inval_range(t_end, end);
        }

        while (t_start < t_end) {
                if (va_present(vma->vm_mm, t_start)) {
                        if (wbd)
                                cpu_dcache_wbinval_page(t_start);
                        if (flushi)
                                cpu_icache_inval_page(t_start);
                }
                t_start += PAGE_SIZE;
        }
}

#ifdef CONFIG_CACHE_L2
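/*
 * Issue the L2 CCTL command 'op' for each cache line covering the physical
 * range behind [start, end), then synchronize the L2 cache.  Does nothing
 * when the L2 cache controller is not present.
 */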
static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op)
{
        if (atl2c_base) {
                unsigned long p_start = __pa(start);
                unsigned long p_end = __pa(end);
                unsigned long cmd;
                unsigned long line_size;
                /* TODO: could use PAGE mode to optimize if the range is larger than PAGE_SIZE */
                line_size = L2_CACHE_LINE_SIZE();
                p_start = p_start & (~(line_size - 1));
                p_end = (p_end + line_size - 1) & (~(line_size - 1));
                cmd =
                    (p_start & ~(line_size - 1)) | op |
                    CCTL_SINGLE_CMD;
                do {
                        L2_CMD_RDY();
                        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                        cmd += line_size;
                        p_start += line_size;
                } while (p_end > p_start);
                cmd = CCTL_CMD_L2_SYNC;
                L2_CMD_RDY();
                L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                L2_CMD_RDY();
        }
}
#else
#define cpu_l2cache_op(start,end,op) do { } while (0)
#endif
/*
 * DMA
 */
void cpu_dma_wb_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wb_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
        __nds32__msync_all();
        local_irq_restore(flags);
}

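/*
 * Invalidate [start, end) for DMA.  A partially covered first or last cache
 * line is written back and invalidated first, so that data outside the
 * requested range sharing that line is not lost.
 */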
void cpu_dma_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long old_start = start;
        unsigned long old_end = end;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;
        local_irq_save(flags);
        if (start != old_start) {
                cpu_dcache_wbinval_range(start, start + line_size);
                cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
        }
        if (end != old_end) {
                cpu_dcache_wbinval_range(end - line_size, end);
                cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
        }
        cpu_dcache_inval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
        __nds32__msync_all();
        local_irq_restore(flags);

}

void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wbinval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_proc_init(void)
{
}

void cpu_proc_fin(void)
{
}

void cpu_do_idle(void)
{
        __nds32__standby_no_wake_grant();
}

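/*
 * Disable interrupts and both L1 caches, write back and invalidate the
 * D-cache, invalidate the I-cache, then branch to 'reset'.
 */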
void cpu_reset(unsigned long reset)
{
        u32 tmp;
        GIE_DISABLE();
        tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
        tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
        __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();

        __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
}

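/*
 * Switch to the address space of 'mm': install its context ID in the
 * TLB_MISC register and point L1_PPTB at its page directory.
 */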
void cpu_switch_mm(struct mm_struct *mm)
{
        unsigned long cid;
        cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
        cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
        __nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
        __nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
}