arch/mips/mm/c-r4k.c
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996 David S. Miller ([email protected])
7  * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle ([email protected])
8  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9  */
10 #include <linux/cpu_pm.h>
11 #include <linux/hardirq.h>
12 #include <linux/init.h>
13 #include <linux/highmem.h>
14 #include <linux/kernel.h>
15 #include <linux/linkage.h>
16 #include <linux/preempt.h>
17 #include <linux/sched.h>
18 #include <linux/smp.h>
19 #include <linux/mm.h>
20 #include <linux/export.h>
21 #include <linux/bitops.h>
22 #include <linux/dma-map-ops.h> /* for dma_default_coherent */
23
24 #include <asm/bcache.h>
25 #include <asm/bootinfo.h>
26 #include <asm/cache.h>
27 #include <asm/cacheops.h>
28 #include <asm/cpu.h>
29 #include <asm/cpu-features.h>
30 #include <asm/cpu-type.h>
31 #include <asm/io.h>
32 #include <asm/page.h>
33 #include <asm/r4kcache.h>
34 #include <asm/sections.h>
35 #include <asm/mmu_context.h>
36 #include <asm/cacheflush.h> /* for run_uncached() */
37 #include <asm/traps.h>
38 #include <asm/mips-cps.h>
39
40 /*
41  * Bits describing what cache ops an SMP callback function may perform.
42  *
43  * R4K_HIT   -  Virtual user or kernel address based cache operations. The
44  *              active_mm must be checked before using user addresses, falling
45  *              back to kmap.
46  * R4K_INDEX -  Index based cache operations.
47  */
48
49 #define R4K_HIT         BIT(0)
50 #define R4K_INDEX       BIT(1)
51
52 /**
53  * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
54  * @type:       Type of cache operations (R4K_HIT or R4K_INDEX).
55  *
56  * Decides whether a cache op needs to be performed on every core in the system.
57  * This may change depending on the @type of cache operation, as well as the set
58  * of online CPUs, so preemption should be disabled by the caller to prevent CPU
59  * hotplug from changing the result.
60  *
61  * Returns:     1 if the cache operation @type should be done on every core in
62  *              the system.
63  *              0 if the cache operation @type is globalized and only needs to
64  *              be performed on a single CPU.
65  */
66 static inline bool r4k_op_needs_ipi(unsigned int type)
67 {
68         /* The MIPS Coherence Manager (CM) globalizes address-based cache ops */
69         if (type == R4K_HIT && mips_cm_present())
70                 return false;
71
72         /*
73          * Hardware doesn't globalize the required cache ops, so SMP calls may
74          * be needed, but only if there are foreign CPUs (non-siblings with
75          * separate caches).
76          */
77         /* cpu_foreign_map[] undeclared when !CONFIG_SMP */
78 #ifdef CONFIG_SMP
79         return !cpumask_empty(&cpu_foreign_map[0]);
80 #else
81         return false;
82 #endif
83 }
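/*
 * Editor's example (a sketch of the calling convention documented above, as
 * used by __r4k_flush_icache_range() later in this file).  Preemption is
 * disabled around the check so CPU hotplug cannot change the answer before
 * the cache op is issued:
 *
 *	preempt_disable();
 *	if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
 *		... prefer address-based (HIT) ops for small ranges ...
 *	}
 *	preempt_enable();
 */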
84
85 /*
86  * Special variant of smp_call_function() for use by cache functions:
87  *
88  *  o No return value
89  *  o collapses to normal function call on UP kernels
90  *  o collapses to normal function call on systems with a single shared
91  *    primary cache.
92  *  o doesn't disable interrupts on the local CPU
93  */
94 static inline void r4k_on_each_cpu(unsigned int type,
95                                    void (*func)(void *info), void *info)
96 {
97         preempt_disable();
98         if (r4k_op_needs_ipi(type))
99                 smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
100                                        func, info, 1);
101         func(info);
102         preempt_enable();
103 }
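/*
 * Editor's example: callers pair a local_* helper with r4k_on_each_cpu(),
 * as r4k___flush_cache_all() does below:
 *
 *	r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
 *
 * The callback always runs on the local CPU, and additionally on one VPE of
 * each foreign core when r4k_op_needs_ipi(type) is true.
 */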
104
105 /*
106  * Must die.
107  */
108 static unsigned long icache_size __read_mostly;
109 static unsigned long dcache_size __read_mostly;
110 static unsigned long vcache_size __read_mostly;
111 static unsigned long scache_size __read_mostly;
112
113 #define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
114 #define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)
115
116 #define R4600_HIT_CACHEOP_WAR_IMPL                                      \
117 do {                                                                    \
118         if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&              \
119             cpu_is_r4600_v2_x())                                        \
120                 *(volatile unsigned long *)CKSEG1;                      \
121         if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP))                \
122                 __asm__ __volatile__("nop;nop;nop;nop");                \
123 } while (0)
124
125 static void (*r4k_blast_dcache_page)(unsigned long addr);
126
127 static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
128 {
129         R4600_HIT_CACHEOP_WAR_IMPL;
130         blast_dcache32_page(addr);
131 }
132
133 static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
134 {
135         blast_dcache64_page(addr);
136 }
137
138 static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
139 {
140         blast_dcache128_page(addr);
141 }
142
143 static void r4k_blast_dcache_page_setup(void)
144 {
145         unsigned long  dc_lsize = cpu_dcache_line_size();
146
147         switch (dc_lsize) {
148         case 0:
149                 r4k_blast_dcache_page = (void *)cache_noop;
150                 break;
151         case 16:
152                 r4k_blast_dcache_page = blast_dcache16_page;
153                 break;
154         case 32:
155                 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
156                 break;
157         case 64:
158                 r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
159                 break;
160         case 128:
161                 r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
162                 break;
163         default:
164                 break;
165         }
166 }
167
168 #ifndef CONFIG_EVA
169 #define r4k_blast_dcache_user_page  r4k_blast_dcache_page
170 #else
171
172 static void (*r4k_blast_dcache_user_page)(unsigned long addr);
173
174 static void r4k_blast_dcache_user_page_setup(void)
175 {
176         unsigned long  dc_lsize = cpu_dcache_line_size();
177
178         if (dc_lsize == 0)
179                 r4k_blast_dcache_user_page = (void *)cache_noop;
180         else if (dc_lsize == 16)
181                 r4k_blast_dcache_user_page = blast_dcache16_user_page;
182         else if (dc_lsize == 32)
183                 r4k_blast_dcache_user_page = blast_dcache32_user_page;
184         else if (dc_lsize == 64)
185                 r4k_blast_dcache_user_page = blast_dcache64_user_page;
186 }
187
188 #endif
189
190 void (* r4k_blast_dcache)(void);
191 EXPORT_SYMBOL(r4k_blast_dcache);
192
193 static void r4k_blast_dcache_setup(void)
194 {
195         unsigned long dc_lsize = cpu_dcache_line_size();
196
197         if (dc_lsize == 0)
198                 r4k_blast_dcache = (void *)cache_noop;
199         else if (dc_lsize == 16)
200                 r4k_blast_dcache = blast_dcache16;
201         else if (dc_lsize == 32)
202                 r4k_blast_dcache = blast_dcache32;
203         else if (dc_lsize == 64)
204                 r4k_blast_dcache = blast_dcache64;
205         else if (dc_lsize == 128)
206                 r4k_blast_dcache = blast_dcache128;
207 }
208
209 /* force code alignment (used for CONFIG_WAR_TX49XX_ICACHE_INDEX_INV) */
210 #define JUMP_TO_ALIGN(order) \
211         __asm__ __volatile__( \
212                 "b\t1f\n\t" \
213                 ".align\t" #order "\n\t" \
214                 "1:\n\t" \
215                 )
216 #define CACHE32_UNROLL32_ALIGN  JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
217 #define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
218
219 static inline void blast_r4600_v1_icache32(void)
220 {
221         unsigned long flags;
222
223         local_irq_save(flags);
224         blast_icache32();
225         local_irq_restore(flags);
226 }
227
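/*
 * Editor's note (inferred from the structure below): a TX49 index
 * invalidate may hit the i-cache lines holding the invalidation loop
 * itself.  The loop is therefore aligned to 1kB chunks (32 unrolled cache
 * ops covering 32-byte lines) and, in two passes, invalidates only chunks
 * of the opposite parity to the chunk it is currently executing from.
 */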
228 static inline void tx49_blast_icache32(void)
229 {
230         unsigned long start = INDEX_BASE;
231         unsigned long end = start + current_cpu_data.icache.waysize;
232         unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
233         unsigned long ws_end = current_cpu_data.icache.ways <<
234                                current_cpu_data.icache.waybit;
235         unsigned long ws, addr;
236
237         CACHE32_UNROLL32_ALIGN2;
238         /* I'm in even chunk.  Blast odd chunks */
239         for (ws = 0; ws < ws_end; ws += ws_inc)
240                 for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
241                         cache_unroll(32, kernel_cache, Index_Invalidate_I,
242                                      addr | ws, 32);
243         CACHE32_UNROLL32_ALIGN;
244         /* I'm in odd chunk.  Blast even chunks */
245         for (ws = 0; ws < ws_end; ws += ws_inc)
246                 for (addr = start; addr < end; addr += 0x400 * 2)
247                         cache_unroll(32, kernel_cache, Index_Invalidate_I,
248                                      addr | ws, 32);
249 }
250
251 static void (* r4k_blast_icache_page)(unsigned long addr);
252
253 static void r4k_blast_icache_page_setup(void)
254 {
255         unsigned long ic_lsize = cpu_icache_line_size();
256
257         if (ic_lsize == 0)
258                 r4k_blast_icache_page = (void *)cache_noop;
259         else if (ic_lsize == 16)
260                 r4k_blast_icache_page = blast_icache16_page;
261         else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF)
262                 r4k_blast_icache_page = loongson2_blast_icache32_page;
263         else if (ic_lsize == 32)
264                 r4k_blast_icache_page = blast_icache32_page;
265         else if (ic_lsize == 64)
266                 r4k_blast_icache_page = blast_icache64_page;
267         else if (ic_lsize == 128)
268                 r4k_blast_icache_page = blast_icache128_page;
269 }
270
271 #ifndef CONFIG_EVA
272 #define r4k_blast_icache_user_page  r4k_blast_icache_page
273 #else
274
275 static void (*r4k_blast_icache_user_page)(unsigned long addr);
276
277 static void r4k_blast_icache_user_page_setup(void)
278 {
279         unsigned long ic_lsize = cpu_icache_line_size();
280
281         if (ic_lsize == 0)
282                 r4k_blast_icache_user_page = (void *)cache_noop;
283         else if (ic_lsize == 16)
284                 r4k_blast_icache_user_page = blast_icache16_user_page;
285         else if (ic_lsize == 32)
286                 r4k_blast_icache_user_page = blast_icache32_user_page;
287         else if (ic_lsize == 64)
288                 r4k_blast_icache_user_page = blast_icache64_user_page;
289 }
290
291 #endif
292
293 void (* r4k_blast_icache)(void);
294 EXPORT_SYMBOL(r4k_blast_icache);
295
296 static void r4k_blast_icache_setup(void)
297 {
298         unsigned long ic_lsize = cpu_icache_line_size();
299
300         if (ic_lsize == 0)
301                 r4k_blast_icache = (void *)cache_noop;
302         else if (ic_lsize == 16)
303                 r4k_blast_icache = blast_icache16;
304         else if (ic_lsize == 32) {
305                 if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
306                     cpu_is_r4600_v1_x())
307                         r4k_blast_icache = blast_r4600_v1_icache32;
308                 else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
309                         r4k_blast_icache = tx49_blast_icache32;
310                 else if (current_cpu_type() == CPU_LOONGSON2EF)
311                         r4k_blast_icache = loongson2_blast_icache32;
312                 else
313                         r4k_blast_icache = blast_icache32;
314         } else if (ic_lsize == 64)
315                 r4k_blast_icache = blast_icache64;
316         else if (ic_lsize == 128)
317                 r4k_blast_icache = blast_icache128;
318 }
319
320 static void (* r4k_blast_scache_page)(unsigned long addr);
321
322 static void r4k_blast_scache_page_setup(void)
323 {
324         unsigned long sc_lsize = cpu_scache_line_size();
325
326         if (scache_size == 0)
327                 r4k_blast_scache_page = (void *)cache_noop;
328         else if (sc_lsize == 16)
329                 r4k_blast_scache_page = blast_scache16_page;
330         else if (sc_lsize == 32)
331                 r4k_blast_scache_page = blast_scache32_page;
332         else if (sc_lsize == 64)
333                 r4k_blast_scache_page = blast_scache64_page;
334         else if (sc_lsize == 128)
335                 r4k_blast_scache_page = blast_scache128_page;
336 }
337
338 static void (* r4k_blast_scache)(void);
339
340 static void r4k_blast_scache_setup(void)
341 {
342         unsigned long sc_lsize = cpu_scache_line_size();
343
344         if (scache_size == 0)
345                 r4k_blast_scache = (void *)cache_noop;
346         else if (sc_lsize == 16)
347                 r4k_blast_scache = blast_scache16;
348         else if (sc_lsize == 32)
349                 r4k_blast_scache = blast_scache32;
350         else if (sc_lsize == 64)
351                 r4k_blast_scache = blast_scache64;
352         else if (sc_lsize == 128)
353                 r4k_blast_scache = blast_scache128;
354 }
355
356 static void (*r4k_blast_scache_node)(long node);
357
358 static void r4k_blast_scache_node_setup(void)
359 {
360         unsigned long sc_lsize = cpu_scache_line_size();
361
362         if (current_cpu_type() != CPU_LOONGSON64)
363                 r4k_blast_scache_node = (void *)cache_noop;
364         else if (sc_lsize == 16)
365                 r4k_blast_scache_node = blast_scache16_node;
366         else if (sc_lsize == 32)
367                 r4k_blast_scache_node = blast_scache32_node;
368         else if (sc_lsize == 64)
369                 r4k_blast_scache_node = blast_scache64_node;
370         else if (sc_lsize == 128)
371                 r4k_blast_scache_node = blast_scache128_node;
372 }
373
374 static inline void local_r4k___flush_cache_all(void * args)
375 {
376         switch (current_cpu_type()) {
377         case CPU_LOONGSON2EF:
378         case CPU_R4000SC:
379         case CPU_R4000MC:
380         case CPU_R4400SC:
381         case CPU_R4400MC:
382         case CPU_R10000:
383         case CPU_R12000:
384         case CPU_R14000:
385         case CPU_R16000:
386                 /*
387                  * These caches are inclusive caches, that is, if something
388                  * is not cached in the S-cache, we know it also won't be
389                  * in one of the primary caches.
390                  */
391                 r4k_blast_scache();
392                 break;
393
394         case CPU_LOONGSON64:
395                 /* Use get_ebase_cpunum() for both NUMA=y/n */
396                 r4k_blast_scache_node(get_ebase_cpunum() >> 2);
397                 break;
398
399         case CPU_BMIPS5000:
400                 r4k_blast_scache();
401                 __sync();
402                 break;
403
404         default:
405                 r4k_blast_dcache();
406                 r4k_blast_icache();
407                 break;
408         }
409 }
410
411 static void r4k___flush_cache_all(void)
412 {
413         r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
414 }
415
416 /**
417  * has_valid_asid() - Determine if an mm already has an ASID.
418  * @mm:         Memory map.
419  * @type:       R4K_HIT or R4K_INDEX, type of cache op.
420  *
421  * Determines whether @mm already has an ASID on any of the CPUs which cache ops
422  * of type @type within an r4k_on_each_cpu() call will affect. If
423  * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
424  * scope of the operation is confined to sibling CPUs, otherwise all online CPUs
425  * will need to be checked.
426  *
427  * Must be called in non-preemptive context.
428  *
429  * Returns:     1 if the CPUs affected by @type cache ops have an ASID for @mm.
430  *              0 otherwise.
431  */
432 static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
433 {
434         unsigned int i;
435         const cpumask_t *mask = cpu_present_mask;
436
437         if (cpu_has_mmid)
438                 return cpu_context(0, mm) != 0;
439
440         /* cpu_sibling_map[] undeclared when !CONFIG_SMP */
441 #ifdef CONFIG_SMP
442         /*
443          * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
444          * each foreign core, so we only need to worry about siblings.
445          * Otherwise we need to worry about all present CPUs.
446          */
447         if (r4k_op_needs_ipi(type))
448                 mask = &cpu_sibling_map[smp_processor_id()];
449 #endif
450         for_each_cpu(i, mask)
451                 if (cpu_context(i, mm))
452                         return 1;
453         return 0;
454 }
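/*
 * Editor's example: local_r4k_flush_cache_range() below bails out early
 * when the mm has no ASID on any CPU the op could affect:
 *
 *	if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
 *		return;
 *
 * The non-preemptive-context requirement is met because r4k_on_each_cpu()
 * disables preemption before invoking the callback.
 */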
455
456 static void r4k__flush_cache_vmap(void)
457 {
458         r4k_blast_dcache();
459 }
460
461 static void r4k__flush_cache_vunmap(void)
462 {
463         r4k_blast_dcache();
464 }
465
466 /*
467  * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes
468  * whole caches when vma is executable.
469  */
470 static inline void local_r4k_flush_cache_range(void * args)
471 {
472         struct vm_area_struct *vma = args;
473         int exec = vma->vm_flags & VM_EXEC;
474
475         if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
476                 return;
477
478         /*
479          * If dcache can alias, we must blast it since mapping is changing.
480          * If executable, we must ensure any dirty lines are written back far
481          * enough to be visible to icache.
482          */
483         if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
484                 r4k_blast_dcache();
485         /* If executable, blast stale lines from icache */
486         if (exec)
487                 r4k_blast_icache();
488 }
489
490 static void r4k_flush_cache_range(struct vm_area_struct *vma,
491         unsigned long start, unsigned long end)
492 {
493         int exec = vma->vm_flags & VM_EXEC;
494
495         if (cpu_has_dc_aliases || exec)
496                 r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
497 }
498
499 static inline void local_r4k_flush_cache_mm(void * args)
500 {
501         struct mm_struct *mm = args;
502
503         if (!has_valid_asid(mm, R4K_INDEX))
504                 return;
505
506         /*
507          * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
508          * only flush the primary caches, while R1x000 behave sanely.
509          * R4000SC and R4400SC indexed S-cache ops also invalidate primary
510          * caches, so we can bail out early.
511          */
512         if (current_cpu_type() == CPU_R4000SC ||
513             current_cpu_type() == CPU_R4000MC ||
514             current_cpu_type() == CPU_R4400SC ||
515             current_cpu_type() == CPU_R4400MC) {
516                 r4k_blast_scache();
517                 return;
518         }
519
520         r4k_blast_dcache();
521 }
522
523 static void r4k_flush_cache_mm(struct mm_struct *mm)
524 {
525         if (!cpu_has_dc_aliases)
526                 return;
527
528         r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
529 }
530
531 struct flush_cache_page_args {
532         struct vm_area_struct *vma;
533         unsigned long addr;
534         unsigned long pfn;
535 };
536
537 static inline void local_r4k_flush_cache_page(void *args)
538 {
539         struct flush_cache_page_args *fcp_args = args;
540         struct vm_area_struct *vma = fcp_args->vma;
541         unsigned long addr = fcp_args->addr;
542         struct page *page = pfn_to_page(fcp_args->pfn);
543         int exec = vma->vm_flags & VM_EXEC;
544         struct mm_struct *mm = vma->vm_mm;
545         int map_coherent = 0;
546         pmd_t *pmdp;
547         pte_t *ptep;
548         void *vaddr;
549
550         /*
551          * If the mm owns no valid ASID yet, it cannot possibly have gotten
552          * this page into the cache.
553          */
554         if (!has_valid_asid(mm, R4K_HIT))
555                 return;
556
557         addr &= PAGE_MASK;
558         pmdp = pmd_off(mm, addr);
559         ptep = pte_offset_kernel(pmdp, addr);
560
561         /*
562          * If the page isn't marked valid, the page cannot possibly be
563          * in the cache.
564          */
565         if (!(pte_present(*ptep)))
566                 return;
567
568         if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
569                 vaddr = NULL;
570         else {
571                 /*
572                  * Use kmap_coherent or kmap_atomic to do flushes for
573                  * another ASID than the current one.
574                  */
575                 map_coherent = (cpu_has_dc_aliases &&
576                                 page_mapcount(page) &&
577                                 !Page_dcache_dirty(page));
578                 if (map_coherent)
579                         vaddr = kmap_coherent(page, addr);
580                 else
581                         vaddr = kmap_atomic(page);
582                 addr = (unsigned long)vaddr;
583         }
584
585         if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
586                 vaddr ? r4k_blast_dcache_page(addr) :
587                         r4k_blast_dcache_user_page(addr);
588                 if (exec && !cpu_icache_snoops_remote_store)
589                         r4k_blast_scache_page(addr);
590         }
591         if (exec) {
592                 if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
593                         drop_mmu_context(mm);
594                 } else
595                         vaddr ? r4k_blast_icache_page(addr) :
596                                 r4k_blast_icache_user_page(addr);
597         }
598
599         if (vaddr) {
600                 if (map_coherent)
601                         kunmap_coherent();
602                 else
603                         kunmap_atomic(vaddr);
604         }
605 }
606
607 static void r4k_flush_cache_page(struct vm_area_struct *vma,
608         unsigned long addr, unsigned long pfn)
609 {
610         struct flush_cache_page_args args;
611
612         args.vma = vma;
613         args.addr = addr;
614         args.pfn = pfn;
615
616         r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args);
617 }
618
619 static inline void local_r4k_flush_data_cache_page(void * addr)
620 {
621         r4k_blast_dcache_page((unsigned long) addr);
622 }
623
624 static void r4k_flush_data_cache_page(unsigned long addr)
625 {
626         if (in_atomic())
627                 local_r4k_flush_data_cache_page((void *)addr);
628         else
629                 r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page,
630                                 (void *) addr);
631 }
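/*
 * Editor's note: smp_call_function_many() must not be called from atomic
 * context, so when in_atomic() is true only the local d-cache is flushed
 * here; the SMP path via r4k_on_each_cpu() is taken otherwise.
 */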
632
633 struct flush_icache_range_args {
634         unsigned long start;
635         unsigned long end;
636         unsigned int type;
637         bool user;
638 };
639
640 static inline void __local_r4k_flush_icache_range(unsigned long start,
641                                                   unsigned long end,
642                                                   unsigned int type,
643                                                   bool user)
644 {
645         if (!cpu_has_ic_fills_f_dc) {
646                 if (type == R4K_INDEX ||
647                     (type & R4K_INDEX && end - start >= dcache_size)) {
648                         r4k_blast_dcache();
649                 } else {
650                         R4600_HIT_CACHEOP_WAR_IMPL;
651                         if (user)
652                                 protected_blast_dcache_range(start, end);
653                         else
654                                 blast_dcache_range(start, end);
655                 }
656         }
657
658         if (type == R4K_INDEX ||
659             (type & R4K_INDEX && end - start > icache_size))
660                 r4k_blast_icache();
661         else {
662                 switch (boot_cpu_type()) {
663                 case CPU_LOONGSON2EF:
664                         protected_loongson2_blast_icache_range(start, end);
665                         break;
666
667                 default:
668                         if (user)
669                                 protected_blast_icache_range(start, end);
670                         else
671                                 blast_icache_range(start, end);
672                         break;
673                 }
674         }
675 }
676
677 static inline void local_r4k_flush_icache_range(unsigned long start,
678                                                 unsigned long end)
679 {
680         __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
681 }
682
683 static inline void local_r4k_flush_icache_user_range(unsigned long start,
684                                                      unsigned long end)
685 {
686         __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
687 }
688
689 static inline void local_r4k_flush_icache_range_ipi(void *args)
690 {
691         struct flush_icache_range_args *fir_args = args;
692         unsigned long start = fir_args->start;
693         unsigned long end = fir_args->end;
694         unsigned int type = fir_args->type;
695         bool user = fir_args->user;
696
697         __local_r4k_flush_icache_range(start, end, type, user);
698 }
699
700 static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
701                                      bool user)
702 {
703         struct flush_icache_range_args args;
704         unsigned long size, cache_size;
705
706         args.start = start;
707         args.end = end;
708         args.type = R4K_HIT | R4K_INDEX;
709         args.user = user;
710
711         /*
712          * Indexed cache ops require an SMP call.
713          * Consider if that can or should be avoided.
714          */
715         preempt_disable();
716         if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
717                 /*
718                  * If address-based cache ops don't require an SMP call, then
719                  * use them exclusively for small flushes.
720                  */
721                 size = end - start;
722                 cache_size = icache_size;
723                 if (!cpu_has_ic_fills_f_dc) {
724                         size *= 2;
725                         cache_size += dcache_size;
726                 }
727                 if (size <= cache_size)
728                         args.type &= ~R4K_INDEX;
729         }
730         r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
731         preempt_enable();
732         instruction_hazard();
733 }
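/*
 * Worked example (editor's illustration with hypothetical sizes): given a
 * 32kB i-cache, a 32kB d-cache and !cpu_has_ic_fills_f_dc, flushing a 12kB
 * range costs 2 * 12kB = 24kB of HIT-type ops, within the 64kB combined
 * cache size, so R4K_INDEX is dropped and no IPI is raised.  A 48kB range
 * (96kB of ops) keeps R4K_INDEX and takes the SMP call instead.
 */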
734
735 static void r4k_flush_icache_range(unsigned long start, unsigned long end)
736 {
737         return __r4k_flush_icache_range(start, end, false);
738 }
739
740 static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
741 {
742         return __r4k_flush_icache_range(start, end, true);
743 }
744
745 #ifdef CONFIG_DMA_NONCOHERENT
746
747 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
748 {
749         /* Catch bad driver code */
750         if (WARN_ON(size == 0))
751                 return;
752
753         preempt_disable();
754         if (cpu_has_inclusive_pcaches) {
755                 if (size >= scache_size) {
756                         if (current_cpu_type() != CPU_LOONGSON64)
757                                 r4k_blast_scache();
758                         else
759                                 r4k_blast_scache_node(pa_to_nid(addr));
760                 } else {
761                         blast_scache_range(addr, addr + size);
762                 }
763                 preempt_enable();
764                 __sync();
765                 return;
766         }
767
768         /*
769          * Either no secondary cache or the available caches don't have the
770          * subset property so we have to flush the primary caches
771          * explicitly.
772          * If an INDEX-type operation would require an IPI, we have to use
773          * the HIT-type alternative instead, as IPIs cannot be used here
774          * due to interrupts possibly being disabled.
775          */
776         if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
777                 r4k_blast_dcache();
778         } else {
779                 R4600_HIT_CACHEOP_WAR_IMPL;
780                 blast_dcache_range(addr, addr + size);
781         }
782         preempt_enable();
783
784         bc_wback_inv(addr, size);
785         __sync();
786 }
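/*
 * Editor's note: drivers do not call this routine directly; r4k_cache_init()
 * (later in this file) installs it as _dma_cache_wback_inv, which the MIPS
 * DMA plumbing invokes through the dma_cache_wback_inv() hook when a buffer
 * is mapped for device access.
 */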
787
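/*
 * Editor's note: called on BMIPS5000 just before the range invalidate in
 * r4k_dma_cache_inv() below.  It writes back the first line, the last line
 * and, for longer ranges, the second line of the range; presumably so that
 * dirty data shared with neighbouring objects in partially covered boundary
 * lines is not discarded by the invalidate.
 */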
788 static void prefetch_cache_inv(unsigned long addr, unsigned long size)
789 {
790         unsigned int linesz = cpu_scache_line_size();
791         unsigned long addr0 = addr, addr1;
792
793         addr0 &= ~(linesz - 1);
794         addr1 = (addr0 + size - 1) & ~(linesz - 1);
795
796         protected_writeback_scache_line(addr0);
797         if (likely(addr1 != addr0))
798                 protected_writeback_scache_line(addr1);
799         else
800                 return;
801
802         addr0 += linesz;
803         if (likely(addr1 != addr0))
804                 protected_writeback_scache_line(addr0);
805         else
806                 return;
807
808         addr1 -= linesz;
809         if (likely(addr1 > addr0))
810                 protected_writeback_scache_line(addr0);
811 }
812
813 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
814 {
815         /* Catch bad driver code */
816         if (WARN_ON(size == 0))
817                 return;
818
819         preempt_disable();
820
821         if (current_cpu_type() == CPU_BMIPS5000)
822                 prefetch_cache_inv(addr, size);
823
824         if (cpu_has_inclusive_pcaches) {
825                 if (size >= scache_size) {
826                         if (current_cpu_type() != CPU_LOONGSON64)
827                                 r4k_blast_scache();
828                         else
829                                 r4k_blast_scache_node(pa_to_nid(addr));
830                 } else {
831                         /*
832                          * There is no clearly documented alignment requirement
833                          * for the cache instruction on MIPS processors, and
834                          * some of them, among others the QED RM5200 and
835                          * RM7000 processors, will throw an address error for
836                          * cache hit ops with insufficient alignment.  This is
837                          * solved by aligning the address to the cache line size.
838                          */
839                         blast_inv_scache_range(addr, addr + size);
840                 }
841                 preempt_enable();
842                 __sync();
843                 return;
844         }
845
846         if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
847                 r4k_blast_dcache();
848         } else {
849                 R4600_HIT_CACHEOP_WAR_IMPL;
850                 blast_inv_dcache_range(addr, addr + size);
851         }
852         preempt_enable();
853
854         bc_inv(addr, size);
855         __sync();
856 }
857 #endif /* CONFIG_DMA_NONCOHERENT */
858
859 static void r4k_flush_icache_all(void)
860 {
861         if (cpu_has_vtag_icache)
862                 r4k_blast_icache();
863 }
864
865 struct flush_kernel_vmap_range_args {
866         unsigned long   vaddr;
867         int             size;
868 };
869
870 static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
871 {
872         /*
873          * Aliases only affect the primary caches so don't bother with
874          * S-caches or T-caches.
875          */
876         r4k_blast_dcache();
877 }
878
879 static inline void local_r4k_flush_kernel_vmap_range(void *args)
880 {
881         struct flush_kernel_vmap_range_args *vmra = args;
882         unsigned long vaddr = vmra->vaddr;
883         int size = vmra->size;
884
885         /*
886          * Aliases only affect the primary caches so don't bother with
887          * S-caches or T-caches.
888          */
889         R4600_HIT_CACHEOP_WAR_IMPL;
890         blast_dcache_range(vaddr, vaddr + size);
891 }
892
893 static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
894 {
895         struct flush_kernel_vmap_range_args args;
896
897         args.vaddr = (unsigned long) vaddr;
898         args.size = size;
899
900         if (size >= dcache_size)
901                 r4k_on_each_cpu(R4K_INDEX,
902                                 local_r4k_flush_kernel_vmap_range_index, NULL);
903         else
904                 r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range,
905                                 &args);
906 }
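/*
 * Editor's example (hypothetical sizes): with a 32kB d-cache, a 64kB
 * request takes the index path and blasts the whole d-cache, while a 4kB
 * request walks only the affected lines with HIT-type ops.
 */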
907
908 static inline void rm7k_erratum31(void)
909 {
910         const unsigned long ic_lsize = 32;
911         unsigned long addr;
912
913         /* RM7000 erratum #31. The icache is screwed at startup. */
914         write_c0_taglo(0);
915         write_c0_taghi(0);
916
917         for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
918                 __asm__ __volatile__ (
919                         ".set push\n\t"
920                         ".set noreorder\n\t"
921                         ".set mips3\n\t"
922                         "cache\t%1, 0(%0)\n\t"
923                         "cache\t%1, 0x1000(%0)\n\t"
924                         "cache\t%1, 0x2000(%0)\n\t"
925                         "cache\t%1, 0x3000(%0)\n\t"
926                         "cache\t%2, 0(%0)\n\t"
927                         "cache\t%2, 0x1000(%0)\n\t"
928                         "cache\t%2, 0x2000(%0)\n\t"
929                         "cache\t%2, 0x3000(%0)\n\t"
930                         "cache\t%1, 0(%0)\n\t"
931                         "cache\t%1, 0x1000(%0)\n\t"
932                         "cache\t%1, 0x2000(%0)\n\t"
933                         "cache\t%1, 0x3000(%0)\n\t"
934                         ".set pop\n"
935                         :
936                         : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill_I));
937         }
938 }
939
940 static inline int alias_74k_erratum(struct cpuinfo_mips *c)
941 {
942         unsigned int imp = c->processor_id & PRID_IMP_MASK;
943         unsigned int rev = c->processor_id & PRID_REV_MASK;
944         int present = 0;
945
946         /*
947          * Early versions of the 74K do not update the cache tags on a
948          * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
949          * aliases.  In this case it is better to treat the cache as always
950          * having aliases.  Also disable the synonym tag update feature
951          * where available.  In this case no opportunistic tag update will
952          * happen where a load causes a virtual address miss but a physical
953          * address hit during a D-cache look-up.
954          */
955         switch (imp) {
956         case PRID_IMP_74K:
957                 if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
958                         present = 1;
959                 if (rev == PRID_REV_ENCODE_332(2, 4, 0))
960                         write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
961                 break;
962         case PRID_IMP_1074K:
963                 if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
964                         present = 1;
965                         write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
966                 }
967                 break;
968         default:
969                 BUG();
970         }
971
972         return present;
973 }
974
975 static void b5k_instruction_hazard(void)
976 {
977         __sync();
978         __sync();
979         __asm__ __volatile__(
980         "       nop; nop; nop; nop; nop; nop; nop; nop\n"
981         "       nop; nop; nop; nop; nop; nop; nop; nop\n"
982         "       nop; nop; nop; nop; nop; nop; nop; nop\n"
983         "       nop; nop; nop; nop; nop; nop; nop; nop\n"
984         : : : "memory");
985 }
986
987 static char *way_string[] = { NULL, "direct mapped", "2-way",
988         "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
989         "9-way", "10-way", "11-way", "12-way",
990         "13-way", "14-way", "15-way", "16-way",
991 };
992
993 static void probe_pcache(void)
994 {
995         struct cpuinfo_mips *c = &current_cpu_data;
996         unsigned int config = read_c0_config();
997         unsigned int prid = read_c0_prid();
998         int has_74k_erratum = 0;
999         unsigned long config1;
1000         unsigned int lsize;
1001
1002         switch (current_cpu_type()) {
1003         case CPU_R4600:                 /* QED style two way caches? */
1004         case CPU_R4700:
1005         case CPU_R5000:
1006         case CPU_NEVADA:
1007                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1008                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1009                 c->icache.ways = 2;
1010                 c->icache.waybit = __ffs(icache_size/2);
1011
1012                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1013                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1014                 c->dcache.ways = 2;
1015                 c->dcache.waybit= __ffs(dcache_size/2);
1016
1017                 c->options |= MIPS_CPU_CACHE_CDEX_P;
1018                 break;
1019
1020         case CPU_R5500:
1021                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1022                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1023                 c->icache.ways = 2;
1024                 c->icache.waybit= 0;
1025
1026                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1027                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1028                 c->dcache.ways = 2;
1029                 c->dcache.waybit = 0;
1030
1031                 c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
1032                 break;
1033
1034         case CPU_TX49XX:
1035                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1036                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1037                 c->icache.ways = 4;
1038                 c->icache.waybit= 0;
1039
1040                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1041                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1042                 c->dcache.ways = 4;
1043                 c->dcache.waybit = 0;
1044
1045                 c->options |= MIPS_CPU_CACHE_CDEX_P;
1046                 c->options |= MIPS_CPU_PREFETCH;
1047                 break;
1048
1049         case CPU_R4000PC:
1050         case CPU_R4000SC:
1051         case CPU_R4000MC:
1052         case CPU_R4400PC:
1053         case CPU_R4400SC:
1054         case CPU_R4400MC:
1055         case CPU_R4300:
1056                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1057                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1058                 c->icache.ways = 1;
1059                 c->icache.waybit = 0;   /* doesn't matter */
1060
1061                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1062                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1063                 c->dcache.ways = 1;
1064                 c->dcache.waybit = 0;   /* does not matter */
1065
1066                 c->options |= MIPS_CPU_CACHE_CDEX_P;
1067                 break;
1068
1069         case CPU_R10000:
1070         case CPU_R12000:
1071         case CPU_R14000:
1072         case CPU_R16000:
1073                 icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
1074                 c->icache.linesz = 64;
1075                 c->icache.ways = 2;
1076                 c->icache.waybit = 0;
1077
1078                 dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
1079                 c->dcache.linesz = 32;
1080                 c->dcache.ways = 2;
1081                 c->dcache.waybit = 0;
1082
1083                 c->options |= MIPS_CPU_PREFETCH;
1084                 break;
1085
1086         case CPU_RM7000:
1087                 rm7k_erratum31();
1088
1089                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1090                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1091                 c->icache.ways = 4;
1092                 c->icache.waybit = __ffs(icache_size / c->icache.ways);
1093
1094                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1095                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1096                 c->dcache.ways = 4;
1097                 c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
1098
1099                 c->options |= MIPS_CPU_CACHE_CDEX_P;
1100                 c->options |= MIPS_CPU_PREFETCH;
1101                 break;
1102
1103         case CPU_LOONGSON2EF:
1104                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1105                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1106                 if (prid & 0x3)
1107                         c->icache.ways = 4;
1108                 else
1109                         c->icache.ways = 2;
1110                 c->icache.waybit = 0;
1111
1112                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1113                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1114                 if (prid & 0x3)
1115                         c->dcache.ways = 4;
1116                 else
1117                         c->dcache.ways = 2;
1118                 c->dcache.waybit = 0;
1119                 break;
1120
1121         case CPU_LOONGSON64:
1122                 config1 = read_c0_config1();
1123                 lsize = (config1 >> 19) & 7;
1124                 if (lsize)
1125                         c->icache.linesz = 2 << lsize;
1126                 else
1127                         c->icache.linesz = 0;
1128                 c->icache.sets = 64 << ((config1 >> 22) & 7);
1129                 c->icache.ways = 1 + ((config1 >> 16) & 7);
1130                 icache_size = c->icache.sets *
1131                                           c->icache.ways *
1132                                           c->icache.linesz;
1133                 c->icache.waybit = 0;
1134
1135                 lsize = (config1 >> 10) & 7;
1136                 if (lsize)
1137                         c->dcache.linesz = 2 << lsize;
1138                 else
1139                         c->dcache.linesz = 0;
1140                 c->dcache.sets = 64 << ((config1 >> 13) & 7);
1141                 c->dcache.ways = 1 + ((config1 >> 7) & 7);
1142                 dcache_size = c->dcache.sets *
1143                                           c->dcache.ways *
1144                                           c->dcache.linesz;
1145                 c->dcache.waybit = 0;
1146                 if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
1147                                 (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
1148                                 (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
1149                         c->options |= MIPS_CPU_PREFETCH;
1150                 break;
1151
1152         case CPU_CAVIUM_OCTEON3:
1153                 /* For now lie about the number of ways. */
1154                 c->icache.linesz = 128;
1155                 c->icache.sets = 16;
1156                 c->icache.ways = 8;
1157                 c->icache.flags |= MIPS_CACHE_VTAG;
1158                 icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
1159
1160                 c->dcache.linesz = 128;
1161                 c->dcache.ways = 8;
1162                 c->dcache.sets = 8;
1163                 dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
1164                 c->options |= MIPS_CPU_PREFETCH;
1165                 break;
1166
1167         default:
1168                 if (!(config & MIPS_CONF_M))
1169                         panic("Don't know how to probe P-caches on this cpu.");
1170
1171                 /*
1172                  * We seem to be a MIPS32 or MIPS64 CPU,
1173                  * so let's probe the I-cache ...
1174                  */
1175                 config1 = read_c0_config1();
1176
1177                 lsize = (config1 >> 19) & 7;
1178
1179                 /* IL == 7 is reserved */
1180                 if (lsize == 7)
1181                         panic("Invalid icache line size");
1182
1183                 c->icache.linesz = lsize ? 2 << lsize : 0;
1184
1185                 c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
1186                 c->icache.ways = 1 + ((config1 >> 16) & 7);
1187
1188                 icache_size = c->icache.sets *
1189                               c->icache.ways *
1190                               c->icache.linesz;
1191                 c->icache.waybit = __ffs(icache_size/c->icache.ways);
1192
1193                 if (config & MIPS_CONF_VI)
1194                         c->icache.flags |= MIPS_CACHE_VTAG;
1195
1196                 /*
1197                  * Now probe the MIPS32 / MIPS64 data cache.
1198                  */
1199                 c->dcache.flags = 0;
1200
1201                 lsize = (config1 >> 10) & 7;
1202
1203                 /* DL == 7 is reserved */
1204                 if (lsize == 7)
1205                         panic("Invalid dcache line size");
1206
1207                 c->dcache.linesz = lsize ? 2 << lsize : 0;
1208
1209                 c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
1210                 c->dcache.ways = 1 + ((config1 >> 7) & 7);
1211
1212                 dcache_size = c->dcache.sets *
1213                               c->dcache.ways *
1214                               c->dcache.linesz;
1215                 c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
1216
1217                 c->options |= MIPS_CPU_PREFETCH;
1218                 break;
1219         }
1220
1221         /*
1222          * Processor configuration sanity check for the R4000SC erratum
1223          * #5.  With page sizes larger than 32kB there is no possibility
1224          * to get a VCE exception anymore so we don't care about this
1225          * misconfiguration.  The case is rather theoretical anyway;
1226          * presumably no vendor is shipping his hardware in the "bad"
1227          * configuration.
1228          */
1229         if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
1230             (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
1231             !(config & CONF_SC) && c->icache.linesz != 16 &&
1232             PAGE_SIZE <= 0x8000)
1233                 panic("Improper R4000SC processor configuration detected");
1234
1235         /* compute a couple of other cache variables */
1236         c->icache.waysize = icache_size / c->icache.ways;
1237         c->dcache.waysize = dcache_size / c->dcache.ways;
1238
1239         c->icache.sets = c->icache.linesz ?
1240                 icache_size / (c->icache.linesz * c->icache.ways) : 0;
1241         c->dcache.sets = c->dcache.linesz ?
1242                 dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
1243
1244         /*
1245          * R1x000 P-caches are odd in a positive way.  They're 32kB 2-way
1246          * virtually indexed, so normally they'd suffer from aliases, but
1247          * magic in the hardware deals with that for us so we don't need to
1248          * take care ourselves.
1249          */
1250         switch (current_cpu_type()) {
1251         case CPU_20KC:
1252         case CPU_25KF:
1253         case CPU_I6400:
1254         case CPU_I6500:
1255         case CPU_SB1:
1256         case CPU_SB1A:
1257                 c->dcache.flags |= MIPS_CACHE_PINDEX;
1258                 break;
1259
1260         case CPU_R10000:
1261         case CPU_R12000:
1262         case CPU_R14000:
1263         case CPU_R16000:
1264                 break;
1265
1266         case CPU_74K:
1267         case CPU_1074K:
1268                 has_74k_erratum = alias_74k_erratum(c);
1269                 fallthrough;
1270         case CPU_M14KC:
1271         case CPU_M14KEC:
1272         case CPU_24K:
1273         case CPU_34K:
1274         case CPU_1004K:
1275         case CPU_INTERAPTIV:
1276         case CPU_P5600:
1277         case CPU_PROAPTIV:
1278         case CPU_M5150:
1279         case CPU_QEMU_GENERIC:
1280         case CPU_P6600:
1281         case CPU_M6250:
1282                 if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
1283                     (c->icache.waysize > PAGE_SIZE))
1284                         c->icache.flags |= MIPS_CACHE_ALIASES;
1285                 if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
1286                         /*
1287                          * Effectively physically indexed dcache,
1288                          * thus no virtual aliases.
1289                          */
1290                         c->dcache.flags |= MIPS_CACHE_PINDEX;
1291                         break;
1292                 }
1293                 fallthrough;
1294         default:
1295                 if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
1296                         c->dcache.flags |= MIPS_CACHE_ALIASES;
1297         }
1298
1299         /* Physically indexed caches don't suffer from virtual aliasing */
1300         if (c->dcache.flags & MIPS_CACHE_PINDEX)
1301                 c->dcache.flags &= ~MIPS_CACHE_ALIASES;
1302
1303         /*
1304          * In systems with CM the icache fills from L2 or closer caches, and
1305          * thus sees remote stores without needing to write them back any
1306          * further than that.
1307          */
1308         if (mips_cm_present())
1309                 c->icache.flags |= MIPS_IC_SNOOPS_REMOTE;
1310
1311         switch (current_cpu_type()) {
1312         case CPU_20KC:
1313                 /*
1314                  * Some older 20Kc chips don't have the 'VI' bit in
1315                  * the config register.
1316                  */
1317                 c->icache.flags |= MIPS_CACHE_VTAG;
1318                 break;
1319
1320         case CPU_ALCHEMY:
1321         case CPU_I6400:
1322         case CPU_I6500:
1323                 c->icache.flags |= MIPS_CACHE_IC_F_DC;
1324                 break;
1325
1326         case CPU_BMIPS5000:
1327                 c->icache.flags |= MIPS_CACHE_IC_F_DC;
1328                 /* Cache aliases are handled in hardware; allow HIGHMEM */
1329                 c->dcache.flags &= ~MIPS_CACHE_ALIASES;
1330                 break;
1331
1332         case CPU_LOONGSON2EF:
1333                 /*
1334                  * LOONGSON2 has a 4-way icache, but when using an indexed
1335                  * cache op, one op will act on all 4 ways.
1336                  */
1337                 c->icache.ways = 1;
1338         }
1339
1340         pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
1341                 icache_size >> 10,
1342                 c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
1343                 way_string[c->icache.ways], c->icache.linesz);
1344
1345         pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes.\n",
1346                 dcache_size >> 10, way_string[c->dcache.ways],
1347                 (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
1348                 (c->dcache.flags & MIPS_CACHE_ALIASES) ?
1349                         "cache aliases" : "no aliases",
1350                 c->dcache.linesz);
1351 }
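/*
 * Worked example (editor's illustration): for a hypothetical Config1 value
 * with IL = 4, IS = 2 and IA = 3, the default case above computes
 * linesz = 2 << 4 = 32 bytes, sets = 32 << ((2 + 1) & 7) = 256 and
 * ways = 1 + 3 = 4, i.e. a 256 * 4 * 32 = 32kB 4-way primary i-cache.
 */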
1352
1353 static void probe_vcache(void)
1354 {
1355         struct cpuinfo_mips *c = &current_cpu_data;
1356         unsigned int config2, lsize;
1357
1358         if (current_cpu_type() != CPU_LOONGSON64)
1359                 return;
1360
1361         config2 = read_c0_config2();
1362         if ((lsize = ((config2 >> 20) & 15)))
1363                 c->vcache.linesz = 2 << lsize;
1364         else
1365                 c->vcache.linesz = lsize;
1366
1367         c->vcache.sets = 64 << ((config2 >> 24) & 15);
1368         c->vcache.ways = 1 + ((config2 >> 16) & 15);
1369
1370         vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
1371
1372         c->vcache.waybit = 0;
1373         c->vcache.waysize = vcache_size / c->vcache.ways;
1374
1375         pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
1376                 vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
1377 }
1378
1379 /*
1380  * If you even _breathe_ on this function, look at the gcc output and make sure
1381  * it does not pop things on and off the stack for the cache sizing loop that
1382  * executes in KSEG1 space or else you will crash and burn badly.  You have
1383  * been warned.
1384  */
1385 static int probe_scache(void)
1386 {
1387         unsigned long flags, addr, begin, end, pow2;
1388         unsigned int config = read_c0_config();
1389         struct cpuinfo_mips *c = &current_cpu_data;
1390
1391         if (config & CONF_SC)
1392                 return 0;
1393
1394         begin = (unsigned long) &_stext;
1395         begin &= ~((4 * 1024 * 1024) - 1);
1396         end = begin + (4 * 1024 * 1024);
1397
1398         /*
1399          * This is such a bitch, you'd think they would make it easy to do
1400          * this.  Away you daemons of stupidity!
1401          */
1402         local_irq_save(flags);
1403
1404         /* Fill each size-multiple cache line with a valid tag. */
1405         pow2 = (64 * 1024);
1406         for (addr = begin; addr < end; addr = (begin + pow2)) {
1407                 unsigned long *p = (unsigned long *) addr;
1408                 __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
1409                 pow2 <<= 1;
1410         }
1411
1412         /* Load first line with zero (therefore invalid) tag. */
1413         write_c0_taglo(0);
1414         write_c0_taghi(0);
1415         __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
1416         cache_op(Index_Store_Tag_I, begin);
1417         cache_op(Index_Store_Tag_D, begin);
1418         cache_op(Index_Store_Tag_SD, begin);
1419
1420         /* Now search for the wrap around point. */
1421         pow2 = (128 * 1024);
1422         for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
1423                 cache_op(Index_Load_Tag_SD, addr);
1424                 __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
1425                 if (!read_c0_taglo())
1426                         break;
1427                 pow2 <<= 1;
1428         }
1429         local_irq_restore(flags);
1430         addr -= begin;
1431
1432         scache_size = addr;
1433         c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
1434         c->scache.ways = 1;
1435         c->scache.waybit = 0;           /* does not matter */
1436
1437         return 1;
1438 }
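/*
 * Worked example (editor's illustration): assume a direct-mapped 512kB
 * S-cache.  The fill loop above plants valid tags at power-of-two offsets,
 * a zero tag is then stored at 'begin', and the probe loop reads tags at
 * begin + 128kB, begin + 256kB, ...  The first address whose index aliases
 * 'begin' is begin + 512kB, where the zero tag is found, so the loop breaks
 * and scache_size ends up as 512kB.
 */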
1439
1440 static void loongson2_sc_init(void)
1441 {
1442         struct cpuinfo_mips *c = &current_cpu_data;
1443
1444         scache_size = 512*1024;
1445         c->scache.linesz = 32;
1446         c->scache.ways = 4;
1447         c->scache.waybit = 0;
1448         c->scache.waysize = scache_size / (c->scache.ways);
1449         c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1450         pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1451                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1452
1453         c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1454 }
1455
1456 static void loongson3_sc_init(void)
1457 {
1458         struct cpuinfo_mips *c = &current_cpu_data;
1459         unsigned int config2, lsize;
1460
1461         config2 = read_c0_config2();
1462         lsize = (config2 >> 4) & 15;
1463         if (lsize)
1464                 c->scache.linesz = 2 << lsize;
1465         else
1466                 c->scache.linesz = 0;
1467         c->scache.sets = 64 << ((config2 >> 8) & 15);
1468         c->scache.ways = 1 + (config2 & 15);
1469
1470         /* Loongson-3 has 4 S-cache banks, while Loongson-2K has only 2 banks */
1471         if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
1472                 c->scache.sets *= 2;
1473         else
1474                 c->scache.sets *= 4;
1475
1476         scache_size = c->scache.sets * c->scache.ways * c->scache.linesz;
1477
1478         c->scache.waybit = 0;
1479         c->scache.waysize = scache_size / c->scache.ways;
1480         pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1481                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1482         if (scache_size)
1483                 c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1484         return;
1485 }
1486
1487 extern int r5k_sc_init(void);
1488 extern int rm7k_sc_init(void);
1489 extern int mips_sc_init(void);
1490
1491 static void setup_scache(void)
1492 {
1493         struct cpuinfo_mips *c = &current_cpu_data;
1494         unsigned int config = read_c0_config();
1495         int sc_present = 0;
1496
1497         /*
1498          * Do the probing thing on R4000SC and R4400SC processors.  Other
1499          * processors don't have an S-cache that would be relevant to
1500          * Linux memory management.
1501          */
1502         switch (current_cpu_type()) {
1503         case CPU_R4000SC:
1504         case CPU_R4000MC:
1505         case CPU_R4400SC:
1506         case CPU_R4400MC:
1507                 sc_present = run_uncached(probe_scache);
1508                 if (sc_present)
1509                         c->options |= MIPS_CPU_CACHE_CDEX_S;
1510                 break;
1511
1512         case CPU_R10000:
1513         case CPU_R12000:
1514         case CPU_R14000:
1515         case CPU_R16000:
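                /*
                 * R10000-family S-cache geometry comes from c0_config:
                 * size is 512kB << SS, lines are 64 or 128 bytes, and the
                 * cache is always 2-way.
                 */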
1516                 scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
1517                 c->scache.linesz = 64 << ((config >> 13) & 1);
1518                 c->scache.ways = 2;
1519                 c->scache.waybit = 0;
1520                 sc_present = 1;
1521                 break;
1522
1523         case CPU_R5000:
1524         case CPU_NEVADA:
1525 #ifdef CONFIG_R5000_CPU_SCACHE
1526                 r5k_sc_init();
1527 #endif
1528                 return;
1529
1530         case CPU_RM7000:
1531 #ifdef CONFIG_RM7000_CPU_SCACHE
1532                 rm7k_sc_init();
1533 #endif
1534                 return;
1535
1536         case CPU_LOONGSON2EF:
1537                 loongson2_sc_init();
1538                 return;
1539
1540         case CPU_LOONGSON64:
1541                 loongson3_sc_init();
1542                 return;
1543
1544         case CPU_CAVIUM_OCTEON3:
1545                 /* don't need to worry about L2, fully coherent */
1546                 return;
1547
1548         default:
1549                 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
1550                                     MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
1551                                     MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
1552                                     MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
1553 #ifdef CONFIG_MIPS_CPU_SCACHE
1554                         if (mips_sc_init()) {
1555                                 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
1556                                 pr_info("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
1557                                        scache_size >> 10,
1558                                        way_string[c->scache.ways], c->scache.linesz);
1559
1560                                 if (current_cpu_type() == CPU_BMIPS5000)
1561                                         c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1562                         }
1563
1564 #else
1565                         if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
1566                                 panic("Don't know how to handle MIPS32 / MIPS64 second level cache");
1567 #endif
1568                         return;
1569                 }
1570                 sc_present = 0;
1571         }
1572
1573         if (!sc_present)
1574                 return;
1575
1576         /* compute a couple of other cache variables */
1577         c->scache.waysize = scache_size / c->scache.ways;
1578
1579         c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1580
1581         pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1582                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1583
1584         c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1585 }
1586
1587 void au1x00_fixup_config_od(void)
1588 {
1589         /*
1590          * c0_config.od (bit 19) was write-only (and read back as 0)
1591          * on early revisions of Alchemy SOCs.  It disables bus
1592          * transaction overlapping and needs to be set to fix various errata.
1593          */
1594         switch (read_c0_prid()) {
1595         case 0x00030100: /* Au1000 DA */
1596         case 0x00030201: /* Au1000 HA */
1597         case 0x00030202: /* Au1000 HB */
1598         case 0x01030200: /* Au1500 AB */
1599         /*
1600          * The Au1100 errata are actually silent about this bit, so we set
1601          * it just in case, for those revisions that the (now removed) cpu
1602          * table said require it.
1603          */
1604         case 0x02030200: /* Au1100 AB */
1605         case 0x02030201: /* Au1100 BA */
1606         case 0x02030202: /* Au1100 BC */
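                /* Set the write-only c0_config.od bit (bit 19). */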
1607                 set_c0_config(1 << 19);
1608                 break;
1609         }
1610 }
1611
1612 /* CP0 hazard avoidance. */
1613 #define NXP_BARRIER()                                                   \
1614          __asm__ __volatile__(                                          \
1615         ".set noreorder\n\t"                                            \
1616         "nop; nop; nop; nop; nop; nop;\n\t"                             \
1617         ".set reorder\n\t")
1618
1619 static void nxp_pr4450_fixup_config(void)
1620 {
1621         unsigned long config0;
1622
1623         config0 = read_c0_config();
1624
1625         /* Clear all three cacheability fields, then set each to the kernel's default CCA. */
1626         config0 &= ~(0x7 | (7 << 25) | (7 << 28));
1627         config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
1628                     ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
1629                     ((_page_cachable_default >> _CACHE_SHIFT) << 28));
1630         write_c0_config(config0);
1631         NXP_BARRIER();
1632 }
1633
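/*
 * Cache Coherency Attribute: -1 means "keep whatever c0_config already
 * contains" unless overridden by the "cca=" kernel parameter below.
 */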
1634 static int cca = -1;
1635
1636 static int __init cca_setup(char *str)
1637 {
1638         get_option(&str, &cca);
1639
1640         return 0;
1641 }
1642
1643 early_param("cca", cca_setup);
1644
1645 static void coherency_setup(void)
1646 {
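        /* Use a valid command-line CCA if given, else keep the current one. */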
1647         if (cca < 0 || cca > 7)
1648                 cca = read_c0_config() & CONF_CM_CMASK;
1649         _page_cachable_default = cca << _CACHE_SHIFT;
1650
1651         pr_debug("Using cache attribute %d\n", cca);
1652         change_c0_config(CONF_CM_CMASK, cca);
1653
1654         /*
1655          * c0_status.cu=0 specifies that updates by the sc instruction use
1656          * the coherency mode specified by the TLB; 1 means cacheable
1657          * coherent update on write will be used.  Not all processors have
1658          * this bit; some wire it to zero, and others, like Toshiba, had
1659          * the silly idea of putting something else there ...
1660          */
1661         switch (current_cpu_type()) {
1662         case CPU_R4000PC:
1663         case CPU_R4000SC:
1664         case CPU_R4000MC:
1665         case CPU_R4400PC:
1666         case CPU_R4400SC:
1667         case CPU_R4400MC:
1668                 clear_c0_config(CONF_CU);
1669                 break;
1670         /*
1671          * We need to catch the early Alchemy SOCs with
1672          * the write-only c0_config.od bit and set it back to one on:
1673          * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC; Au1500 AB
1674          */
1675         case CPU_ALCHEMY:
1676                 au1x00_fixup_config_od();
1677                 break;
1678
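        /*
         * NB: this switch is on current_cpu_type(), so matching on a
         * PRID_IMP_* value here presumably relies on PRID_IMP_PR4450 not
         * colliding with any CPU_* constant.
         */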
1679         case PRID_IMP_PR4450:
1680                 nxp_pr4450_fixup_config();
1681                 break;
1682         }
1683 }
1684
1685 static void r4k_cache_error_setup(void)
1686 {
1687         extern char __weak except_vec2_generic;
1688         extern char __weak except_vec2_sb1;
1689
1690         switch (current_cpu_type()) {
1691         case CPU_SB1:
1692         case CPU_SB1A:
1693                 set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
1694                 break;
1695
1696         default:
1697                 set_uncached_handler(0x100, &except_vec2_generic, 0x80);
1698                 break;
1699         }
1700 }
1701
1702 void r4k_cache_init(void)
1703 {
1704         extern void build_clear_page(void);
1705         extern void build_copy_page(void);
1706         struct cpuinfo_mips *c = &current_cpu_data;
1707
1708         probe_pcache();
1709         probe_vcache();
1710         setup_scache();
1711
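        /* Bind the size-specific cache "blast" helpers to the probed geometry. */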
1712         r4k_blast_dcache_page_setup();
1713         r4k_blast_dcache_setup();
1714         r4k_blast_icache_page_setup();
1715         r4k_blast_icache_setup();
1716         r4k_blast_scache_page_setup();
1717         r4k_blast_scache_setup();
1718         r4k_blast_scache_node_setup();
1719 #ifdef CONFIG_EVA
1720         r4k_blast_dcache_user_page_setup();
1721         r4k_blast_icache_user_page_setup();
1722 #endif
1723
1724         /*
1725          * Some MIPS32 and MIPS64 processors have physically indexed caches.
1726          * This code supports virtually indexed processors and will be
1727          * unnecessarily inefficient on physically indexed processors.
1728          */
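        /*
         * With D-cache aliases, align shared mappings to the way size
         * (sets * linesz) so that potential virtual aliases of a page
         * land on the same cache lines.
         */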
1729         if (c->dcache.linesz && cpu_has_dc_aliases)
1730                 shm_align_mask = max_t(unsigned long,
1731                                         c->dcache.sets * c->dcache.linesz - 1,
1732                                         PAGE_SIZE - 1);
1733         else
1734                 shm_align_mask = PAGE_SIZE - 1;
1735
1736         __flush_cache_vmap      = r4k__flush_cache_vmap;
1737         __flush_cache_vunmap    = r4k__flush_cache_vunmap;
1738
1739         flush_cache_all         = cache_noop;
1740         __flush_cache_all       = r4k___flush_cache_all;
1741         flush_cache_mm          = r4k_flush_cache_mm;
1742         flush_cache_page        = r4k_flush_cache_page;
1743         flush_cache_range       = r4k_flush_cache_range;
1744
1745         __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
1746
1747         flush_icache_all        = r4k_flush_icache_all;
1748         flush_data_cache_page   = r4k_flush_data_cache_page;
1749         flush_icache_range      = r4k_flush_icache_range;
1750         local_flush_icache_range        = local_r4k_flush_icache_range;
1751         __flush_icache_user_range       = r4k_flush_icache_user_range;
1752         __local_flush_icache_user_range = local_r4k_flush_icache_user_range;
1753
1754 #ifdef CONFIG_DMA_NONCOHERENT
1755         _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
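        /* Plain writeback reuses the writeback+invalidate implementation. */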
1756         _dma_cache_wback        = r4k_dma_cache_wback_inv;
1757         _dma_cache_inv          = r4k_dma_cache_inv;
1758 #endif /* CONFIG_DMA_NONCOHERENT */
1759
1760         build_clear_page();
1761         build_copy_page();
1762
1763         /*
1764          * We want to run CMP kernels on core with and without coherent
1765          * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
1766          * or not to flush caches.
1767          */
1768         local_r4k___flush_cache_all(NULL);
1769
1770         coherency_setup();
1771         board_cache_error_setup = r4k_cache_error_setup;
1772
1773         /*
1774          * Per-CPU overrides
1775          */
1776         switch (current_cpu_type()) {
1777         case CPU_BMIPS4350:
1778         case CPU_BMIPS4380:
1779                 /* No IPI is needed because all CPUs share the same D$ */
1780                 flush_data_cache_page = r4k_blast_dcache_page;
1781                 break;
1782         case CPU_BMIPS5000:
1783                 /* We lose our superpowers if L2 is disabled */
1784                 if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
1785                         break;
1786
1787                 /* I$ fills from D$ just by emptying the write buffers */
1788                 flush_cache_page = (void *)b5k_instruction_hazard;
1789                 flush_cache_range = (void *)b5k_instruction_hazard;
1790                 flush_data_cache_page = (void *)b5k_instruction_hazard;
1791                 flush_icache_range = (void *)b5k_instruction_hazard;
1792                 local_flush_icache_range = (void *)b5k_instruction_hazard;
1793
1794
1795                 /* Optimization: an L2 flush implicitly flushes the L1 */
1796                 current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
1797                 break;
1798         case CPU_LOONGSON64:
1799                 /* Loongson-3 maintains cache coherency by hardware */
1800                 __flush_cache_all       = cache_noop;
1801                 __flush_cache_vmap      = cache_noop;
1802                 __flush_cache_vunmap    = cache_noop;
1803                 __flush_kernel_vmap_range = (void *)cache_noop;
1804                 flush_cache_mm          = (void *)cache_noop;
1805                 flush_cache_page        = (void *)cache_noop;
1806                 flush_cache_range       = (void *)cache_noop;
1807                 flush_icache_all        = (void *)cache_noop;
1808                 flush_data_cache_page   = (void *)cache_noop;
1809                 break;
1810         }
1811 }
1812
1813 static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
1814                                void *v)
1815 {
1816         switch (cmd) {
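        /*
         * After leaving a low-power state (or a failed attempt to enter
         * one), c0_config may have been lost; reprogram the CCA.
         */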
1817         case CPU_PM_ENTER_FAILED:
1818         case CPU_PM_EXIT:
1819                 coherency_setup();
1820                 break;
1821         }
1822
1823         return NOTIFY_OK;
1824 }
1825
1826 static struct notifier_block r4k_cache_pm_notifier_block = {
1827         .notifier_call = r4k_cache_pm_notifier,
1828 };
1829
1830 int __init r4k_cache_init_pm(void)
1831 {
1832         return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
1833 }
1834 arch_initcall(r4k_cache_init_pm);