1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * srmmu.c:  SRMMU specific routines for memory management.
4  *
5  * Copyright (C) 1995 David S. Miller  ([email protected])
6  * Copyright (C) 1995,2002 Pete Zaitcev ([email protected])
7  * Copyright (C) 1996 Eddie C. Dost    ([email protected])
8  * Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
9  * Copyright (C) 1999,2000 Anton Blanchard ([email protected])
10  */
11
12 #include <linux/seq_file.h>
13 #include <linux/spinlock.h>
14 #include <linux/memblock.h>
15 #include <linux/pagemap.h>
16 #include <linux/vmalloc.h>
17 #include <linux/kdebug.h>
18 #include <linux/export.h>
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/log2.h>
22 #include <linux/gfp.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25
26 #include <asm/mmu_context.h>
27 #include <asm/cacheflush.h>
28 #include <asm/tlbflush.h>
29 #include <asm/io-unit.h>
30 #include <asm/pgalloc.h>
31 #include <asm/pgtable.h>
32 #include <asm/bitext.h>
33 #include <asm/vaddrs.h>
34 #include <asm/cache.h>
35 #include <asm/traps.h>
36 #include <asm/oplib.h>
37 #include <asm/mbus.h>
38 #include <asm/page.h>
39 #include <asm/asi.h>
40 #include <asm/smp.h>
41 #include <asm/io.h>
42
43 /* Now the cpu specific definitions. */
44 #include <asm/turbosparc.h>
45 #include <asm/tsunami.h>
46 #include <asm/viking.h>
47 #include <asm/swift.h>
48 #include <asm/leon.h>
49 #include <asm/mxcc.h>
50 #include <asm/ross.h>
51
52 #include "mm_32.h"
53
54 enum mbus_module srmmu_modtype;
55 static unsigned int hwbug_bitmask;
56 int vac_cache_size;
57 EXPORT_SYMBOL(vac_cache_size);
58 int vac_line_size;
59
60 extern struct resource sparc_iomap;
61
62 extern unsigned long last_valid_pfn;
63
64 static pgd_t *srmmu_swapper_pg_dir;
65
66 const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
67 EXPORT_SYMBOL(sparc32_cachetlb_ops);
68
69 #ifdef CONFIG_SMP
70 const struct sparc32_cachetlb_ops *local_ops;
71
72 #define FLUSH_BEGIN(mm)
73 #define FLUSH_END
74 #else
75 #define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
76 #define FLUSH_END       }
77 #endif
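/*
 * FLUSH_BEGIN/FLUSH_END bracket the bodies of the per-mm flush routines
 * further down in this file.  On UP they skip the flush entirely when the
 * mm never received a hardware context (context == NO_CONTEXT); on SMP
 * they expand to nothing, so the flush is always performed.
 */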
78
79 int flush_page_for_dma_global = 1;
80
81 char *srmmu_name;
82
83 ctxd_t *srmmu_ctx_table_phys;
84 static ctxd_t *srmmu_context_table;
85
86 int viking_mxcc_present;
87 static DEFINE_SPINLOCK(srmmu_context_spinlock);
88
89 static int is_hypersparc;
90
91 static int srmmu_cache_pagetables;
92
93 /* these will be initialized in srmmu_nocache_calcsize() */
94 static unsigned long srmmu_nocache_size;
95 static unsigned long srmmu_nocache_end;
96
97 /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
98 #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
99
100 /* The context table is a nocache user with the biggest alignment needs. */
101 #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
102
103 void *srmmu_nocache_pool;
104 static struct bit_map srmmu_nocache_map;
105
106 static inline int srmmu_pmd_none(pmd_t pmd)
107 { return !(pmd_val(pmd) & 0xFFFFFFF); }
108
109 /* XXX should we hyper_flush_whole_icache here - Anton */
110 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
111 {
112         pte_t pte;
113
114         pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
115         set_pte((pte_t *)ctxp, pte);
116 }
117
118 /*
119  * Locations of MSI Registers.
120  */
121 #define MSI_MBUS_ARBEN  0xe0001008      /* MBus Arbiter Enable register */
122
123 /*
124  * Useful bits in the MSI Registers.
125  */
126 #define MSI_ASYNC_MODE  0x80000000      /* Operate the MSI asynchronously */
127
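/*
 * msi_set_sync() reads the MBus arbiter enable register through the
 * control-space ASI (ASI_M_CTL), clears MSI_ASYNC_MODE and writes the
 * value back, forcing the MSI to operate synchronously.
 */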
128 static void msi_set_sync(void)
129 {
130         __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
131                               "andn %%g3, %2, %%g3\n\t"
132                               "sta %%g3, [%0] %1\n\t" : :
133                               "r" (MSI_MBUS_ARBEN),
134                               "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
135 }
136
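/*
 * A PTD (table pointer descriptor) stores the physical address of the
 * next-level table shifted right by 4.  Sketch of the loop below, with
 * the usual sparc32 values (PTRS_PER_PTE of 1024, SRMMU_REAL_PTRS_PER_PTE
 * of 64): each Linux pmd entry is filled with 1024 / 64 = 16 hardware
 * PTDs, each pointing SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) = 256 bytes
 * further into the same PTE page.
 */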
137 void pmd_set(pmd_t *pmdp, pte_t *ptep)
138 {
139         unsigned long ptp;      /* Physical address, shifted right by 4 */
140         int i;
141
142         ptp = __nocache_pa(ptep) >> 4;
143         for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
144                 set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
145                 ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
146         }
147 }
148
149 void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
150 {
151         unsigned long ptp;      /* Physical address, shifted right by 4 */
152         int i;
153
154         ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);      /* watch for overflow */
155         for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
156                 set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
157                 ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
158         }
159 }
160
161 /* Find an entry in the third-level page table.. */
162 pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
163 {
164         void *pte;
165
166         pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
167         return (pte_t *) pte +
168             ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
169 }
170
171 /*
172  * size: bytes to allocate in the nocache area.
173  * align: bytes, number to align at.
174  * Returns the virtual address of the allocated area.
175  */
176 static void *__srmmu_get_nocache(int size, int align)
177 {
178         int offset;
179         unsigned long addr;
180
181         if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
182                 printk(KERN_ERR "Size 0x%x too small for nocache request\n",
183                        size);
184                 size = SRMMU_NOCACHE_BITMAP_SHIFT;
185         }
186         if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
187                 printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
188                        size);
189                 size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
190         }
191         BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
192
193         offset = bit_map_string_get(&srmmu_nocache_map,
194                                     size >> SRMMU_NOCACHE_BITMAP_SHIFT,
195                                     align >> SRMMU_NOCACHE_BITMAP_SHIFT);
196         if (offset == -1) {
197                 printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
198                        size, (int) srmmu_nocache_size,
199                        srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
200                 return NULL;
201         }
202
203         addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
204         return (void *)addr;
205 }
206
207 void *srmmu_get_nocache(int size, int align)
208 {
209         void *tmp;
210
211         tmp = __srmmu_get_nocache(size, align);
212
213         if (tmp)
214                 memset(tmp, 0, size);
215
216         return tmp;
217 }
218
219 void srmmu_free_nocache(void *addr, int size)
220 {
221         unsigned long vaddr;
222         int offset;
223
224         vaddr = (unsigned long)addr;
225         if (vaddr < SRMMU_NOCACHE_VADDR) {
226                 printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
227                     vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
228                 BUG();
229         }
230         if (vaddr + size > srmmu_nocache_end) {
231                 printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
232                     vaddr, srmmu_nocache_end);
233                 BUG();
234         }
235         if (!is_power_of_2(size)) {
236                 printk("Size 0x%x is not a power of 2\n", size);
237                 BUG();
238         }
239         if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
240                 printk("Size 0x%x is too small\n", size);
241                 BUG();
242         }
243         if (vaddr & (size - 1)) {
244                 printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
245                 BUG();
246         }
247
248         offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
249         size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;
250
251         bit_map_clear(&srmmu_nocache_map, offset, size);
252 }
253
254 static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
255                                                  unsigned long end);
256
257 /* Return how much physical memory we have.  */
258 static unsigned long __init probe_memory(void)
259 {
260         unsigned long total = 0;
261         int i;
262
263         for (i = 0; sp_banks[i].num_bytes; i++)
264                 total += sp_banks[i].num_bytes;
265
266         return total;
267 }
268
269 /*
270  * Reserve nocache dynamically proportionally to the amount of
271  * system RAM. -- Tomas Szepe <[email protected]>, June 2002
272  */
273 static void __init srmmu_nocache_calcsize(void)
274 {
275         unsigned long sysmemavail = probe_memory() / 1024;
276         int srmmu_nocache_npages;
277
278         srmmu_nocache_npages =
279                 sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
280
281  /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
282         // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
283         if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
284                 srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;
285
286         /* anything above 1280 blows up */
287         if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
288                 srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;
289
290         srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
291         srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
292 }
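/*
 * A worked example, assuming the usual SRMMU_NOCACHE_ALCRATIO of 64
 * (i.e. 256 nocache pages per 64 MB of system RAM): a 256 MB machine has
 * sysmemavail = 262144 kB, giving 262144 / 64 / 1024 * 256 = 1024 nocache
 * pages (4 MB).  The result is then clamped to the
 * [SRMMU_MIN_NOCACHE_PAGES, SRMMU_MAX_NOCACHE_PAGES] range above.
 */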
293
294 static void __init srmmu_nocache_init(void)
295 {
296         void *srmmu_nocache_bitmap;
297         unsigned int bitmap_bits;
298         pgd_t *pgd;
299         p4d_t *p4d;
300         pud_t *pud;
301         pmd_t *pmd;
302         pte_t *pte;
303         unsigned long paddr, vaddr;
304         unsigned long pteval;
305
306         bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
307
308         srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
309                                             SRMMU_NOCACHE_ALIGN_MAX);
310         if (!srmmu_nocache_pool)
311                 panic("%s: Failed to allocate %lu bytes align=0x%x\n",
312                       __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);
313         memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
314
315         srmmu_nocache_bitmap =
316                 memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
317                                SMP_CACHE_BYTES);
318         if (!srmmu_nocache_bitmap)
319                 panic("%s: Failed to allocate %zu bytes\n", __func__,
320                       BITS_TO_LONGS(bitmap_bits) * sizeof(long));
321         bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
322
323         srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
324         memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
325         init_mm.pgd = srmmu_swapper_pg_dir;
326
327         srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);
328
329         paddr = __pa((unsigned long)srmmu_nocache_pool);
330         vaddr = SRMMU_NOCACHE_VADDR;
331
332         while (vaddr < srmmu_nocache_end) {
333                 pgd = pgd_offset_k(vaddr);
334                 p4d = p4d_offset(__nocache_fix(pgd), vaddr);
335                 pud = pud_offset(__nocache_fix(p4d), vaddr);
336                 pmd = pmd_offset(__nocache_fix(pud), vaddr);
337                 pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
338
339                 pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
340
341                 if (srmmu_cache_pagetables)
342                         pteval |= SRMMU_CACHE;
343
344                 set_pte(__nocache_fix(pte), __pte(pteval));
345
346                 vaddr += PAGE_SIZE;
347                 paddr += PAGE_SIZE;
348         }
349
350         flush_cache_all();
351         flush_tlb_all();
352 }
353
354 pgd_t *get_pgd_fast(void)
355 {
356         pgd_t *pgd = NULL;
357
358         pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
359         if (pgd) {
360                 pgd_t *init = pgd_offset_k(0);
361                 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
362                 memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
363                                                 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
364         }
365
366         return pgd;
367 }
368
369 /*
370  * Hardware needs alignment to 256 only, but we align to whole page size
371  * to reduce fragmentation problems due to the buddy principle.
372  * XXX Provide actual fragmentation statistics in /proc.
373  *
374  * Alignments up to the page size are the same for physical and virtual
375  * addresses of the nocache area.
376  */
377 pgtable_t pte_alloc_one(struct mm_struct *mm)
378 {
379         unsigned long pte;
380         struct page *page;
381
382         if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
383                 return NULL;
384         page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
385         if (!pgtable_pte_page_ctor(page)) {
386                 __free_page(page);
387                 return NULL;
388         }
389         return page;
390 }
391
392 void pte_free(struct mm_struct *mm, pgtable_t pte)
393 {
394         unsigned long p;
395
396         pgtable_pte_page_dtor(pte);
397         p = (unsigned long)page_address(pte);   /* Cached address (for test) */
398         if (p == 0)
399                 BUG();
400         p = page_to_pfn(pte) << PAGE_SHIFT;     /* Physical address */
401
402         /* free non cached virtual address*/
403         srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
404 }
405
406 /* context handling - a dynamically sized pool is used */
407 #define NO_CONTEXT      -1
408
409 struct ctx_list {
410         struct ctx_list *next;
411         struct ctx_list *prev;
412         unsigned int ctx_number;
413         struct mm_struct *ctx_mm;
414 };
415
416 static struct ctx_list *ctx_list_pool;
417 static struct ctx_list ctx_free;
418 static struct ctx_list ctx_used;
419
420 /* At boot time we determine the number of contexts */
421 static int num_contexts;
422
423 static inline void remove_from_ctx_list(struct ctx_list *entry)
424 {
425         entry->next->prev = entry->prev;
426         entry->prev->next = entry->next;
427 }
428
429 static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
430 {
431         entry->next = head;
432         (entry->prev = head->prev)->next = entry;
433         head->prev = entry;
434 }
435 #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
436 #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
437
438
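/*
 * alloc_context() first tries to take a context number from the free
 * list.  If none is left, it steals the entry at the head of the used
 * list (skipping the one belonging to old_mm, which is still being
 * switched away from), flushes the victim's cache and TLB state, marks
 * the victim mm as NO_CONTEXT and hands the number to the new mm.
 */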
439 static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
440 {
441         struct ctx_list *ctxp;
442
443         ctxp = ctx_free.next;
444         if (ctxp != &ctx_free) {
445                 remove_from_ctx_list(ctxp);
446                 add_to_used_ctxlist(ctxp);
447                 mm->context = ctxp->ctx_number;
448                 ctxp->ctx_mm = mm;
449                 return;
450         }
451         ctxp = ctx_used.next;
452         if (ctxp->ctx_mm == old_mm)
453                 ctxp = ctxp->next;
454         if (ctxp == &ctx_used)
455                 panic("out of mmu contexts");
456         flush_cache_mm(ctxp->ctx_mm);
457         flush_tlb_mm(ctxp->ctx_mm);
458         remove_from_ctx_list(ctxp);
459         add_to_used_ctxlist(ctxp);
460         ctxp->ctx_mm->context = NO_CONTEXT;
461         ctxp->ctx_mm = mm;
462         mm->context = ctxp->ctx_number;
463 }
464
465 static inline void free_context(int context)
466 {
467         struct ctx_list *ctx_old;
468
469         ctx_old = ctx_list_pool + context;
470         remove_from_ctx_list(ctx_old);
471         add_to_free_ctxlist(ctx_old);
472 }
473
474 static void __init sparc_context_init(int numctx)
475 {
476         int ctx;
477         unsigned long size;
478
479         size = numctx * sizeof(struct ctx_list);
480         ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
481         if (!ctx_list_pool)
482                 panic("%s: Failed to allocate %lu bytes\n", __func__, size);
483
484         for (ctx = 0; ctx < numctx; ctx++) {
485                 struct ctx_list *clist;
486
487                 clist = (ctx_list_pool + ctx);
488                 clist->ctx_number = ctx;
489                 clist->ctx_mm = NULL;
490         }
491         ctx_free.next = ctx_free.prev = &ctx_free;
492         ctx_used.next = ctx_used.prev = &ctx_used;
493         for (ctx = 0; ctx < numctx; ctx++)
494                 add_to_free_ctxlist(ctx_list_pool + ctx);
495 }
496
497 void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
498                struct task_struct *tsk)
499 {
500         unsigned long flags;
501
502         if (mm->context == NO_CONTEXT) {
503                 spin_lock_irqsave(&srmmu_context_spinlock, flags);
504                 alloc_context(old_mm, mm);
505                 spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
506                 srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
507         }
508
509         if (sparc_cpu_model == sparc_leon)
510                 leon_switch_mm();
511
512         if (is_hypersparc)
513                 hyper_flush_whole_icache();
514
515         srmmu_set_context(mm->context);
516 }
517
518 /* Low level IO area allocation on the SRMMU. */
519 static inline void srmmu_mapioaddr(unsigned long physaddr,
520                                    unsigned long virt_addr, int bus_type)
521 {
522         pgd_t *pgdp;
523         p4d_t *p4dp;
524         pud_t *pudp;
525         pmd_t *pmdp;
526         pte_t *ptep;
527         unsigned long tmp;
528
529         physaddr &= PAGE_MASK;
530         pgdp = pgd_offset_k(virt_addr);
531         p4dp = p4d_offset(pgdp, virt_addr);
532         pudp = pud_offset(p4dp, virt_addr);
533         pmdp = pmd_offset(pudp, virt_addr);
534         ptep = pte_offset_kernel(pmdp, virt_addr);
535         tmp = (physaddr >> 4) | SRMMU_ET_PTE;
536
537         /* I need to test whether this is consistent over all
538          * sun4m's.  The bus_type represents the upper 4 bits of
539          * 36-bit physical address on the I/O space lines...
540          */
541         tmp |= (bus_type << 28);
542         tmp |= SRMMU_PRIV;
543         __flush_page_to_ram(virt_addr);
544         set_pte(ptep, __pte(tmp));
545 }
546
547 void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
548                       unsigned long xva, unsigned int len)
549 {
550         while (len != 0) {
551                 len -= PAGE_SIZE;
552                 srmmu_mapioaddr(xpa, xva, bus);
553                 xva += PAGE_SIZE;
554                 xpa += PAGE_SIZE;
555         }
556         flush_tlb_all();
557 }
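/*
 * Illustrative use (hypothetical values): to map one page of a device in
 * I/O space 0xf at physical offset 0x200000 onto a page-aligned kernel
 * virtual address va reserved for I/O:
 *
 *      srmmu_mapiorange(0xf, 0x200000, va, PAGE_SIZE);
 *
 * The bus number lands in bits 31:28 of the PTE, supplying the upper
 * four bits of the 36-bit physical address as described above.
 */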
558
559 static inline void srmmu_unmapioaddr(unsigned long virt_addr)
560 {
561         pgd_t *pgdp;
562         p4d_t *p4dp;
563         pud_t *pudp;
564         pmd_t *pmdp;
565         pte_t *ptep;
566
567
568         pgdp = pgd_offset_k(virt_addr);
569         p4dp = p4d_offset(pgdp, virt_addr);
570         pudp = pud_offset(p4dp, virt_addr);
571         pmdp = pmd_offset(pudp, virt_addr);
572         ptep = pte_offset_kernel(pmdp, virt_addr);
573
574         /* No need to flush uncacheable page. */
575         __pte_clear(ptep);
576 }
577
578 void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
579 {
580         while (len != 0) {
581                 len -= PAGE_SIZE;
582                 srmmu_unmapioaddr(virt_addr);
583                 virt_addr += PAGE_SIZE;
584         }
585         flush_tlb_all();
586 }
587
588 /* tsunami.S */
589 extern void tsunami_flush_cache_all(void);
590 extern void tsunami_flush_cache_mm(struct mm_struct *mm);
591 extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
592 extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
593 extern void tsunami_flush_page_to_ram(unsigned long page);
594 extern void tsunami_flush_page_for_dma(unsigned long page);
595 extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
596 extern void tsunami_flush_tlb_all(void);
597 extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
598 extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
599 extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
600 extern void tsunami_setup_blockops(void);
601
602 /* swift.S */
603 extern void swift_flush_cache_all(void);
604 extern void swift_flush_cache_mm(struct mm_struct *mm);
605 extern void swift_flush_cache_range(struct vm_area_struct *vma,
606                                     unsigned long start, unsigned long end);
607 extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
608 extern void swift_flush_page_to_ram(unsigned long page);
609 extern void swift_flush_page_for_dma(unsigned long page);
610 extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
611 extern void swift_flush_tlb_all(void);
612 extern void swift_flush_tlb_mm(struct mm_struct *mm);
613 extern void swift_flush_tlb_range(struct vm_area_struct *vma,
614                                   unsigned long start, unsigned long end);
615 extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
616
617 #if 0  /* P3: deadwood to debug precise flushes on Swift. */
618 void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
619 {
620         int cctx, ctx1;
621
622         page &= PAGE_MASK;
623         if ((ctx1 = vma->vm_mm->context) != -1) {
624                 cctx = srmmu_get_context();
625 /* Is context # ever different from current context? P3 */
626                 if (cctx != ctx1) {
627                         printk("flush ctx %02x curr %02x\n", ctx1, cctx);
628                         srmmu_set_context(ctx1);
629                         swift_flush_page(page);
630                         __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
631                                         "r" (page), "i" (ASI_M_FLUSH_PROBE));
632                         srmmu_set_context(cctx);
633                 } else {
634                          /* Rm. prot. bits from virt. c. */
635                         /* swift_flush_cache_all(); */
636                         /* swift_flush_cache_page(vma, page); */
637                         swift_flush_page(page);
638
639                         __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
640                                 "r" (page), "i" (ASI_M_FLUSH_PROBE));
641                         /* same as above: srmmu_flush_tlb_page() */
642                 }
643         }
644 }
645 #endif
646
647 /*
648  * The following are all MBUS based SRMMU modules, and therefore could
649  * be found in a multiprocessor configuration.  On the whole, these
650  * chips seem to be much more touchy about DVMA and page tables
651  * with respect to cache coherency.
652  */
653
654 /* viking.S */
655 extern void viking_flush_cache_all(void);
656 extern void viking_flush_cache_mm(struct mm_struct *mm);
657 extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
658                                      unsigned long end);
659 extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
660 extern void viking_flush_page_to_ram(unsigned long page);
661 extern void viking_flush_page_for_dma(unsigned long page);
662 extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
663 extern void viking_flush_page(unsigned long page);
664 extern void viking_mxcc_flush_page(unsigned long page);
665 extern void viking_flush_tlb_all(void);
666 extern void viking_flush_tlb_mm(struct mm_struct *mm);
667 extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
668                                    unsigned long end);
669 extern void viking_flush_tlb_page(struct vm_area_struct *vma,
670                                   unsigned long page);
671 extern void sun4dsmp_flush_tlb_all(void);
672 extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
673 extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
674                                    unsigned long end);
675 extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
676                                   unsigned long page);
677
678 /* hypersparc.S */
679 extern void hypersparc_flush_cache_all(void);
680 extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
681 extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
682 extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
683 extern void hypersparc_flush_page_to_ram(unsigned long page);
684 extern void hypersparc_flush_page_for_dma(unsigned long page);
685 extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
686 extern void hypersparc_flush_tlb_all(void);
687 extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
688 extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
689 extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
690 extern void hypersparc_setup_blockops(void);
691
692 /*
693  * NOTE: All of this startup code assumes the low 16mb (approx.) of
694  *       kernel mappings are done with one single contiguous chunk of
695  *       ram.  On small ram machines (classics mainly) we only get
696  *       around 8mb mapped for us.
697  */
698
699 static void __init early_pgtable_allocfail(char *type)
700 {
701         prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
702         prom_halt();
703 }
704
705 static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
706                                                         unsigned long end)
707 {
708         pgd_t *pgdp;
709         p4d_t *p4dp;
710         pud_t *pudp;
711         pmd_t *pmdp;
712         pte_t *ptep;
713
714         while (start < end) {
715                 pgdp = pgd_offset_k(start);
716                 p4dp = p4d_offset(pgdp, start);
717                 pudp = pud_offset(p4dp, start);
718                 if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
719                         pmdp = __srmmu_get_nocache(
720                             SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
721                         if (pmdp == NULL)
722                                 early_pgtable_allocfail("pmd");
723                         memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
724                         pud_set(__nocache_fix(pudp), pmdp);
725                 }
726                 pmdp = pmd_offset(__nocache_fix(pudp), start);
727                 if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
728                         ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
729                         if (ptep == NULL)
730                                 early_pgtable_allocfail("pte");
731                         memset(__nocache_fix(ptep), 0, PTE_SIZE);
732                         pmd_set(__nocache_fix(pmdp), ptep);
733                 }
734                 if (start > (0xffffffffUL - PMD_SIZE))
735                         break;
736                 start = (start + PMD_SIZE) & PMD_MASK;
737         }
738 }
739
740 static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
741                                                   unsigned long end)
742 {
743         pgd_t *pgdp;
744         p4d_t *p4dp;
745         pud_t *pudp;
746         pmd_t *pmdp;
747         pte_t *ptep;
748
749         while (start < end) {
750                 pgdp = pgd_offset_k(start);
751                 p4dp = p4d_offset(pgdp, start);
752                 pudp = pud_offset(p4dp, start);
753                 if (pud_none(*pudp)) {
754                         pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
755                         if (pmdp == NULL)
756                                 early_pgtable_allocfail("pmd");
757                         memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
758                         pud_set((pud_t *)pgdp, pmdp);
759                 }
760                 pmdp = pmd_offset(pudp, start);
761                 if (srmmu_pmd_none(*pmdp)) {
762                         ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
764                         if (ptep == NULL)
765                                 early_pgtable_allocfail("pte");
766                         memset(ptep, 0, PTE_SIZE);
767                         pmd_set(pmdp, ptep);
768                 }
769                 if (start > (0xffffffffUL - PMD_SIZE))
770                         break;
771                 start = (start + PMD_SIZE) & PMD_MASK;
772         }
773 }
774
775 /* These flush types are not available on all chips... */
776 static inline unsigned long srmmu_probe(unsigned long vaddr)
777 {
778         unsigned long retval;
779
780         if (sparc_cpu_model != sparc_leon) {
781
782                 vaddr &= PAGE_MASK;
783                 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
784                                      "=r" (retval) :
785                                      "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
786         } else {
787                 retval = leon_swprobe(vaddr, NULL);
788         }
789         return retval;
790 }
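/*
 * srmmu_probe() returns the raw page table entry (PTE or PTD) the MMU
 * finds for vaddr, or 0 if nothing is mapped there.  The 0x400 ORed into
 * the probe address selects the SRMMU "entire" probe type, so the entry
 * comes from whatever level the table walk terminates at.
 * srmmu_inherit_prom_mappings() below relies on this to copy the
 * firmware's mappings regardless of their page size.
 */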
791
792 /*
793  * This is much cleaner than poking around physical address space
794  * looking at the prom's page table directly which is what most
795  * other OS's do.  Yuck... this is much better.
796  */
797 static void __init srmmu_inherit_prom_mappings(unsigned long start,
798                                                unsigned long end)
799 {
800         unsigned long probed;
801         unsigned long addr;
802         pgd_t *pgdp;
803         p4d_t *p4dp;
804         pud_t *pudp;
805         pmd_t *pmdp;
806         pte_t *ptep;
807         int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
808
809         while (start <= end) {
810                 if (start == 0)
811                         break; /* probably wrap around */
812                 if (start == 0xfef00000)
813                         start = KADB_DEBUGGER_BEGVM;
814                 probed = srmmu_probe(start);
815                 if (!probed) {
816                         /* continue probing until we find an entry */
817                         start += PAGE_SIZE;
818                         continue;
819                 }
820
821                 /* A red snapper, see what it really is. */
822                 what = 0;
823                 addr = start - PAGE_SIZE;
824
825                 if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
826                         if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
827                                 what = 1;
828                 }
829
830                 if (!(start & ~(SRMMU_PGDIR_MASK))) {
831                         if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
832                                 what = 2;
833                 }
834
835                 pgdp = pgd_offset_k(start);
836                 p4dp = p4d_offset(pgdp, start);
837                 pudp = pud_offset(p4dp, start);
838                 if (what == 2) {
839                         *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
840                         start += SRMMU_PGDIR_SIZE;
841                         continue;
842                 }
843                 if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
844                         pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
845                                                    SRMMU_PMD_TABLE_SIZE);
846                         if (pmdp == NULL)
847                                 early_pgtable_allocfail("pmd");
848                         memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
849                         pud_set(__nocache_fix(pudp), pmdp);
850                 }
851                 pmdp = pmd_offset(__nocache_fix(pudp), start);
852                 if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
853                         ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
854                         if (ptep == NULL)
855                                 early_pgtable_allocfail("pte");
856                         memset(__nocache_fix(ptep), 0, PTE_SIZE);
857                         pmd_set(__nocache_fix(pmdp), ptep);
858                 }
859                 if (what == 1) {
860                         /* We bend the rule where all 16 PTPs in a pmd_t point
861                          * inside the same PTE page, and we leak a perfectly
862                          * good hardware PTE piece. Alternatives seem worse.
863                          */
864                         unsigned int x; /* Index of HW PMD in soft cluster */
865                         unsigned long *val;
866                         x = (start >> PMD_SHIFT) & 15;
867                         val = &pmdp->pmdv[x];
868                         *(unsigned long *)__nocache_fix(val) = probed;
869                         start += SRMMU_REAL_PMD_SIZE;
870                         continue;
871                 }
872                 ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
873                 *(pte_t *)__nocache_fix(ptep) = __pte(probed);
874                 start += PAGE_SIZE;
875         }
876 }
877
878 #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
879
880 /* Create a 16MB mapping with a single top-level (pgd) SRMMU entry. */
881 static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
882 {
883         pgd_t *pgdp = pgd_offset_k(vaddr);
884         unsigned long big_pte;
885
886         big_pte = KERNEL_PTE(phys_base >> 4);
887         *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
888 }
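/*
 * Example (illustrative values): for a bank starting at physical
 * 0x10000000, do_large_mapping() stores KERNEL_PTE(0x10000000 >> 4) =
 * 0x01000000 | SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID directly in the
 * pgd slot, yielding one cacheable, supervisor-only 16 MB mapping with
 * no lower-level tables at all.
 */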
889
890 /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
891 static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
892 {
893         unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
894         unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
895         unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
896         /* Map "low" memory only */
897         const unsigned long min_vaddr = PAGE_OFFSET;
898         const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
899
900         if (vstart < min_vaddr || vstart >= max_vaddr)
901                 return vstart;
902
903         if (vend > max_vaddr || vend < min_vaddr)
904                 vend = max_vaddr;
905
906         while (vstart < vend) {
907                 do_large_mapping(vstart, pstart);
908                 vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
909         }
910         return vstart;
911 }
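/*
 * Sketch of the effect, assuming SRMMU_PGDIR_SIZE is the 16 MB large
 * mapping size used above: a 64 MB bank at physical 0x0 is mapped by
 * four calls to do_large_mapping(), covering PAGE_OFFSET .. PAGE_OFFSET
 * + 64 MB; map_spbank() returns the first virtual address above the
 * range it mapped.
 */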
912
913 static void __init map_kernel(void)
914 {
915         int i;
916
917         if (phys_base > 0) {
918                 do_large_mapping(PAGE_OFFSET, phys_base);
919         }
920
921         for (i = 0; sp_banks[i].num_bytes != 0; i++) {
922                 map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
923         }
924 }
925
926 void (*poke_srmmu)(void) = NULL;
927
928 void __init srmmu_paging_init(void)
929 {
930         int i;
931         phandle cpunode;
932         char node_str[128];
933         pgd_t *pgd;
934         p4d_t *p4d;
935         pud_t *pud;
936         pmd_t *pmd;
937         pte_t *pte;
938         unsigned long pages_avail;
939
940         init_mm.context = (unsigned long) NO_CONTEXT;
941         sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */
942
943         if (sparc_cpu_model == sun4d)
944                 num_contexts = 65536; /* We know it is Viking */
945         else {
946                 /* Find the number of contexts on the srmmu. */
947                 cpunode = prom_getchild(prom_root_node);
948                 num_contexts = 0;
949                 while (cpunode != 0) {
950                         prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
951                         if (!strcmp(node_str, "cpu")) {
952                                 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
953                                 break;
954                         }
955                         cpunode = prom_getsibling(cpunode);
956                 }
957         }
958
959         if (!num_contexts) {
960                 prom_printf("Something wrong, can't find cpu node in paging_init.\n");
961                 prom_halt();
962         }
963
964         pages_avail = 0;
965         last_valid_pfn = bootmem_init(&pages_avail);
966
967         srmmu_nocache_calcsize();
968         srmmu_nocache_init();
969         srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
970         map_kernel();
971
972         /* ctx table has to be physically aligned to its size */
973         srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
974         srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
975
976         for (i = 0; i < num_contexts; i++)
977                 srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
978
979         flush_cache_all();
980         srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
981 #ifdef CONFIG_SMP
982         /* Stop from hanging here... */
983         local_ops->tlb_all();
984 #else
985         flush_tlb_all();
986 #endif
987         poke_srmmu();
988
989         srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
990         srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
991
992         srmmu_allocate_ptable_skeleton(
993                 __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
994         srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
995
996         pgd = pgd_offset_k(PKMAP_BASE);
997         p4d = p4d_offset(pgd, PKMAP_BASE);
998         pud = pud_offset(p4d, PKMAP_BASE);
999         pmd = pmd_offset(pud, PKMAP_BASE);
1000         pte = pte_offset_kernel(pmd, PKMAP_BASE);
1001         pkmap_page_table = pte;
1002
1003         flush_cache_all();
1004         flush_tlb_all();
1005
1006         sparc_context_init(num_contexts);
1007
1008         kmap_init();
1009
1010         {
1011                 unsigned long zones_size[MAX_NR_ZONES];
1012                 unsigned long zholes_size[MAX_NR_ZONES];
1013                 unsigned long npages;
1014                 int znum;
1015
1016                 for (znum = 0; znum < MAX_NR_ZONES; znum++)
1017                         zones_size[znum] = zholes_size[znum] = 0;
1018
1019                 npages = max_low_pfn - pfn_base;
1020
1021                 zones_size[ZONE_DMA] = npages;
1022                 zholes_size[ZONE_DMA] = npages - pages_avail;
1023
1024                 npages = highend_pfn - max_low_pfn;
1025                 zones_size[ZONE_HIGHMEM] = npages;
1026                 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
1027
1028                 free_area_init_node(0, zones_size, pfn_base, zholes_size);
1029         }
1030 }
1031
1032 void mmu_info(struct seq_file *m)
1033 {
1034         seq_printf(m,
1035                    "MMU type\t: %s\n"
1036                    "contexts\t: %d\n"
1037                    "nocache total\t: %ld\n"
1038                    "nocache used\t: %d\n",
1039                    srmmu_name,
1040                    num_contexts,
1041                    srmmu_nocache_size,
1042                    srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
1043 }
1044
1045 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
1046 {
1047         mm->context = NO_CONTEXT;
1048         return 0;
1049 }
1050
1051 void destroy_context(struct mm_struct *mm)
1052 {
1053         unsigned long flags;
1054
1055         if (mm->context != NO_CONTEXT) {
1056                 flush_cache_mm(mm);
1057                 srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
1058                 flush_tlb_mm(mm);
1059                 spin_lock_irqsave(&srmmu_context_spinlock, flags);
1060                 free_context(mm->context);
1061                 spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
1062                 mm->context = NO_CONTEXT;
1063         }
1064 }
1065
1066 /* Init various srmmu chip types. */
1067 static void __init srmmu_is_bad(void)
1068 {
1069         prom_printf("Could not determine SRMMU chip type.\n");
1070         prom_halt();
1071 }
1072
1073 static void __init init_vac_layout(void)
1074 {
1075         phandle nd;
1076         int cache_lines;
1077         char node_str[128];
1078 #ifdef CONFIG_SMP
1079         int cpu = 0;
1080         unsigned long max_size = 0;
1081         unsigned long min_line_size = 0x10000000;
1082 #endif
1083
1084         nd = prom_getchild(prom_root_node);
1085         while ((nd = prom_getsibling(nd)) != 0) {
1086                 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
1087                 if (!strcmp(node_str, "cpu")) {
1088                         vac_line_size = prom_getint(nd, "cache-line-size");
1089                         if (vac_line_size == -1) {
1090                                 prom_printf("can't determine cache-line-size, halting.\n");
1091                                 prom_halt();
1092                         }
1093                         cache_lines = prom_getint(nd, "cache-nlines");
1094                         if (cache_lines == -1) {
1095                                 prom_printf("can't determine cache-nlines, halting.\n");
1096                                 prom_halt();
1097                         }
1098
1099                         vac_cache_size = cache_lines * vac_line_size;
1100 #ifdef CONFIG_SMP
1101                         if (vac_cache_size > max_size)
1102                                 max_size = vac_cache_size;
1103                         if (vac_line_size < min_line_size)
1104                                 min_line_size = vac_line_size;
1105                         //FIXME: cpus not contiguous!!
1106                         cpu++;
1107                         if (cpu >= nr_cpu_ids || !cpu_online(cpu))
1108                                 break;
1109 #else
1110                         break;
1111 #endif
1112                 }
1113         }
1114         if (nd == 0) {
1115                 prom_printf("No CPU nodes found, halting.\n");
1116                 prom_halt();
1117         }
1118 #ifdef CONFIG_SMP
1119         vac_cache_size = max_size;
1120         vac_line_size = min_line_size;
1121 #endif
1122         printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
1123                (int)vac_cache_size, (int)vac_line_size);
1124 }
1125
1126 static void poke_hypersparc(void)
1127 {
1128         volatile unsigned long clear;
1129         unsigned long mreg = srmmu_get_mmureg();
1130
1131         hyper_flush_unconditional_combined();
1132
1133         mreg &= ~(HYPERSPARC_CWENABLE);
1134         mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
1135         mreg |= (HYPERSPARC_CMODE);
1136
1137         srmmu_set_mmureg(mreg);
1138
1139 #if 0 /* XXX I think this is bad news... -DaveM */
1140         hyper_clear_all_tags();
1141 #endif
1142
1143         put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
1144         hyper_flush_whole_icache();
1145         clear = srmmu_get_faddr();
1146         clear = srmmu_get_fstatus();
1147 }
1148
1149 static const struct sparc32_cachetlb_ops hypersparc_ops = {
1150         .cache_all      = hypersparc_flush_cache_all,
1151         .cache_mm       = hypersparc_flush_cache_mm,
1152         .cache_page     = hypersparc_flush_cache_page,
1153         .cache_range    = hypersparc_flush_cache_range,
1154         .tlb_all        = hypersparc_flush_tlb_all,
1155         .tlb_mm         = hypersparc_flush_tlb_mm,
1156         .tlb_page       = hypersparc_flush_tlb_page,
1157         .tlb_range      = hypersparc_flush_tlb_range,
1158         .page_to_ram    = hypersparc_flush_page_to_ram,
1159         .sig_insns      = hypersparc_flush_sig_insns,
1160         .page_for_dma   = hypersparc_flush_page_for_dma,
1161 };
1162
1163 static void __init init_hypersparc(void)
1164 {
1165         srmmu_name = "ROSS HyperSparc";
1166         srmmu_modtype = HyperSparc;
1167
1168         init_vac_layout();
1169
1170         is_hypersparc = 1;
1171         sparc32_cachetlb_ops = &hypersparc_ops;
1172
1173         poke_srmmu = poke_hypersparc;
1174
1175         hypersparc_setup_blockops();
1176 }
1177
1178 static void poke_swift(void)
1179 {
1180         unsigned long mreg;
1181
1182         /* Clear any crap from the cache or else... */
1183         swift_flush_cache_all();
1184
1185         /* Enable I & D caches */
1186         mreg = srmmu_get_mmureg();
1187         mreg |= (SWIFT_IE | SWIFT_DE);
1188         /*
1189          * The Swift branch folding logic is completely broken.  At
1190          * trap time, if things are just right, it can mistakenly
1191          * think that a trap is coming from kernel mode when in fact
1192          * it is coming from user mode (it mis-executes the branch in
1193          * the trap code).  So you see things like crashme completely
1194          * hosing your machine which is completely unacceptable.  Turn
1195          * this shit off... nice job Fujitsu.
1196          */
1197         mreg &= ~(SWIFT_BF);
1198         srmmu_set_mmureg(mreg);
1199 }
1200
1201 static const struct sparc32_cachetlb_ops swift_ops = {
1202         .cache_all      = swift_flush_cache_all,
1203         .cache_mm       = swift_flush_cache_mm,
1204         .cache_page     = swift_flush_cache_page,
1205         .cache_range    = swift_flush_cache_range,
1206         .tlb_all        = swift_flush_tlb_all,
1207         .tlb_mm         = swift_flush_tlb_mm,
1208         .tlb_page       = swift_flush_tlb_page,
1209         .tlb_range      = swift_flush_tlb_range,
1210         .page_to_ram    = swift_flush_page_to_ram,
1211         .sig_insns      = swift_flush_sig_insns,
1212         .page_for_dma   = swift_flush_page_for_dma,
1213 };
1214
1215 #define SWIFT_MASKID_ADDR  0x10003018
1216 static void __init init_swift(void)
1217 {
1218         unsigned long swift_rev;
1219
1220         __asm__ __volatile__("lda [%1] %2, %0\n\t"
1221                              "srl %0, 0x18, %0\n\t" :
1222                              "=r" (swift_rev) :
1223                              "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
1224         srmmu_name = "Fujitsu Swift";
1225         switch (swift_rev) {
1226         case 0x11:
1227         case 0x20:
1228         case 0x23:
1229         case 0x30:
1230                 srmmu_modtype = Swift_lots_o_bugs;
1231                 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
1232                 /*
1233                  * Gee george, I wonder why Sun is so hush hush about
1234                  * this hardware bug... really braindamage stuff going
1235                  * on here.  However I think we can find a way to avoid
1236                  * all of the workaround overhead under Linux.  Basically,
1237                  * any page fault can cause kernel pages to become user
1238                  * accessible (the mmu gets confused and clears some of
1239                  * the ACC bits in kernel ptes).  Aha, sounds pretty
1240                  * horrible eh?  But wait, after extensive testing it appears
1241                  * that if you use pgd_t level large kernel pte's (like the
1242                  * 4MB pages on the Pentium) the bug does not get tripped
1243                  * at all.  This avoids almost all of the major overhead.
1244                  * Welcome to a world where your vendor tells you to,
1245                  * "apply this kernel patch" instead of "sorry for the
1246                  * broken hardware, send it back and we'll give you
1247                  * properly functioning parts"
1248                  */
1249                 break;
1250         case 0x25:
1251         case 0x31:
1252                 srmmu_modtype = Swift_bad_c;
1253                 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
1254                 /*
1255                  * You see Sun allude to this hardware bug but never
1256                  * admit things directly, they'll say things like,
1257                  * "the Swift chip cache problems" or similar.
1258                  */
1259                 break;
1260         default:
1261                 srmmu_modtype = Swift_ok;
1262                 break;
1263         }
1264
1265         sparc32_cachetlb_ops = &swift_ops;
1266         flush_page_for_dma_global = 0;
1267
1268         /*
1269          * Are you now convinced that the Swift is one of the
1270          * biggest VLSI abortions of all time?  Bravo Fujitsu!
1271          * Fujitsu, the !#?!%$'d up processor people.  I bet if
1272          * you examined the microcode of the Swift you'd find
1273          * XXX's all over the place.
1274          */
1275         poke_srmmu = poke_swift;
1276 }
1277
1278 static void turbosparc_flush_cache_all(void)
1279 {
1280         flush_user_windows();
1281         turbosparc_idflash_clear();
1282 }
1283
1284 static void turbosparc_flush_cache_mm(struct mm_struct *mm)
1285 {
1286         FLUSH_BEGIN(mm)
1287         flush_user_windows();
1288         turbosparc_idflash_clear();
1289         FLUSH_END
1290 }
1291
1292 static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1293 {
1294         FLUSH_BEGIN(vma->vm_mm)
1295         flush_user_windows();
1296         turbosparc_idflash_clear();
1297         FLUSH_END
1298 }
1299
1300 static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1301 {
1302         FLUSH_BEGIN(vma->vm_mm)
1303         flush_user_windows();
1304         if (vma->vm_flags & VM_EXEC)
1305                 turbosparc_flush_icache();
1306         turbosparc_flush_dcache();
1307         FLUSH_END
1308 }
1309
1310 /* TurboSparc is copy-back, if we turn it on, but this does not work. */
1311 static void turbosparc_flush_page_to_ram(unsigned long page)
1312 {
1313 #ifdef TURBOSPARC_WRITEBACK
1314         volatile unsigned long clear;
1315
1316         if (srmmu_probe(page))
1317                 turbosparc_flush_page_cache(page);
1318         clear = srmmu_get_fstatus();
1319 #endif
1320 }
1321
1322 static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1323 {
1324 }
1325
1326 static void turbosparc_flush_page_for_dma(unsigned long page)
1327 {
1328         turbosparc_flush_dcache();
1329 }
1330
1331 static void turbosparc_flush_tlb_all(void)
1332 {
1333         srmmu_flush_whole_tlb();
1334 }
1335
1336 static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
1337 {
1338         FLUSH_BEGIN(mm)
1339         srmmu_flush_whole_tlb();
1340         FLUSH_END
1341 }
1342
1343 static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1344 {
1345         FLUSH_BEGIN(vma->vm_mm)
1346         srmmu_flush_whole_tlb();
1347         FLUSH_END
1348 }
1349
1350 static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1351 {
1352         FLUSH_BEGIN(vma->vm_mm)
1353         srmmu_flush_whole_tlb();
1354         FLUSH_END
1355 }
1356
1357
1358 static void poke_turbosparc(void)
1359 {
1360         unsigned long mreg = srmmu_get_mmureg();
1361         unsigned long ccreg;
1362
1363         /* Clear any crap from the cache or else... */
1364         turbosparc_flush_cache_all();
1365         /* Temporarily disable I & D caches */
1366         mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
1367         mreg &= ~(TURBOSPARC_PCENABLE);         /* Don't check parity */
1368         srmmu_set_mmureg(mreg);
1369
1370         ccreg = turbosparc_get_ccreg();
1371
1372 #ifdef TURBOSPARC_WRITEBACK
1373         ccreg |= (TURBOSPARC_SNENABLE);         /* Do DVMA snooping in Dcache */
1374         ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
1375                         /* Write-back D-cache, emulate VLSI
1376                          * abortion number three, not number one */
1377 #else
1378         /* For now let's play safe, optimize later */
1379         ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
1380                         /* Do DVMA snooping in Dcache, Write-thru D-cache */
1381         ccreg &= ~(TURBOSPARC_uS2);
1382                         /* Emulate VLSI abortion number three, not number one */
1383 #endif
1384
1385         switch (ccreg & 7) {
1386         case 0: /* No SE cache */
1387         case 7: /* Test mode */
1388                 break;
1389         default:
1390                 ccreg |= (TURBOSPARC_SCENABLE);
1391         }
1392         turbosparc_set_ccreg(ccreg);
1393
1394         mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
1395         mreg |= (TURBOSPARC_ICSNOOP);           /* Icache snooping on */
1396         srmmu_set_mmureg(mreg);
1397 }
1398
1399 static const struct sparc32_cachetlb_ops turbosparc_ops = {
1400         .cache_all      = turbosparc_flush_cache_all,
1401         .cache_mm       = turbosparc_flush_cache_mm,
1402         .cache_page     = turbosparc_flush_cache_page,
1403         .cache_range    = turbosparc_flush_cache_range,
1404         .tlb_all        = turbosparc_flush_tlb_all,
1405         .tlb_mm         = turbosparc_flush_tlb_mm,
1406         .tlb_page       = turbosparc_flush_tlb_page,
1407         .tlb_range      = turbosparc_flush_tlb_range,
1408         .page_to_ram    = turbosparc_flush_page_to_ram,
1409         .sig_insns      = turbosparc_flush_sig_insns,
1410         .page_for_dma   = turbosparc_flush_page_for_dma,
1411 };
1412
1413 static void __init init_turbosparc(void)
1414 {
1415         srmmu_name = "Fujitsu TurboSparc";
1416         srmmu_modtype = TurboSparc;
1417         sparc32_cachetlb_ops = &turbosparc_ops;
1418         poke_srmmu = poke_turbosparc;
1419 }
1420
1421 static void poke_tsunami(void)
1422 {
1423         unsigned long mreg = srmmu_get_mmureg();
1424
1425         tsunami_flush_icache();
1426         tsunami_flush_dcache();
1427         mreg &= ~TSUNAMI_ITD;
1428         mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
1429         srmmu_set_mmureg(mreg);
1430 }
1431
1432 static const struct sparc32_cachetlb_ops tsunami_ops = {
1433         .cache_all      = tsunami_flush_cache_all,
1434         .cache_mm       = tsunami_flush_cache_mm,
1435         .cache_page     = tsunami_flush_cache_page,
1436         .cache_range    = tsunami_flush_cache_range,
1437         .tlb_all        = tsunami_flush_tlb_all,
1438         .tlb_mm         = tsunami_flush_tlb_mm,
1439         .tlb_page       = tsunami_flush_tlb_page,
1440         .tlb_range      = tsunami_flush_tlb_range,
1441         .page_to_ram    = tsunami_flush_page_to_ram,
1442         .sig_insns      = tsunami_flush_sig_insns,
1443         .page_for_dma   = tsunami_flush_page_for_dma,
1444 };
1445
1446 static void __init init_tsunami(void)
1447 {
1448         /*
1449          * Tsunami's pretty sane, Sun and TI actually got it
1450          * somewhat right this time.  Fujitsu should have
1451          * taken some lessons from them.
1452          */
1453
1454         srmmu_name = "TI Tsunami";
1455         srmmu_modtype = Tsunami;
1456         sparc32_cachetlb_ops = &tsunami_ops;
1457         poke_srmmu = poke_tsunami;
1458
1459         tsunami_setup_blockops();
1460 }
1461
1462 static void poke_viking(void)
1463 {
1464         unsigned long mreg = srmmu_get_mmureg();
1465         static int smp_catch;
1466
1467         if (viking_mxcc_present) {
1468                 unsigned long mxcc_control = mxcc_get_creg();
1469
1470                 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
1471                 mxcc_control &= ~(MXCC_CTL_RRC);
1472                 mxcc_set_creg(mxcc_control);
1473
1474                 /*
1475                  * We don't need memory parity checks.
1476                  * XXX This is a mess, have to dig out later. ecd.
1477                 viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
1478                  */
1479
1480                 /* We do cache ptables on MXCC. */
1481                 mreg |= VIKING_TCENABLE;
1482         } else {
1483                 unsigned long bpreg;
1484
1485                 mreg &= ~(VIKING_TCENABLE);
1486                 if (smp_catch++) {
1487                         /* Must disable mixed-cmd mode here for the other cpus. */
1488                         bpreg = viking_get_bpreg();
1489                         bpreg &= ~(VIKING_ACTION_MIX);
1490                         viking_set_bpreg(bpreg);
1491
1492                         /* Just in case PROM does something funny. */
1493                         msi_set_sync();
1494                 }
1495         }
1496
1497         mreg |= VIKING_SPENABLE;
1498         mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
1499         mreg |= VIKING_SBENABLE;
1500         mreg &= ~(VIKING_ACENABLE);
1501         srmmu_set_mmureg(mreg);
1502 }
1503
1504 static struct sparc32_cachetlb_ops viking_ops __ro_after_init = {
1505         .cache_all      = viking_flush_cache_all,
1506         .cache_mm       = viking_flush_cache_mm,
1507         .cache_page     = viking_flush_cache_page,
1508         .cache_range    = viking_flush_cache_range,
1509         .tlb_all        = viking_flush_tlb_all,
1510         .tlb_mm         = viking_flush_tlb_mm,
1511         .tlb_page       = viking_flush_tlb_page,
1512         .tlb_range      = viking_flush_tlb_range,
1513         .page_to_ram    = viking_flush_page_to_ram,
1514         .sig_insns      = viking_flush_sig_insns,
1515         .page_for_dma   = viking_flush_page_for_dma,
1516 };
1517
1518 #ifdef CONFIG_SMP
1519 /* On sun4d the cpu broadcasts local TLB flushes, so we can just
1520  * perform the local TLB flush and all the other cpus will see it.
1521  * But, unfortunately, there is a bug in the sun4d XBUS backplane
1522  * that requires that we add some synchronization to these flushes.
1523  *
1524  * The bug is that the fifo which keeps track of all the pending TLB
1525  * broadcasts in the system is an entry or two too small, so if we
1526  * have too many going at once we'll overflow that fifo and lose a TLB
1527  * flush resulting in corruption.
1528  *
1529  * Our workaround is to take a global spinlock around the TLB flushes,
1530  * which guarantees we won't ever have too many pending.  It's a big
1531  * hammer, but a semaphore-like system to cap the number of in-flight
1532  * TLB flushes would require SMP locking anyway, so there's no real
1533  * value in trying any harder than this (see the sketch below).
1534  */
1535 static struct sparc32_cachetlb_ops viking_sun4d_smp_ops __ro_after_init = {
1536         .cache_all      = viking_flush_cache_all,
1537         .cache_mm       = viking_flush_cache_mm,
1538         .cache_page     = viking_flush_cache_page,
1539         .cache_range    = viking_flush_cache_range,
1540         .tlb_all        = sun4dsmp_flush_tlb_all,
1541         .tlb_mm         = sun4dsmp_flush_tlb_mm,
1542         .tlb_page       = sun4dsmp_flush_tlb_page,
1543         .tlb_range      = sun4dsmp_flush_tlb_range,
1544         .page_to_ram    = viking_flush_page_to_ram,
1545         .sig_insns      = viking_flush_sig_insns,
1546         .page_for_dma   = viking_flush_page_for_dma,
1547 };
1548 #endif
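/*
 * Illustrative sketch only: the sun4dsmp_flush_tlb_*() entries above are
 * implemented elsewhere; the lock and function names below are assumed
 * for illustration and are not part of this file.  The pattern is simply
 * one global spinlock so that at most one broadcast TLB flush is in
 * flight at a time.
 */
#if 0
static DEFINE_SPINLOCK(sun4d_tlbflush_lock);		/* hypothetical lock */

static void sun4dsmp_flush_tlb_page_sketch(struct vm_area_struct *vma,
					    unsigned long page)
{
	unsigned long flags;

	/* Serialize so the XBUS flush fifo can never overflow. */
	spin_lock_irqsave(&sun4d_tlbflush_lock, flags);
	viking_flush_tlb_page(vma, page);	/* hardware broadcasts the flush */
	spin_unlock_irqrestore(&sun4d_tlbflush_lock, flags);
}
#endif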
1549
1550 static void __init init_viking(void)
1551 {
1552         unsigned long mreg = srmmu_get_mmureg();
1553
1554         /* Ahhh, the viking.  SRMMU VLSI abortion number two... */
1555         if (mreg & VIKING_MMODE) {
1556                 srmmu_name = "TI Viking";
1557                 viking_mxcc_present = 0;
1558                 msi_set_sync();
1559
1560                 /*
1561                  * We need this to make sure the old viking takes no hits
1562                  * on its cache for dma snoops, to work around the
1563                  * "load from non-cacheable memory" interrupt bug.
1564                  * This is only necessary because of the new way in
1565                  * which we use the IOMMU.
1566                  */
1567                 viking_ops.page_for_dma = viking_flush_page;
1568 #ifdef CONFIG_SMP
1569                 viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
1570 #endif
1571                 flush_page_for_dma_global = 0;
1572         } else {
1573                 srmmu_name = "TI Viking/MXCC";
1574                 viking_mxcc_present = 1;
1575                 srmmu_cache_pagetables = 1;
1576         }
1577
1578         sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1579                 &viking_ops;
1580 #ifdef CONFIG_SMP
1581         if (sparc_cpu_model == sun4d)
1582                 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1583                         &viking_sun4d_smp_ops;
1584 #endif
1585
1586         poke_srmmu = poke_viking;
1587 }
1588
1589 /* Probe for the srmmu chip version. */
1590 static void __init get_srmmu_type(void)
1591 {
1592         unsigned long mreg, psr;
1593         unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
1594
1595         srmmu_modtype = SRMMU_INVAL_MOD;
1596         hwbug_bitmask = 0;
1597
1598         mreg = srmmu_get_mmureg(); psr = get_psr();
1599         mod_typ = (mreg & 0xf0000000) >> 28;
1600         mod_rev = (mreg & 0x0f000000) >> 24;
1601         psr_typ = (psr >> 28) & 0xf;
1602         psr_vers = (psr >> 24) & 0xf;
1603
1604         /* First, check for sparc-leon. */
1605         if (sparc_cpu_model == sparc_leon) {
1606                 init_leon();
1607                 return;
1608         }
1609
1610         /* Second, check for HyperSparc or Cypress. */
1611         if (mod_typ == 1) {
1612                 switch (mod_rev) {
1613                 case 7:
1614                         /* UP or MP Hypersparc */
1615                         init_hypersparc();
1616                         break;
1617                 case 0:
1618                 case 2:
1619                 case 10:
1620                 case 11:
1621                 case 12:
1622                 case 13:
1623                 case 14:
1624                 case 15:
1625                 default:
1626                         prom_printf("Sparc-Linux Cypress support no longer exists.\n");
1627                         prom_halt();
1628                         break;
1629                 }
1630                 return;
1631         }
1632
1633         /* Now Fujitsu TurboSparc.  It may be running in Swift
1634          * emulation mode, so we will check for that later...
1635          */
1636         if (psr_typ == 0 && psr_vers == 5) {
1637                 init_turbosparc();
1638                 return;
1639         }
1640
1641         /* Next check for Fujitsu Swift. */
1642         if (psr_typ == 0 && psr_vers == 4) {
1643                 phandle cpunode;
1644                 char node_str[128];
1645
1646                 /* Check whether this is really a TurboSparc emulating Swift... */
1647                 cpunode = prom_getchild(prom_root_node);
1648                 while ((cpunode = prom_getsibling(cpunode)) != 0) {
1649                         prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1650                         if (!strcmp(node_str, "cpu")) {
1651                                 if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
1652                                     prom_getintdefault(cpunode, "psr-version", 1) == 5) {
1653                                         init_turbosparc();
1654                                         return;
1655                                 }
1656                                 break;
1657                         }
1658                 }
1659
1660                 init_swift();
1661                 return;
1662         }
1663
1664         /* Now the Viking family of srmmu. */
1665         if (psr_typ == 4 &&
1666            ((psr_vers == 0) ||
1667             ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
1668                 init_viking();
1669                 return;
1670         }
1671
1672         /* Finally the Tsunami. */
1673         if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
1674                 init_tsunami();
1675                 return;
1676         }
1677
1678         /* Oh well */
1679         srmmu_is_bad();
1680 }
1681
1682 #ifdef CONFIG_SMP
1683 /* SMP wrappers: cross-call the flush to the other cpus, then run it locally. */
1684 static void smp_flush_page_for_dma(unsigned long page)
1685 {
1686         xc1((smpfunc_t) local_ops->page_for_dma, page);
1687         local_ops->page_for_dma(page);
1688 }
1689
1690 static void smp_flush_cache_all(void)
1691 {
1692         xc0((smpfunc_t) local_ops->cache_all);
1693         local_ops->cache_all();
1694 }
1695
1696 static void smp_flush_tlb_all(void)
1697 {
1698         xc0((smpfunc_t) local_ops->tlb_all);
1699         local_ops->tlb_all();
1700 }
1701
1702 static void smp_flush_cache_mm(struct mm_struct *mm)
1703 {
1704         if (mm->context != NO_CONTEXT) {
1705                 cpumask_t cpu_mask;
1706                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1707                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1708                 if (!cpumask_empty(&cpu_mask))
1709                         xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
1710                 local_ops->cache_mm(mm);
1711         }
1712 }
1713
1714 static void smp_flush_tlb_mm(struct mm_struct *mm)
1715 {
1716         if (mm->context != NO_CONTEXT) {
1717                 cpumask_t cpu_mask;
1718                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1719                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1720                 if (!cpumask_empty(&cpu_mask)) {
1721                         xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
1722                         if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
1723                                 cpumask_copy(mm_cpumask(mm),
1724                                              cpumask_of(smp_processor_id()));
1725                 }
1726                 local_ops->tlb_mm(mm);
1727         }
1728 }
1729
1730 static void smp_flush_cache_range(struct vm_area_struct *vma,
1731                                   unsigned long start,
1732                                   unsigned long end)
1733 {
1734         struct mm_struct *mm = vma->vm_mm;
1735
1736         if (mm->context != NO_CONTEXT) {
1737                 cpumask_t cpu_mask;
1738                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1739                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1740                 if (!cpumask_empty(&cpu_mask))
1741                         xc3((smpfunc_t) local_ops->cache_range,
1742                             (unsigned long) vma, start, end);
1743                 local_ops->cache_range(vma, start, end);
1744         }
1745 }
1746
1747 static void smp_flush_tlb_range(struct vm_area_struct *vma,
1748                                 unsigned long start,
1749                                 unsigned long end)
1750 {
1751         struct mm_struct *mm = vma->vm_mm;
1752
1753         if (mm->context != NO_CONTEXT) {
1754                 cpumask_t cpu_mask;
1755                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1756                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1757                 if (!cpumask_empty(&cpu_mask))
1758                         xc3((smpfunc_t) local_ops->tlb_range,
1759                             (unsigned long) vma, start, end);
1760                 local_ops->tlb_range(vma, start, end);
1761         }
1762 }
1763
1764 static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1765 {
1766         struct mm_struct *mm = vma->vm_mm;
1767
1768         if (mm->context != NO_CONTEXT) {
1769                 cpumask_t cpu_mask;
1770                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1771                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1772                 if (!cpumask_empty(&cpu_mask))
1773                         xc2((smpfunc_t) local_ops->cache_page,
1774                             (unsigned long) vma, page);
1775                 local_ops->cache_page(vma, page);
1776         }
1777 }
1778
1779 static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1780 {
1781         struct mm_struct *mm = vma->vm_mm;
1782
1783         if (mm->context != NO_CONTEXT) {
1784                 cpumask_t cpu_mask;
1785                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1786                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1787                 if (!cpumask_empty(&cpu_mask))
1788                         xc2((smpfunc_t) local_ops->tlb_page,
1789                             (unsigned long) vma, page);
1790                 local_ops->tlb_page(vma, page);
1791         }
1792 }
1793
1794 static void smp_flush_page_to_ram(unsigned long page)
1795 {
1796         /* Current theory is that those who call this are the ones
1797          * who have just dirtied their cache with the page's contents
1798          * in kernel space, therefore we only run this on the local cpu.
1799          *
1800          * XXX This experiment failed, research further... -DaveM
1801          */
1802 #if 1
1803         xc1((smpfunc_t) local_ops->page_to_ram, page);
1804 #endif
1805         local_ops->page_to_ram(page);
1806 }
1807
1808 static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1809 {
1810         cpumask_t cpu_mask;
1811         cpumask_copy(&cpu_mask, mm_cpumask(mm));
1812         cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1813         if (!cpumask_empty(&cpu_mask))
1814                 xc2((smpfunc_t) local_ops->sig_insns,
1815                     (unsigned long) mm, insn_addr);
1816         local_ops->sig_insns(mm, insn_addr);
1817 }
1818
1819 static struct sparc32_cachetlb_ops smp_cachetlb_ops __ro_after_init = {
1820         .cache_all      = smp_flush_cache_all,
1821         .cache_mm       = smp_flush_cache_mm,
1822         .cache_page     = smp_flush_cache_page,
1823         .cache_range    = smp_flush_cache_range,
1824         .tlb_all        = smp_flush_tlb_all,
1825         .tlb_mm         = smp_flush_tlb_mm,
1826         .tlb_page       = smp_flush_tlb_page,
1827         .tlb_range      = smp_flush_tlb_range,
1828         .page_to_ram    = smp_flush_page_to_ram,
1829         .sig_insns      = smp_flush_sig_insns,
1830         .page_for_dma   = smp_flush_page_for_dma,
1831 };
1832 #endif
1833
1834 /* Load up routines and constants for sun4m and sun4d mmu */
1835 void __init load_mmu(void)
1836 {
1837         /* Functions */
1838         get_srmmu_type();
1839
1840 #ifdef CONFIG_SMP
1841         /* El switcheroo... */
1842         local_ops = sparc32_cachetlb_ops;
1843
1844         if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
1845                 smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
1846                 smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
1847                 smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
1848                 smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
1849         }
1850
1851         if (poke_srmmu == poke_viking) {
1852                 /* Avoid unnecessary cross calls. */
1853                 smp_cachetlb_ops.cache_all = local_ops->cache_all;
1854                 smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
1855                 smp_cachetlb_ops.cache_range = local_ops->cache_range;
1856                 smp_cachetlb_ops.cache_page = local_ops->cache_page;
1857
1858                 smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
1859                 smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
1860                 smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
1861         }
1862
1863         /* It really is const after this point. */
1864         sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1865                 &smp_cachetlb_ops;
1866 #endif
1867
1868         if (sparc_cpu_model != sun4d)
1869                 ld_mmu_iommu();
1870 #ifdef CONFIG_SMP
1871         if (sparc_cpu_model == sun4d)
1872                 sun4d_init_smp();
1873         else if (sparc_cpu_model == sparc_leon)
1874                 leon_init_smp();
1875         else
1876                 sun4m_init_smp();
1877 #endif
1878 }