// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/setup.h>

#define CSKY_TLB_SIZE CONFIG_CPU_TLB_SIZE
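
/*
 * Each jTLB entry maps an even/odd pair of pages, which is why the
 * flush helpers below align addresses with (PAGE_MASK << 1) and step
 * by (PAGE_SIZE << 1): one iteration invalidates one entry covering
 * two pages.
 */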

void flush_tlb_all(void)
{
	tlb_invalid_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);

	tlb_invalid_all();
}
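
/*
 * Probing the jTLB clobbers entryhi, so it must be restored afterwards.
 * If the saved pid happens to match the pid we just probed with, a
 * different value is written first; this appears to force the utlb to
 * drop the entry we just invalidated rather than serve it stale.
 */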
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if ((oldpid & ASID_MASK) == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
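
/*
 * For a small range, invalidate the affected entries one by one; once
 * the range would span more than half the jTLB it is cheaper to drop
 * the mm's whole context and let it pick up a fresh ASID.
 */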
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;
		int newpid = cpu_asid(cpu, mm);

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= CSKY_TLB_SIZE/2) {
			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
			while (start < end) {
				asm volatile("tlbi.vaas %0"
					     ::"r"(start | newpid));
				start += (PAGE_SIZE << 1);
			}
			sync_is();
#else
			{
			int oldpid = read_mmu_entryhi();

			while (start < end) {
				int idx;

				write_mmu_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = read_mmu_index();
				if (idx >= 0)
					tlb_invalid_indexed();
			}
			restore_asid_inv_utlb(oldpid, newpid);
			}
#endif
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
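
/*
 * Flush a kernel address range: no ASID is mixed into the probe
 * address, and restore_asid_inv_utlb() is told the probe pid was 0.
 */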
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= CSKY_TLB_SIZE) {
		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
		while (start < end) {
			asm volatile("tlbi.vaas %0"::"r"(start));
			start += (PAGE_SIZE << 1);
		}
		sync_is();
#else
		{
		int oldpid = read_mmu_entryhi();

		while (start < end) {
			int idx;

			write_mmu_entryhi(start);
			start += (PAGE_SIZE << 1);
			tlb_probe();
			idx = read_mmu_index();
			if (idx >= 0)
				tlb_invalid_indexed();
		}
		restore_asid_inv_utlb(oldpid, 0);
		}
#endif
	} else {
		flush_tlb_all();
	}

	local_irq_restore(flags);
}
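
/*
 * Flush the jTLB entry covering a single user page.  Note that newpid
 * is derived from vma->vm_mm before the !vma check below, so callers
 * are expected to pass a valid vma.
 */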
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	int newpid = cpu_asid(cpu, vma->vm_mm);

	if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
		page &= (PAGE_MASK << 1);

#ifdef CONFIG_CPU_HAS_TLBI
		asm volatile("tlbi.vaas %0"::"r"(page | newpid));
		sync_is();
#else
		{
		int oldpid, idx;
		unsigned long flags;

		local_irq_save(flags);
		oldpid = read_mmu_entryhi();
		write_mmu_entryhi(page | newpid);
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();

		restore_asid_inv_utlb(oldpid, newpid);
		local_irq_restore(flags);
		}
#endif
	}
}

/*
 * Remove one kernel space TLB entry.  This entry is assumed to be marked
 * global so we don't do the ASID thing.
 */
void flush_tlb_one(unsigned long page)
{
	int oldpid;

	oldpid = read_mmu_entryhi();
	page &= (PAGE_MASK << 1);
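
	/*
	 * Note the pid masks below: the TLBI form keeps 12 bits of the
	 * saved entryhi (0xfff), while the jTLB probe form keeps only
	 * the 8-bit ASID (0xff).
	 */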
#ifdef CONFIG_CPU_HAS_TLBI
	page = page | (oldpid & 0xfff);
	asm volatile("tlbi.vaas %0"::"r"(page));
	sync_is();
#else
	{
	int idx;
	unsigned long flags;

	page = page | (oldpid & 0xff);

	local_irq_save(flags);
	write_mmu_entryhi(page);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);

/* show current 32 jtlbs */
void show_jtlb_table(void)
{
	unsigned long flags;
	int entryhi, entrylo0, entrylo1;
	int entry;
	int oldpid;

	local_irq_save(flags);
	entry = 0;
	pr_info("\n\n\n");

	oldpid = read_mmu_entryhi();
	while (entry < CSKY_TLB_SIZE) {
		write_mmu_index(entry);
		tlb_read();
		entryhi  = read_mmu_entryhi();
		entrylo0 = read_mmu_entrylo0();
		entrylo1 = read_mmu_entrylo1();
		pr_info("jtlb[%d]: entryhi - 0x%x; entrylo0 - 0x%x;"
			" entrylo1 - 0x%x\n",
			entry, entryhi, entrylo0, entrylo1);
		entry++;
	}
	write_mmu_entryhi(oldpid);
	local_irq_restore(flags);
}