1 /* SPDX-License-Identifier: GPL-2.0 */
3 /* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */
6 #include <asm/loongarch.h>
8 #include <asm/pgtable.h>
9 #include <asm/regdef.h>
10 #include <asm/stackframe.h>
12 #define INVTLB_ADDR_GFALSE_AND_ASID 5
14 #define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3)
15 #define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3)
16 #define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3)
17 #define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
19 .macro tlb_do_page_fault, write
20 SYM_CODE_START(tlb_do_page_fault_\write)
23 csrrd a2, LOONGARCH_CSR_BADV
25 REG_S a2, sp, PT_BVADDR
29 SYM_CODE_END(tlb_do_page_fault_\write)
35 SYM_CODE_START(handle_tlb_protect)
41 csrrd a2, LOONGARCH_CSR_BADV
42 REG_S a2, sp, PT_BVADDR
43 la_abs t0, do_page_fault
46 SYM_CODE_END(handle_tlb_protect)
48 SYM_CODE_START(handle_tlb_load)
50 csrwr t0, EXCEPTION_KS0
51 csrwr t1, EXCEPTION_KS1
52 csrwr ra, EXCEPTION_KS2
55 * The vmalloc handling is not in the hotpath.
57 csrrd t0, LOONGARCH_CSR_BADV
59 csrrd t1, LOONGARCH_CSR_PGDL
62 /* Get PGD offset in bytes */
63 bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
65 #if CONFIG_PGTABLE_LEVELS > 3
67 bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
70 #if CONFIG_PGTABLE_LEVELS > 2
72 bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
78 * For huge tlb entries, pmde doesn't contain an address but
79 * instead contains the tlb pte. Check the PAGE_HUGE bit and
80 * see if we need to jump to huge tlb processing.
82 rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
83 bltz ra, tlb_huge_update_load
85 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
86 bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
87 alsl.d t1, t0, ra, _PTE_T_LOG2
90 smp_pgtable_change_load:
95 andi ra, t0, _PAGE_PRESENT
96 beqz ra, nopage_tlb_load
98 ori t0, t0, _PAGE_VALID
101 beqz t0, smp_pgtable_change_load
106 bstrins.d t1, zero, 3, 3
109 csrwr t0, LOONGARCH_CSR_TLBELO0
110 csrwr t1, LOONGARCH_CSR_TLBELO1
113 csrrd t0, EXCEPTION_KS0
114 csrrd t1, EXCEPTION_KS1
115 csrrd ra, EXCEPTION_KS2
120 la_abs t1, swapper_pg_dir
124 /* This is the entry point of a huge page. */
125 tlb_huge_update_load:
129 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
131 andi t0, ra, _PAGE_PRESENT
132 beqz t0, nopage_tlb_load
135 ori t0, ra, _PAGE_VALID
137 beqz t0, tlb_huge_update_load
138 ori t0, ra, _PAGE_VALID
140 ori t0, ra, _PAGE_VALID
143 csrrd ra, LOONGARCH_CSR_ASID
144 csrrd t1, LOONGARCH_CSR_BADV
145 andi ra, ra, CSR_ASID_ASID
146 invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
149 * A huge PTE describes an area the size of the
150 * configured huge page size. This is twice the
151 * of the large TLB entry size we intend to use.
152 * A TLB entry half the size of the configured
153 * huge page size is configured into entrylo0
154 * and entrylo1 to cover the contiguous huge PTE
157 /* Huge page: Move Global bit */
158 xori t0, t0, _PAGE_HUGE
159 lu12i.w t1, _PAGE_HGLOBAL >> 12
161 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
165 csrwr ra, LOONGARCH_CSR_TLBELO0
167 /* Convert to entrylo1 */
169 slli.d t1, t1, (HPAGE_SHIFT - 1)
171 csrwr t0, LOONGARCH_CSR_TLBELO1
173 /* Set huge page tlb entry size */
174 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
175 addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
176 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
180 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
181 addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
182 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
184 csrrd t0, EXCEPTION_KS0
185 csrrd t1, EXCEPTION_KS1
186 csrrd ra, EXCEPTION_KS2
191 csrrd ra, EXCEPTION_KS2
192 la_abs t0, tlb_do_page_fault_0
194 SYM_CODE_END(handle_tlb_load)
196 SYM_CODE_START(handle_tlb_load_ptw)
197 UNWIND_HINT_UNDEFINED
198 csrwr t0, LOONGARCH_CSR_KS0
199 csrwr t1, LOONGARCH_CSR_KS1
200 la_abs t0, tlb_do_page_fault_0
202 SYM_CODE_END(handle_tlb_load_ptw)
204 SYM_CODE_START(handle_tlb_store)
205 UNWIND_HINT_UNDEFINED
206 csrwr t0, EXCEPTION_KS0
207 csrwr t1, EXCEPTION_KS1
208 csrwr ra, EXCEPTION_KS2
211 * The vmalloc handling is not in the hotpath.
213 csrrd t0, LOONGARCH_CSR_BADV
214 bltz t0, vmalloc_store
215 csrrd t1, LOONGARCH_CSR_PGDL
218 /* Get PGD offset in bytes */
219 bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
221 #if CONFIG_PGTABLE_LEVELS > 3
223 bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
226 #if CONFIG_PGTABLE_LEVELS > 2
228 bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
234 * For huge tlb entries, pmde doesn't contain an address but
235 * instead contains the tlb pte. Check the PAGE_HUGE bit and
236 * see if we need to jump to huge tlb processing.
238 rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
239 bltz ra, tlb_huge_update_store
241 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
242 bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
243 alsl.d t1, t0, ra, _PTE_T_LOG2
246 smp_pgtable_change_store:
251 andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE
252 xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE
253 bnez ra, nopage_tlb_store
255 ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
258 beqz t0, smp_pgtable_change_store
263 bstrins.d t1, zero, 3, 3
266 csrwr t0, LOONGARCH_CSR_TLBELO0
267 csrwr t1, LOONGARCH_CSR_TLBELO1
270 csrrd t0, EXCEPTION_KS0
271 csrrd t1, EXCEPTION_KS1
272 csrrd ra, EXCEPTION_KS2
277 la_abs t1, swapper_pg_dir
281 /* This is the entry point of a huge page. */
282 tlb_huge_update_store:
286 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
288 andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE
289 xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE
290 bnez t0, nopage_tlb_store
293 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
295 beqz t0, tlb_huge_update_store
296 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
298 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
301 csrrd ra, LOONGARCH_CSR_ASID
302 csrrd t1, LOONGARCH_CSR_BADV
303 andi ra, ra, CSR_ASID_ASID
304 invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
307 * A huge PTE describes an area the size of the
308 * configured huge page size. This is twice the
309 * of the large TLB entry size we intend to use.
310 * A TLB entry half the size of the configured
311 * huge page size is configured into entrylo0
312 * and entrylo1 to cover the contiguous huge PTE
315 /* Huge page: Move Global bit */
316 xori t0, t0, _PAGE_HUGE
317 lu12i.w t1, _PAGE_HGLOBAL >> 12
319 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
323 csrwr ra, LOONGARCH_CSR_TLBELO0
325 /* Convert to entrylo1 */
327 slli.d t1, t1, (HPAGE_SHIFT - 1)
329 csrwr t0, LOONGARCH_CSR_TLBELO1
331 /* Set huge page tlb entry size */
332 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
333 addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
334 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
338 /* Reset default page size */
339 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
340 addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
341 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
343 csrrd t0, EXCEPTION_KS0
344 csrrd t1, EXCEPTION_KS1
345 csrrd ra, EXCEPTION_KS2
350 csrrd ra, EXCEPTION_KS2
351 la_abs t0, tlb_do_page_fault_1
353 SYM_CODE_END(handle_tlb_store)
355 SYM_CODE_START(handle_tlb_store_ptw)
356 UNWIND_HINT_UNDEFINED
357 csrwr t0, LOONGARCH_CSR_KS0
358 csrwr t1, LOONGARCH_CSR_KS1
359 la_abs t0, tlb_do_page_fault_1
361 SYM_CODE_END(handle_tlb_store_ptw)
363 SYM_CODE_START(handle_tlb_modify)
364 UNWIND_HINT_UNDEFINED
365 csrwr t0, EXCEPTION_KS0
366 csrwr t1, EXCEPTION_KS1
367 csrwr ra, EXCEPTION_KS2
370 * The vmalloc handling is not in the hotpath.
372 csrrd t0, LOONGARCH_CSR_BADV
373 bltz t0, vmalloc_modify
374 csrrd t1, LOONGARCH_CSR_PGDL
377 /* Get PGD offset in bytes */
378 bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
380 #if CONFIG_PGTABLE_LEVELS > 3
382 bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
385 #if CONFIG_PGTABLE_LEVELS > 2
387 bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
393 * For huge tlb entries, pmde doesn't contain an address but
394 * instead contains the tlb pte. Check the PAGE_HUGE bit and
395 * see if we need to jump to huge tlb processing.
397 rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
398 bltz ra, tlb_huge_update_modify
400 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
401 bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
402 alsl.d t1, t0, ra, _PTE_T_LOG2
405 smp_pgtable_change_modify:
410 andi ra, t0, _PAGE_WRITE
411 beqz ra, nopage_tlb_modify
413 ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
416 beqz t0, smp_pgtable_change_modify
421 bstrins.d t1, zero, 3, 3
424 csrwr t0, LOONGARCH_CSR_TLBELO0
425 csrwr t1, LOONGARCH_CSR_TLBELO1
428 csrrd t0, EXCEPTION_KS0
429 csrrd t1, EXCEPTION_KS1
430 csrrd ra, EXCEPTION_KS2
435 la_abs t1, swapper_pg_dir
436 b vmalloc_done_modify
439 /* This is the entry point of a huge page. */
440 tlb_huge_update_modify:
444 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
446 andi t0, ra, _PAGE_WRITE
447 beqz t0, nopage_tlb_modify
450 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
452 beqz t0, tlb_huge_update_modify
453 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
455 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
458 csrrd ra, LOONGARCH_CSR_ASID
459 csrrd t1, LOONGARCH_CSR_BADV
460 andi ra, ra, CSR_ASID_ASID
461 invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
464 * A huge PTE describes an area the size of the
465 * configured huge page size. This is twice the
466 * of the large TLB entry size we intend to use.
467 * A TLB entry half the size of the configured
468 * huge page size is configured into entrylo0
469 * and entrylo1 to cover the contiguous huge PTE
472 /* Huge page: Move Global bit */
473 xori t0, t0, _PAGE_HUGE
474 lu12i.w t1, _PAGE_HGLOBAL >> 12
476 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
480 csrwr ra, LOONGARCH_CSR_TLBELO0
482 /* Convert to entrylo1 */
484 slli.d t1, t1, (HPAGE_SHIFT - 1)
486 csrwr t0, LOONGARCH_CSR_TLBELO1
488 /* Set huge page tlb entry size */
489 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
490 addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
491 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
495 /* Reset default page size */
496 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
497 addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
498 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
500 csrrd t0, EXCEPTION_KS0
501 csrrd t1, EXCEPTION_KS1
502 csrrd ra, EXCEPTION_KS2
507 csrrd ra, EXCEPTION_KS2
508 la_abs t0, tlb_do_page_fault_1
510 SYM_CODE_END(handle_tlb_modify)
512 SYM_CODE_START(handle_tlb_modify_ptw)
513 UNWIND_HINT_UNDEFINED
514 csrwr t0, LOONGARCH_CSR_KS0
515 csrwr t1, LOONGARCH_CSR_KS1
516 la_abs t0, tlb_do_page_fault_1
518 SYM_CODE_END(handle_tlb_modify_ptw)
520 SYM_CODE_START(handle_tlb_refill)
521 UNWIND_HINT_UNDEFINED
522 csrwr t0, LOONGARCH_CSR_TLBRSAVE
523 csrrd t0, LOONGARCH_CSR_PGD
525 #if CONFIG_PGTABLE_LEVELS > 3
528 #if CONFIG_PGTABLE_LEVELS > 2
534 csrrd t0, LOONGARCH_CSR_TLBRSAVE
536 SYM_CODE_END(handle_tlb_refill)