/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32
kvmap_itlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4
	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:
	/* Catch kernel NULL pointer calls.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_longpath
	 nop
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
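	/* KERN_TSB_LOOKUP_TL1 probes the kernel TSB and branches to
	 * kvmap_itlb_load on a tag match.  A rough C sketch of the
	 * probe (illustration only; the real macro lives in asm/tsb.h
	 * and works on tag-target values rather than raw vaddrs):
	 *
	 *	struct tsb *ent = &swapper_tsb[(vaddr >> PAGE_SHIFT) &
	 *				       (KERNEL_TSB_NENTRIES - 1)];
	 *	if (ent->tag == tag_target)
	 *		goto load;	// PTE is in ent->pte
	 */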
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop
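	/* The dispatch above is roughly (sketch only; the mov/sllx
	 * pair builds the 4GB boundary above the OBP window):
	 *
	 *	if (vaddr < LOW_OBP_ADDRESS)
	 *		goto vmalloc_addr;
	 *	else if (vaddr < (1UL << 32))
	 *		goto obp;
	 *	// else fall through to vmalloc_addr below
	 */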
kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_itlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)
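	/* The sequence above follows the usual TSB update protocol:
	 * TSB_LOCK_TAG marks the entry busy, the PTE is fetched by
	 * physical address, and a PTE with the valid (sign) bit clear
	 * aborts to the long path while the annulled delay slot parks
	 * the invalid-tag pattern in the entry to unlock it.  Roughly
	 * (sketch only; field names illustrative):
	 *
	 *	lock_tag(ent);			// TSB_LOCK_TAG
	 *	pte = *(u64 *)pte_paddr;	// ldxa PHYS_USE_EC
	 *	if ((long)pte >= 0) {		// _PAGE_VALID clear
	 *		ent->tag = 1UL << TSB_TAG_INVALID_BIT;
	 *		goto longpath;
	 *	}
	 *	ent->pte = pte;			// TSB_WRITE: PTE first,
	 *	ent->tag = tag_target;		// then tag to unlock
	 */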
	/* fallthrough to TLB load */

kvmap_itlb_load:
661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous
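	/* Each 661: label plus .sun4v_2insn_patch entry lets early
	 * boot rewrite the two sun4u instructions at that address when
	 * the kernel finds itself on a sun4v hypervisor.  A sketch of
	 * the patch loop (entry layout as in the sparc64 patch tables;
	 * names abbreviated):
	 *
	 *	struct { u32 addr, insns[2]; } *p;
	 *	for (p = start; p < end; p++) {
	 *		u32 *insn = (u32 *) (unsigned long) p->addr;
	 *		insn[0] = p->insns[0];
	 *		insn[1] = p->insns[1];
	 *		flushi(insn);
	 *	}
	 */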
	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3
kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4
kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop
kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop
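	/* Both OBP paths resolve the address against the firmware
	 * translations captured from the PROM at boot.  In rough C
	 * (sketch only; OBP_TRANS_LOOKUP is the real walker and the
	 * prom_trans[] table layout is assumed):
	 *
	 *	for (i = 0; prom_trans[i].virt; i++)
	 *		if (vaddr >= prom_trans[i].virt &&
	 *		    vaddr <  prom_trans[i].virt + prom_trans[i].size)
	 *			return prom_trans[i].data +
	 *			       (vaddr - prom_trans[i].virt);
	 *	goto longpath;
	 */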
	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop
kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4
	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop
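	/* Linear (PAGE_OFFSET) kernel addresses live in the negative
	 * half of the 64-bit address space, so the sign test above is
	 * all the dispatch needed:
	 *
	 *	if ((long)vaddr >= 0)
	 *		goto nonlinear;		// vmalloc/OBP/vmemmap
	 */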
#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* TSB entry address left in %g1, lookup linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
kvmap_dtlb_tsb4m_miss:
	/* Clear the PAGE_OFFSET top virtual bits, shift
	 * down to get a PFN, and make sure the PFN is in range.
	 */
661:	sllx		%g4, 0, %g5
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous
	/* Check to see if we know about valid memory at the 4MB
	 * chunk this physical address will reside within.
	 */
661:	srlx		%g5, MAX_PHYS_ADDRESS_BITS, %g2
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous
	brnz,pn		%g2, kvmap_dtlb_longpath
	 nop
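	/* The two patched shifts above implement, roughly:
	 *
	 *	paddr = vaddr - PAGE_OFFSET;	// via patched shifts
	 *	if (paddr >> MAX_PHYS_ADDRESS_BITS)
	 *		goto longpath;		// PFN out of range
	 *
	 * The shift counts in the 661: instructions are rewritten
	 * through .page_offset_shift_patch once the final PAGE_OFFSET
	 * is chosen at boot (a paraphrase, not a quote of the patch
	 * code).
	 */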
	/* This unconditional branch and its delay-slot nop get patched
	 * with the sethi sequence below once the bitmap has been
	 * properly set up.
	 */
	.globl		valid_addr_bitmap_insn
valid_addr_bitmap_insn:
	ba,pt		%xcc, 2f
	 nop
	.subsection	2
	.globl		valid_addr_bitmap_patch
valid_addr_bitmap_patch:
	sethi		%hi(sparc64_valid_addr_bitmap), %g7
	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
	.previous
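	/* Once patched in, the sethi/or pair points %g7 at
	 * sparc64_valid_addr_bitmap and the code below tests one bit
	 * per 4MB chunk, roughly:
	 *
	 *	unsigned long bit = paddr >> ILOG2_4MB;
	 *	if (!(bitmap[bit / 64] & (1UL << (bit % 64))))
	 *		goto longpath;
	 */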
661:	srlx		%g5, ILOG2_4MB, %g2
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	srlx		%g2, 6, %g5
	and		%g2, 63, %g2
	sllx		%g5, 3, %g5
	ldx		[%g7 + %g5], %g5
	mov		1, %g7
	sllx		%g7, %g2, %g7
	andcc		%g5, %g7, %g0
	be,pn		%xcc, kvmap_dtlb_longpath
2:	 sethi		%hi(kpte_linear_bitmap), %g2

	/* Get the 256MB physical address index. */
661:	sllx		%g4, 0, %g5
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	or		%g2, %lo(kpte_linear_bitmap), %g2

661:	srlx		%g5, ILOG2_256MB, %g5
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	and		%g5, (32 - 1), %g7

	/* Divide by 32 to get the offset into the bitmask.  */
	srlx		%g5, 5, %g5
	add		%g7, %g7, %g7
	sllx		%g5, 3, %g5

	/* kern_linear_pte_xor[(mask >> shift) & 3] */
	ldx		[%g2 + %g5], %g2
	srlx		%g2, %g7, %g7
	sethi		%hi(kern_linear_pte_xor), %g5
	and		%g7, 3, %g7
	or		%g5, %lo(kern_linear_pte_xor), %g5
	sllx		%g7, 3, %g7
	ldx		[%g5 + %g7], %g2
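	/* In C, the selector lookup above is approximately:
	 *
	 *	unsigned long i = paddr >> ILOG2_256MB;
	 *	unsigned long word = kpte_linear_bitmap[i / 32];
	 *	unsigned long sel = (word >> ((i % 32) * 2)) & 3;
	 *	unsigned long pte = vaddr ^ kern_linear_pte_xor[sel];
	 *
	 * Two bits per 256MB chunk select which page-size XOR value
	 * turns a linear virtual address into its PTE.
	 */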
	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5
kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_dtlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)
	/* fallthrough to TLB load */

kvmap_dtlb_load:
661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous
	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3
#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	sub		%g4, %g5, %g5
	srlx		%g5, ILOG2_4MB, %g5
	sethi		%hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
	or		%g1, %lo(vmemmap_table), %g1
	ba,pt		%xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
#endif
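	/* %g5 holds VMEMMAP_BASE on entry (set up at the branch site
	 * below), so the block above computes, approximately:
	 *
	 *	pte = vmemmap_table[(vaddr - VMEMMAP_BASE) >> ILOG2_4MB];
	 *
	 * i.e. one pre-built 4MB mapping per vmemmap chunk.
	 */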
kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	mov		(VMEMMAP_BASE >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
kvmap_dtlb_tsbmiss:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 mov		(VMALLOC_END >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop
kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop
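	/* Taken together, the nonlinear dispatch is roughly (vmemmap
	 * line only with CONFIG_SPARSEMEM_VMEMMAP):
	 *
	 *	if (vaddr <= PAGE_SIZE)		goto longpath;
	 *	if (vaddr >= VMEMMAP_BASE)	goto vmemmap;
	 *	if (TSB hit)			goto load;
	 *	if (vaddr < MODULES_VADDR)	goto longpath;
	 *	if (vaddr >= VMALLOC_END)	goto longpath;
	 *	if (vaddr < LOW_OBP_ADDRESS)	goto vmalloc_addr;
	 *	if (vaddr < (1UL << 32))	goto obp;
	 *	goto vmalloc_addr;
	 */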
kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa	[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop