/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000 David S. Miller ([email protected])
 */

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>

/* Basically, most of the Spitfire vs. Cheetah madness
 * has to do with the fact that Cheetah does not support
 * IMMU flushes out of the secondary context.  Someone needs
 * to throw a south lake birthday party for the folks
 * in Microelectronics who refused to fix this shit.
 */

/* This file is meant to be read efficiently by the CPU, not humans.
 * Try not to fuck this up for anybody...
 */
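
/* All of the flush routines below are written for Spitfire and are
 * overwritten in place at boot time: cheetah_patch_cachetlbops()
 * patches in the __cheetah_* variants and hypervisor_patch_cachetlbops()
 * patches in the __hypervisor_* (sun4v) variants, both via
 * tlb_patch_one() near the end of this file.  That is what the
 * "N insns" annotations and the trailing nop padding are for -- each
 * patch site must be at least as long as any variant copied over it.
 */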
        .text
        .align          32
        .globl          __flush_tlb_mm
__flush_tlb_mm:         /* 18 insns */
        /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
        ldxa            [%o1] ASI_DMMU, %g2
        cmp             %g2, %o0
        bne,pn          %icc, __spitfire_flush_tlb_mm_slow
        mov             0x50, %g3
        stxa            %g0, [%g3] ASI_DMMU_DEMAP
        stxa            %g0, [%g3] ASI_IMMU_DEMAP
        sethi           %hi(KERNBASE), %g3
        flush           %g3
        retl
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop

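/* Note on __flush_tlb_pending below: each vaddrs[] entry is a
 * page-aligned virtual address with bit 0 used as a flag -- when it is
 * set, the I-TLB entry is demapped in addition to the D-TLB entry.
 * Roughly, as an illustrative C sketch (these helpers do not exist,
 * they merely stand in for the demap stores below):
 *
 *      for (i = nr - 1; i >= 0; i--) {
 *              unsigned long va = vaddrs[i] & ~1UL;
 *              if (vaddrs[i] & 1)
 *                      immu_demap_page(va);
 *              dmmu_demap_page(va);
 *      }
 */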
        .align          32
        .globl          __flush_tlb_pending
__flush_tlb_pending:    /* 26 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        rdpr            %pstate, %g7
        sllx            %o1, 3, %o1
        andn            %g7, PSTATE_IE, %g2
        wrpr            %g2, %pstate
        mov             SECONDARY_CONTEXT, %o4
        ldxa            [%o4] ASI_DMMU, %g2
        stxa            %o0, [%o4] ASI_DMMU
1:      sub             %o1, (1 << 3), %o1
        ldx             [%o2 + %o1], %o3
        andcc           %o3, 1, %g0
        andn            %o3, 1, %o3
        be,pn           %icc, 2f
        or              %o3, 0x10, %o3
        stxa            %g0, [%o3] ASI_IMMU_DEMAP
2:      stxa            %g0, [%o3] ASI_DMMU_DEMAP
        membar          #Sync
        brnz,pt         %o1, 1b
        nop
        stxa            %g2, [%o4] ASI_DMMU
        sethi           %hi(KERNBASE), %o4
        flush           %o4
        retl
        wrpr            %g7, 0x0, %pstate
        nop
        nop
        nop
        nop

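/* __flush_tlb_kernel_range below walks [start, end) one page at a
 * time, from the last page down to the first, issuing a nucleus-context
 * demap (the 0x20 or'ed into the address) to both the D-TLB and the
 * I-TLB for each page.
 */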
        .align          32
        .globl          __flush_tlb_kernel_range
__flush_tlb_kernel_range:       /* 16 insns */
        /* %o0=start, %o1=end */
        cmp             %o0, %o1
        be,pn           %xcc, 2f
        sethi           %hi(PAGE_SIZE), %o4
        sub             %o1, %o0, %o3
        sub             %o3, %o4, %o3
        or              %o0, 0x20, %o0          ! Nucleus
1:      stxa            %g0, [%o0 + %o3] ASI_DMMU_DEMAP
        stxa            %g0, [%o0 + %o3] ASI_IMMU_DEMAP
        membar          #Sync
        brnz,pt         %o3, 1b
        sub             %o3, %o4, %o3
2:      sethi           %hi(KERNBASE), %o3
        flush           %o3
        retl
        nop
        nop

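/* Slow path for __flush_tlb_mm above: taken when the secondary context
 * register does not already hold the context being flushed.  Interrupts
 * are disabled while the target context is temporarily installed and
 * demapped (using the 0x50 demap address already loaded into %g3 by
 * __flush_tlb_mm), then the previous context value is restored.
 */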
__spitfire_flush_tlb_mm_slow:
        rdpr            %pstate, %g1
        wrpr            %g1, PSTATE_IE, %pstate
        stxa            %o0, [%o1] ASI_DMMU
        stxa            %g0, [%g3] ASI_DMMU_DEMAP
        stxa            %g0, [%g3] ASI_IMMU_DEMAP
        flush           %g6
        stxa            %g2, [%o1] ASI_DMMU
        sethi           %hi(KERNBASE), %o1
        flush           %o1
        retl
        wrpr            %g1, 0, %pstate

/*
 * The following code flushes one page_size worth.
 */
        .section .kprobes.text, "ax"
        .align          32
        .globl          __flush_icache_page
__flush_icache_page:    /* %o0 = phys_page */
        membar          #StoreStore
        srlx            %o0, PAGE_SHIFT, %o0
        sethi           %uhi(PAGE_OFFSET), %g1
        sllx            %o0, PAGE_SHIFT, %o0
        sethi           %hi(PAGE_SIZE), %g2
        sllx            %g1, 32, %g1
        add             %o0, %g1, %o0
1:      subcc           %g2, 32, %g2
        bne,pt          %icc, 1b
        flush           %o0 + %g2
        retl
        nop

#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

/* This routine is Spitfire specific so the hardcoded
 * D-cache size and line-size are OK.
 */
        .align          64
        .globl          __flush_dcache_page
__flush_dcache_page:    /* %o0=kaddr, %o1=flush_icache */
        sethi           %uhi(PAGE_OFFSET), %g1
        sllx            %g1, 32, %g1
        sub             %o0, %g1, %o0                   ! physical address
        srlx            %o0, 11, %o0                    ! make D-cache TAG
        sethi           %hi(1 << 14), %o2               ! D-cache size
        sub             %o2, (1 << 5), %o2              ! D-cache line size
1:      ldxa            [%o2] ASI_DCACHE_TAG, %o3       ! load D-cache TAG
        andcc           %o3, DTAG_MASK, %g0             ! Valid?
        be,pn           %xcc, 2f                        ! Nope, branch
        andn            %o3, DTAG_MASK, %o3             ! Clear valid bits
        cmp             %o3, %o0                        ! TAG match?
        bne,pt          %xcc, 2f                        ! Nope, branch
        nop
        stxa            %g0, [%o2] ASI_DCACHE_TAG       ! Invalidate TAG
        membar          #Sync
2:      brnz,pt         %o2, 1b
        sub             %o2, (1 << 5), %o2              ! D-cache line size

        /* The I-cache does not snoop local stores so we
         * better flush that too when necessary.
         */
        brnz,pt         %o1, __flush_icache_page
        sllx            %o0, 11, %o0
        retl
        nop

#endif /* DCACHE_ALIASING_POSSIBLE */

        .previous

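/* The Cheetah variants below work around the limitation noted at the
 * top of this file (no IMMU demap from the secondary context): they
 * bump %tl, temporarily install the target context into
 * PRIMARY_CONTEXT while preserving the nucleus page-size fields, demap
 * from there, and then restore the original primary context.
 */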
/* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_mm: /* 19 insns */
        rdpr            %pstate, %g7
        andn            %g7, PSTATE_IE, %g2
        wrpr            %g2, 0x0, %pstate
        wrpr            %g0, 1, %tl
        mov             PRIMARY_CONTEXT, %o2
        mov             0x40, %g3
        ldxa            [%o2] ASI_DMMU, %g2
        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %o1
        sllx            %o1, CTX_PGSZ1_NUC_SHIFT, %o1
        or              %o0, %o1, %o0   /* Preserve nucleus page size fields */
        stxa            %o0, [%o2] ASI_DMMU
        stxa            %g0, [%g3] ASI_DMMU_DEMAP
        stxa            %g0, [%g3] ASI_IMMU_DEMAP
        stxa            %g2, [%o2] ASI_DMMU
        sethi           %hi(KERNBASE), %o2
        flush           %o2
        wrpr            %g0, 0, %tl
        retl
        wrpr            %g7, 0x0, %pstate

__cheetah_flush_tlb_pending:    /* 27 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        rdpr            %pstate, %g7
        sllx            %o1, 3, %o1
        andn            %g7, PSTATE_IE, %g2
        wrpr            %g2, 0x0, %pstate
        wrpr            %g0, 1, %tl
        mov             PRIMARY_CONTEXT, %o4
        ldxa            [%o4] ASI_DMMU, %g2
        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %o3
        sllx            %o3, CTX_PGSZ1_NUC_SHIFT, %o3
        or              %o0, %o3, %o0   /* Preserve nucleus page size fields */
        stxa            %o0, [%o4] ASI_DMMU
1:      sub             %o1, (1 << 3), %o1
        ldx             [%o2 + %o1], %o3
        andcc           %o3, 1, %g0
        be,pn           %icc, 2f
        andn            %o3, 1, %o3
        stxa            %g0, [%o3] ASI_IMMU_DEMAP
2:      stxa            %g0, [%o3] ASI_DMMU_DEMAP
        membar          #Sync
        brnz,pt         %o1, 1b
        nop
        stxa            %g2, [%o4] ASI_DMMU
        sethi           %hi(KERNBASE), %o4
        flush           %o4
        wrpr            %g0, 0, %tl
        retl
        wrpr            %g7, 0x0, %pstate

#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page:    /* 11 insns */
        sethi           %uhi(PAGE_OFFSET), %g1
        sllx            %g1, 32, %g1
        sub             %o0, %g1, %o0
        sethi           %hi(PAGE_SIZE), %o4
1:      subcc           %o4, (1 << 5), %o4
        stxa            %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
        membar          #Sync
        bne,pt          %icc, 1b
        nop
        retl            /* I-cache flush never needed on Cheetah, see callers. */
        nop
#endif /* DCACHE_ALIASING_POSSIBLE */

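/* The sun4v variants below go through the hypervisor rather than the
 * hardware demap ASIs: HV_FAST_MMU_DEMAP_CTX flushes an entire context
 * and HV_MMU_UNMAP_ADDR_TRAP unmaps one virtual address per call.  A
 * non-zero status returned in %o0 is reported through
 * __hypervisor_tlb_tl0_error (or __hypervisor_tlb_xcall_error in the
 * cross-call versions).
 */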
/* Hypervisor specific versions, patched at boot time. */
__hypervisor_tlb_tl0_error:
        save            %sp, -192, %sp
        mov             %i0, %o0
        call            hypervisor_tlbop_error
        mov             %i1, %o1
        ret
        restore

__hypervisor_flush_tlb_mm:      /* 10 insns */
        mov             %o0, %o2                /* ARG2: mmu context */
        mov             0, %o0                  /* ARG0: CPU lists unimplemented */
        mov             0, %o1                  /* ARG1: CPU lists unimplemented */
        mov             HV_MMU_ALL, %o3         /* ARG3: flags */
        mov             HV_FAST_MMU_DEMAP_CTX, %o5
        ta              HV_FAST_TRAP
        brnz,pn         %o0, __hypervisor_tlb_tl0_error
        mov             HV_FAST_MMU_DEMAP_CTX, %o1
        retl
        nop

__hypervisor_flush_tlb_pending: /* 16 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        sllx            %o1, 3, %g1
        mov             %o2, %g2
        mov             %o0, %g3
1:      sub             %g1, (1 << 3), %g1
        ldx             [%g2 + %g1], %o0        /* ARG0: vaddr + IMMU-bit */
        mov             %g3, %o1                /* ARG1: mmu context */
        mov             HV_MMU_ALL, %o2         /* ARG2: flags */
        srlx            %o0, PAGE_SHIFT, %o0
        sllx            %o0, PAGE_SHIFT, %o0
        ta              HV_MMU_UNMAP_ADDR_TRAP
        brnz,pn         %o0, __hypervisor_tlb_tl0_error
        mov             HV_MMU_UNMAP_ADDR_TRAP, %o1
        brnz,pt         %g1, 1b
        nop
        retl
        nop

__hypervisor_flush_tlb_kernel_range:    /* 16 insns */
        /* %o0=start, %o1=end */
        cmp             %o0, %o1
        be,pn           %xcc, 2f
        sethi           %hi(PAGE_SIZE), %g3
        mov             %o0, %g1
        sub             %o1, %g1, %g2
        sub             %g2, %g3, %g2
1:      add             %g1, %g2, %o0           /* ARG0: virtual address */
        mov             0, %o1                  /* ARG1: mmu context */
        mov             HV_MMU_ALL, %o2         /* ARG2: flags */
        ta              HV_MMU_UNMAP_ADDR_TRAP
        brnz,pn         %o0, __hypervisor_tlb_tl0_error
        mov             HV_MMU_UNMAP_ADDR_TRAP, %o1
        brnz,pt         %g2, 1b
        sub             %g2, %g3, %g2
2:      retl
        nop

#ifdef DCACHE_ALIASING_POSSIBLE
        /* XXX Niagara and friends have an 8K cache, so no aliasing is
         * XXX possible, but nothing explicit in the Hypervisor API
         * XXX guarantees this.
         */
__hypervisor_flush_dcache_page: /* 2 insns */
        retl
        nop
#endif

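/* tlb_patch_one: copy %o2 instruction words from the routine at %o1
 * over the routine at %o0, flushing the I-cache for each word stored
 * so the new code is visible to instruction fetch.  As an illustrative
 * C sketch (flushi() here is hypothetical shorthand for the flush
 * instruction):
 *
 *      for (i = 0; i < ninsns; i++) {
 *              dst[i] = src[i];
 *              flushi(&dst[i]);
 *      }
 */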
tlb_patch_one:
1:      lduw            [%o1], %g1
        stw             %g1, [%o0]
        flush           %o0
        subcc           %o2, 1, %o2
        add             %o1, 4, %o1
        bne,pt          %icc, 1b
        add             %o0, 4, %o0
        retl
        nop

        .globl          cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
        save            %sp, -128, %sp

        sethi           %hi(__flush_tlb_mm), %o0
        or              %o0, %lo(__flush_tlb_mm), %o0
        sethi           %hi(__cheetah_flush_tlb_mm), %o1
        or              %o1, %lo(__cheetah_flush_tlb_mm), %o1
        call            tlb_patch_one
        mov             19, %o2

        sethi           %hi(__flush_tlb_pending), %o0
        or              %o0, %lo(__flush_tlb_pending), %o0
        sethi           %hi(__cheetah_flush_tlb_pending), %o1
        or              %o1, %lo(__cheetah_flush_tlb_pending), %o1
        call            tlb_patch_one
        mov             27, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
        sethi           %hi(__flush_dcache_page), %o0
        or              %o0, %lo(__flush_dcache_page), %o0
        sethi           %hi(__cheetah_flush_dcache_page), %o1
        or              %o1, %lo(__cheetah_flush_dcache_page), %o1
        call            tlb_patch_one
        mov             11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

        ret
        restore

#ifdef CONFIG_SMP
        /* These are all called by the slaves of a cross call, at
         * trap level 1, with interrupts fully disabled.
         *
         * Register usage:
         *   %g5        mm->context     (all tlb flushes)
         *   %g1        address arg 1   (tlb page and range flushes)
         *   %g7        address arg 2   (tlb range flush only)
         *
         *   %g6        scratch 1
         *   %g2        scratch 2
         *   %g3        scratch 3
         *   %g4        scratch 4
         */
        .align          32
        .globl          xcall_flush_tlb_mm
xcall_flush_tlb_mm:     /* 21 insns */
        mov             PRIMARY_CONTEXT, %g2
        ldxa            [%g2] ASI_DMMU, %g3
        srlx            %g3, CTX_PGSZ1_NUC_SHIFT, %g4
        sllx            %g4, CTX_PGSZ1_NUC_SHIFT, %g4
        or              %g5, %g4, %g5   /* Preserve nucleus page size fields */
        stxa            %g5, [%g2] ASI_DMMU
        mov             0x40, %g4
        stxa            %g0, [%g4] ASI_DMMU_DEMAP
        stxa            %g0, [%g4] ASI_IMMU_DEMAP
        stxa            %g3, [%g2] ASI_DMMU
        retry
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop

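/* xcall_flush_tlb_pending below mirrors __flush_tlb_pending (same
 * vaddrs[] encoding, bit 0 requests an I-TLB demap as well) but takes
 * its arguments in the %g registers listed above and flushes through
 * PRIMARY_CONTEXT from the cross-call trap handler.
 */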
        .globl          xcall_flush_tlb_pending
xcall_flush_tlb_pending:        /* 21 insns */
        /* %g5=context, %g1=nr, %g7=vaddrs[] */
        sllx            %g1, 3, %g1
        mov             PRIMARY_CONTEXT, %g4
        ldxa            [%g4] ASI_DMMU, %g2
        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %g4
        sllx            %g4, CTX_PGSZ1_NUC_SHIFT, %g4
        or              %g5, %g4, %g5
        mov             PRIMARY_CONTEXT, %g4
        stxa            %g5, [%g4] ASI_DMMU
1:      sub             %g1, (1 << 3), %g1
        ldx             [%g7 + %g1], %g5
        andcc           %g5, 0x1, %g0
        be,pn           %icc, 2f
        andn            %g5, 0x1, %g5
        stxa            %g0, [%g5] ASI_IMMU_DEMAP
2:      stxa            %g0, [%g5] ASI_DMMU_DEMAP
        membar          #Sync
        brnz,pt         %g1, 1b
        nop
        stxa            %g2, [%g4] ASI_DMMU
        retry
        nop

        .globl          xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:   /* 25 insns */
        sethi           %hi(PAGE_SIZE - 1), %g2
        or              %g2, %lo(PAGE_SIZE - 1), %g2
        andn            %g1, %g2, %g1
        andn            %g7, %g2, %g7
        sub             %g7, %g1, %g3
        add             %g2, 1, %g2
        sub             %g3, %g2, %g3
        or              %g1, 0x20, %g1          ! Nucleus
1:      stxa            %g0, [%g1 + %g3] ASI_DMMU_DEMAP
        stxa            %g0, [%g1 + %g3] ASI_IMMU_DEMAP
        membar          #Sync
        brnz,pt         %g3, 1b
        sub             %g3, %g2, %g3
        retry
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop

        /* This runs in a very controlled environment, so we do
         * not need to worry about BH races etc.
         */
        .globl          xcall_sync_tick
xcall_sync_tick:

661:    rdpr            %pstate, %g2
        wrpr            %g2, PSTATE_IG | PSTATE_AG, %pstate
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        nop
        nop
        .previous

        rdpr            %pil, %g2
        wrpr            %g0, 15, %pil
        sethi           %hi(109f), %g7
        b,pt            %xcc, etrap_irq
109:    or              %g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
        call            trace_hardirqs_off
        nop
#endif
        call            smp_synchronize_tick_client
        nop
        clr             %l6
        b               rtrap_xcall
        ldx             [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

        /* NOTE: This is SPECIAL!!  We do etrap/rtrap however
         *       we choose to deal with the "BH's run with
         *       %pil==15" problem (described in asm/pil.h)
         *       by just invoking rtrap directly past where
         *       BH's are checked for.
         *
         *       We do it like this because we do not want %pil==15
         *       lockups to prevent regs being reported.
         */
        .globl          xcall_report_regs
xcall_report_regs:

661:    rdpr            %pstate, %g2
        wrpr            %g2, PSTATE_IG | PSTATE_AG, %pstate
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        nop
        nop
        .previous

        rdpr            %pil, %g2
        wrpr            %g0, 15, %pil
        sethi           %hi(109f), %g7
        b,pt            %xcc, etrap_irq
109:    or              %g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
        call            trace_hardirqs_off
        nop
#endif
        call            __show_regs
        add             %sp, PTREGS_OFF, %o0
        clr             %l6
        /* Has to be a non-v9 branch due to the large distance. */
        b               rtrap_xcall
        ldx             [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

#ifdef DCACHE_ALIASING_POSSIBLE
        .align          32
        .globl          xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
        sethi           %hi(PAGE_SIZE), %g3
1:      subcc           %g3, (1 << 5), %g3
        stxa            %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
        membar          #Sync
        bne,pt          %icc, 1b
        nop
        retry
        nop
#endif /* DCACHE_ALIASING_POSSIBLE */

        .globl          xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
                                     %g7 == kernel page virtual address
                                     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
        srlx            %g1, (13 - 2), %g1      ! Form tag comparator
        sethi           %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
        sub             %g3, (1 << 5), %g3      ! D$ linesize == 32
1:      ldxa            [%g3] ASI_DCACHE_TAG, %g2
        andcc           %g2, 0x3, %g0
        be,pn           %xcc, 2f
        andn            %g2, 0x3, %g2
        cmp             %g2, %g1

        bne,pt          %xcc, 2f
        nop
        stxa            %g0, [%g3] ASI_DCACHE_TAG
        membar          #Sync
2:      cmp             %g3, 0
        bne,pt          %xcc, 1b
        sub             %g3, (1 << 5), %g3

        brz,pn          %g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
        sethi           %hi(PAGE_SIZE), %g3

1:      flush           %g7
        subcc           %g3, (1 << 5), %g3
        bne,pt          %icc, 1b
        add             %g7, (1 << 5), %g7

2:      retry
        nop
        nop

        /* %g5: error
         * %g6: tlb op
         */
__hypervisor_tlb_xcall_error:
        mov             %g5, %g4
        mov             %g6, %g5
        ba,pt           %xcc, etrap
        rd              %pc, %g7
        mov             %l4, %o0
        call            hypervisor_tlbop_error_xcall
        mov             %l5, %o1
        ba,a,pt         %xcc, rtrap_clr_l6

        .globl          __hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
        /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
        mov             %o0, %g2
        mov             %o1, %g3
        mov             %o2, %g4
        mov             %o3, %g1
        mov             %o5, %g7
        clr             %o0             /* ARG0: CPU lists unimplemented */
        clr             %o1             /* ARG1: CPU lists unimplemented */
        mov             %g5, %o2        /* ARG2: mmu context */
        mov             HV_MMU_ALL, %o3 /* ARG3: flags */
        mov             HV_FAST_MMU_DEMAP_CTX, %o5
        ta              HV_FAST_TRAP
        mov             HV_FAST_MMU_DEMAP_CTX, %g6
        brnz,pn         %o0, __hypervisor_tlb_xcall_error
        mov             %o0, %g5
        mov             %g2, %o0
        mov             %g3, %o1
        mov             %g4, %o2
        mov             %g1, %o3
        mov             %g7, %o5
        membar          #Sync
        retry

        .globl          __hypervisor_xcall_flush_tlb_pending
__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
        /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
        sllx            %g1, 3, %g1
        mov             %o0, %g2
        mov             %o1, %g3
        mov             %o2, %g4
1:      sub             %g1, (1 << 3), %g1
        ldx             [%g7 + %g1], %o0        /* ARG0: virtual address */
        mov             %g5, %o1                /* ARG1: mmu context */
        mov             HV_MMU_ALL, %o2         /* ARG2: flags */
        srlx            %o0, PAGE_SHIFT, %o0
        sllx            %o0, PAGE_SHIFT, %o0
        ta              HV_MMU_UNMAP_ADDR_TRAP
        mov             HV_MMU_UNMAP_ADDR_TRAP, %g6
        brnz,a,pn       %o0, __hypervisor_tlb_xcall_error
        mov             %o0, %g5
        brnz,pt         %g1, 1b
        nop
        mov             %g2, %o0
        mov             %g3, %o1
        mov             %g4, %o2
        membar          #Sync
        retry

        .globl          __hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
        /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
        sethi           %hi(PAGE_SIZE - 1), %g2
        or              %g2, %lo(PAGE_SIZE - 1), %g2
        andn            %g1, %g2, %g1
        andn            %g7, %g2, %g7
        sub             %g7, %g1, %g3
        add             %g2, 1, %g2
        sub             %g3, %g2, %g3
        mov             %o0, %g2
        mov             %o1, %g4
        mov             %o2, %g7
1:      add             %g1, %g3, %o0   /* ARG0: virtual address */
        mov             0, %o1          /* ARG1: mmu context */
        mov             HV_MMU_ALL, %o2 /* ARG2: flags */
        ta              HV_MMU_UNMAP_ADDR_TRAP
        mov             HV_MMU_UNMAP_ADDR_TRAP, %g6
        brnz,pn         %o0, __hypervisor_tlb_xcall_error
        mov             %o0, %g5
        sethi           %hi(PAGE_SIZE), %o2
        brnz,pt         %g3, 1b
        sub             %g3, %o2, %g3
        mov             %g2, %o0
        mov             %g4, %o1
        mov             %g7, %o2
        membar          #Sync
        retry

        /* These just get rescheduled to PIL vectors. */
        .globl          xcall_call_function
xcall_call_function:
        wr              %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
        retry

        .globl          xcall_receive_signal
xcall_receive_signal:
        wr              %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
        retry

        .globl          xcall_capture
xcall_capture:
        wr              %g0, (1 << PIL_SMP_CAPTURE), %set_softint
        retry

        .globl          xcall_new_mmu_context_version
xcall_new_mmu_context_version:
        wr              %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
        retry

#endif /* CONFIG_SMP */
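
/* hypervisor_patch_cachetlbops: run at boot on sun4v (hypervisor)
 * systems to overwrite the generic flush routines -- and, under
 * CONFIG_SMP, the cross-call versions -- with the __hypervisor_*
 * variants above, using the instruction counts noted at each routine.
 */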

        .globl          hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
        save            %sp, -128, %sp

        sethi           %hi(__flush_tlb_mm), %o0
        or              %o0, %lo(__flush_tlb_mm), %o0
        sethi           %hi(__hypervisor_flush_tlb_mm), %o1
        or              %o1, %lo(__hypervisor_flush_tlb_mm), %o1
        call            tlb_patch_one
        mov             10, %o2

        sethi           %hi(__flush_tlb_pending), %o0
        or              %o0, %lo(__flush_tlb_pending), %o0
        sethi           %hi(__hypervisor_flush_tlb_pending), %o1
        or              %o1, %lo(__hypervisor_flush_tlb_pending), %o1
        call            tlb_patch_one
        mov             16, %o2

        sethi           %hi(__flush_tlb_kernel_range), %o0
        or              %o0, %lo(__flush_tlb_kernel_range), %o0
        sethi           %hi(__hypervisor_flush_tlb_kernel_range), %o1
        or              %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
        call            tlb_patch_one
        mov             16, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
        sethi           %hi(__flush_dcache_page), %o0
        or              %o0, %lo(__flush_dcache_page), %o0
        sethi           %hi(__hypervisor_flush_dcache_page), %o1
        or              %o1, %lo(__hypervisor_flush_dcache_page), %o1
        call            tlb_patch_one
        mov             2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
        sethi           %hi(xcall_flush_tlb_mm), %o0
        or              %o0, %lo(xcall_flush_tlb_mm), %o0
        sethi           %hi(__hypervisor_xcall_flush_tlb_mm), %o1
        or              %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
        call            tlb_patch_one
        mov             21, %o2

        sethi           %hi(xcall_flush_tlb_pending), %o0
        or              %o0, %lo(xcall_flush_tlb_pending), %o0
        sethi           %hi(__hypervisor_xcall_flush_tlb_pending), %o1
        or              %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
        call            tlb_patch_one
        mov             21, %o2

        sethi           %hi(xcall_flush_tlb_kernel_range), %o0
        or              %o0, %lo(xcall_flush_tlb_kernel_range), %o0
        sethi           %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
        or              %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
        call            tlb_patch_one
        mov             25, %o2
#endif /* CONFIG_SMP */

        ret
        restore