/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

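// These macros turn register indices into byte offsets within the vcpu's
// CPU context (via the asm-offsets constants): CPU_XREG_OFFSET(n) is the
// offset of general-purpose register xn, CPU_SPSR_OFFSET(n) of the n-th
// banked SPSR, and CPU_SYSREG_OFFSET(n) of the n-th saved system register,
// each entry being 8 bytes wide.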
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start

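// save_common_regs/restore_common_regs handle the state shared by the host
// and guest paths: the callee-saved registers x19-x28, x29 and lr, SP_EL0,
// the return state captured by EL2 (ELR_EL2/SPSR_EL2, i.e. the EL1 PC and
// pstate), and the EL1 SP/ELR/SPSR.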
.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20			// EL1 PC
	msr	spsr_el2, x21			// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm

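// The guest's x0-x3 were pushed on the EL2 stack by the exception entry
// code (el1_sync/el1_irq below); push/pop are the stp/ldp pair macros,
// presumably provided by <asm/assembler.h>.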
.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1
	mrs	x24, par_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	str	x24, [x3, #160]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldr	x24, [x3, #160]

	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
	msr	par_el1, x24
.endm

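// HCR_EL2.RW is set for a 64bit guest, which has no AArch32 banked state
// to save or restore, so the 32bit paths below are skipped in that case.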
.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm

.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	mrs	x4, spsr_abt
	mrs	x5, spsr_und
	mrs	x6, spsr_irq
	mrs	x7, spsr_fiq
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	mrs	x4, dacr32_el2
	mrs	x5, ifsr32_el2
	mrs	x6, fpexc32_el2
	mrs	x7, dbgvcr32_el2
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	mrs	x4, teecr32_el1
	mrs	x5, teehbr32_el1
	stp	x4, x5, [x3]
1:
.endm

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	spsr_abt, x4
	msr	spsr_und, x5
	msr	spsr_irq, x6
	msr	spsr_fiq, x7

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	dacr32_el2, x4
	msr	ifsr32_el2, x5
	msr	fpexc32_el2, x6
	msr	dbgvcr32_el2, x7

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	ldp	x4, x5, [x3]
	msr	teecr32_el1, x4
	msr	teehbr32_el1, x5
1:
.endm

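// activate_traps configures EL2 for guest execution: HCR_EL2 gets the vcpu's
// HCR value ORed with its pending virtual interrupt lines, CPTR_EL2.TTA traps
// trace register accesses, HSTR_EL2 traps CP15 c15 accesses, and MDCR_EL2
// traps PMU accesses while preserving the HPMN field. deactivate_traps undoes
// this for the host.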
.macro activate_traps
	ldr	x2, [x0, #VCPU_IRQ_LINES]
	ldr	x1, [x0, #VCPU_HCR_EL2]
	orr	x2, x2, x1
	msr	hcr_el2, x2

	ldr	x2, =(CPTR_EL2_TTA)
	msr	cptr_el2, x2

	ldr	x2, =(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	msr	mdcr_el2, x2
.endm

.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	cptr_el2, xzr
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm

/*
 * Save the VGIC CPU state into memory
 * x0: Register pointing to VCPU struct
 * Do not corrupt x1!!!
 */
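// (x1 carries the exit code on the __kvm_vcpu_return path, where this macro
// is invoked, hence the warning above.)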
.macro save_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* Save all interesting registers */
	ldr	w4, [x2, #GICH_HCR]
	ldr	w5, [x2, #GICH_VMCR]
	ldr	w6, [x2, #GICH_MISR]
	ldr	w7, [x2, #GICH_EISR0]
	ldr	w8, [x2, #GICH_EISR1]
	ldr	w9, [x2, #GICH_ELRSR0]
	ldr	w10, [x2, #GICH_ELRSR1]
	ldr	w11, [x2, #GICH_APR]
CPU_BE(	rev	w4,  w4  )
CPU_BE(	rev	w5,  w5  )
CPU_BE(	rev	w6,  w6  )
CPU_BE(	rev	w7,  w7  )
CPU_BE(	rev	w8,  w8  )
CPU_BE(	rev	w9,  w9  )
CPU_BE(	rev	w10, w10 )
CPU_BE(	rev	w11, w11 )

	str	w4, [x3, #VGIC_CPU_HCR]
	str	w5, [x3, #VGIC_CPU_VMCR]
	str	w6, [x3, #VGIC_CPU_MISR]
	str	w7, [x3, #VGIC_CPU_EISR]
	str	w8, [x3, #(VGIC_CPU_EISR + 4)]
	str	w9, [x3, #VGIC_CPU_ELRSR]
	str	w10, [x3, #(VGIC_CPU_ELRSR + 4)]
	str	w11, [x3, #VGIC_CPU_APR]

	/* Clear GICH_HCR */
	str	wzr, [x2, #GICH_HCR]

	/* Save list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
1:	ldr	w5, [x2], #4
CPU_BE(	rev	w5, w5 )
	str	w5, [x3], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
.endm

/*
 * Restore the VGIC CPU state from memory
 * x0: Register pointing to VCPU struct
 */
.macro restore_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* We only restore a minimal set of registers */
	ldr	w4, [x3, #VGIC_CPU_HCR]
	ldr	w5, [x3, #VGIC_CPU_VMCR]
	ldr	w6, [x3, #VGIC_CPU_APR]
CPU_BE(	rev	w4, w4 )
CPU_BE(	rev	w5, w5 )
CPU_BE(	rev	w6, w6 )

	str	w4, [x2, #GICH_HCR]
	str	w5, [x2, #GICH_VMCR]
	str	w6, [x2, #GICH_APR]

	/* Restore list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
1:	ldr	w5, [x3], #4
CPU_BE(	rev	w5, w5 )
	str	w5, [x2], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
.endm

.macro save_timer_state
	// x0: vcpu pointer
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	mrs	x3, cntv_ctl_el0
	and	x3, x3, #3
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable
	msr	cntv_ctl_el0, x3

	isb

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm

.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2
	isb

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	and	x2, x2, #3
	msr	cntv_ctl_el0, x2
1:
.endm

__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

__save_fpsimd:
	save_fpsimd
	ret

__restore_fpsimd:
	restore_fpsimd
	ret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl	__save_fpsimd
	bl	__save_sysregs

	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	bl	__restore_sysregs
	bl	__restore_fpsimd
	restore_guest_32bit_state
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl	__save_fpsimd
	bl	__save_sysregs
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl	__restore_sysregs
	bl	__restore_fpsimd
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	tlbi	ipas2e1is, x1
	dsb	sy
	tlbi	vmalle1is
	dsb	sy
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

ENTRY(__kvm_flush_vm_context)
	dsb	ishst
	tlbi	alle1is
	ic	ialluis
	dsb	sy
	ret
ENDPROC(__kvm_flush_vm_context)

__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	//   If zero, then we're already a host.
	//   Otherwise restore a minimal host context before panicing.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl	__restore_sysregs

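	// Convert the hyp VA of the panic string into a kernel VA (subtract
	// HYP_PAGE_OFFSET, add PAGE_OFFSET) so that panic() can dereference
	// it once we have ERETed back to EL1.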
1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	mrs	x6, par_el1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align	2

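// kvm_call_hyp(fn, ...): issue an HVC from the host. The el1_sync handler
// below recognises the host case (VTTBR_EL2 == 0), converts the function
// pointer in x0 to a hyp VA, shifts the remaining arguments down and
// branches to it.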
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:					// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_EL2_EC_SHIFT

	cmp	x2, #ESR_EL2_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2			// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap			// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
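	// The ccmp only performs the second compare (against IABT) when the
	// first one failed; otherwise it forces Z (#4), so the b.ne is taken
	// only if the EC is neither a data nor an instruction abort.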
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT
	ccmp	x2, x0, #4, ne
	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f		// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	x1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

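// The EL2 vector table must be 2kB-aligned (VBAR_EL2 bits [10:0] are res0),
// hence the .align 11 below; each of the 16 entries is 128 bytes.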
	.align	11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2h_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.popsection