Git Repo - linux.git/commitdiff
KVM: arm64: Move PC rollback on SError to HYP
author: Marc Zyngier <[email protected]>
Wed, 14 Oct 2020 11:12:45 +0000 (12:12 +0100)
committer: Marc Zyngier <[email protected]>
Tue, 10 Nov 2020 08:34:25 +0000 (08:34 +0000)
Instead of handling the "PC rollback on SError during HVC" at EL1 (which
requires disclosing PC to a potentially untrusted kernel), let's move
this fixup to ... fixup_guest_exit(), which is where we do all fixups.

Isn't that neat?

Signed-off-by: Marc Zyngier <[email protected]>
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp/include/hyp/switch.h

index d4e00a864ee6daa0b5a01dd5411cb3f7acae25ad..f79137ee4274a0b3eaeba16187ac7f38a209903e 100644 (file)
@@ -241,23 +241,6 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
 {
        struct kvm_run *run = vcpu->run;
 
-       if (ARM_SERROR_PENDING(exception_index)) {
-               u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
-
-               /*
-                * HVC already have an adjusted PC, which we need to
-                * correct in order to return to after having injected
-                * the SError.
-                *
-                * SMC, on the other hand, is *trapped*, meaning its
-                * preferred return address is the SMC itself.
-                */
-               if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
-                       *vcpu_pc(vcpu) -= 4;
-
-               return 1;
-       }
-
        exception_index = ARM_EXCEPTION_CODE(exception_index);
 
        switch (exception_index) {
index 8b2328f62a074255d0f3f3a0a437a2cfb1e0718f..84473574c2e7db4697cfa8f090a088ba82e7bc34 100644 (file)
@@ -411,6 +411,21 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
        if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
                vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
 
+       if (ARM_SERROR_PENDING(*exit_code)) {
+               u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+               /*
+                * HVC already have an adjusted PC, which we need to
+                * correct in order to return to after having injected
+                * the SError.
+                *
+                * SMC, on the other hand, is *trapped*, meaning its
+                * preferred return address is the SMC itself.
+                */
+               if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
+                       write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
+       }
+
        /*
         * We're using the raw exception code in order to only process
         * the trap if no SError is pending. We will come back to the
This page took 0.059501 seconds and 4 git commands to generate.