 {
 	struct kvm_run *run = vcpu->run;
 
-	if (ARM_SERROR_PENDING(exception_index)) {
-		u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
-
-		/*
-		 * HVC already have an adjusted PC, which we need to
-		 * correct in order to return to after having injected
-		 * the SError.
-		 *
-		 * SMC, on the other hand, is *trapped*, meaning its
-		 * preferred return address is the SMC itself.
-		 */
-		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
-			*vcpu_pc(vcpu) -= 4;
-
-		return 1;
-	}
-
 	exception_index = ARM_EXCEPTION_CODE(exception_index);
 
 	switch (exception_index) {
 	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
 		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
 
+	if (ARM_SERROR_PENDING(*exit_code)) {
+		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+		/*
+		 * HVC already have an adjusted PC, which we need to
+		 * correct in order to return to after having injected
+		 * the SError.
+		 *
+		 * SMC, on the other hand, is *trapped*, meaning its
+		 * preferred return address is the SMC itself.
+		 */
+		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
+			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
+	}
+
 	/*
 	 * We're using the raw exception code in order to only process
 	 * the trap if no SError is pending. We will come back to the