Git Repo - J-linux.git/commitdiff
Merge tag 'kvm-x86-svm-6.6' of https://github.com/kvm-x86/linux into HEAD
author Paolo Bonzini <[email protected]>
Thu, 31 Aug 2023 17:32:40 +0000 (13:32 -0400)
committer Paolo Bonzini <[email protected]>
Thu, 31 Aug 2023 17:32:40 +0000 (13:32 -0400)
KVM: x86: SVM changes for 6.6:

 - Add support for SEV-ES DebugSwap, i.e. allow SEV-ES guests to use debug
   registers and generate/handle #DBs (a condensed sketch of the enablement
   gating follows this list)

 - Clean up LBR virtualization code

 - Fix a bug where KVM fails to set the target pCPU during an IRTE update

 - Fix fatal bugs in SEV-ES intrahost migration

 - Fix a bug where the recent (architecturally correct) change to reinject
   #BP and skip INT3 broke SEV guests (can't decode INT3 to skip it)

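A condensed sketch of the DebugSwap enablement gating, pulled together from the arch/x86/kvm/svm/sev.c hunks further down this page. The two wrapper functions are hypothetical and exist here only for readability; in the actual commit the same logic is open-coded in sev_hardware_setup() and sev_es_sync_vmsa().

/*
 * Illustrative only, not a drop-in patch.  DebugSwap defaults to on but is
 * force-disabled unless SEV-ES itself is enabled and the CPU advertises both
 * DEBUG_SWAP and NO_NESTED_DATA_BP, because #DB is no longer intercepted for
 * DebugSwap-enabled guests.
 */
static bool sev_es_debug_swap_enabled = true;
module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);

static void sev_es_debug_swap_setup(void)              /* hypothetical wrapper */
{
        if (!sev_es_enabled ||
            !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) ||
            !cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP))
                sev_es_debug_swap_enabled = false;
}

static void sev_es_advertise_debug_swap(struct sev_es_save_area *save) /* hypothetical wrapper */
{
        /* Tell the guest, via its VMSA, that full debug state swap is on. */
        if (sev_es_debug_swap_enabled)
                save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP;
}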
arch/x86/include/asm/cpufeatures.h
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
tools/arch/x86/include/asm/cpufeatures.h

diff --combined arch/x86/include/asm/cpufeatures.h
index b69b0d7756aab7fc4ca5e7660c7317f9ae0132c3,31c862d79fae220edf84b233a583ea4b495ceac8..946513218a3f9a87e067801d65e599ebde662acc
@@@ -14,7 -14,7 +14,7 @@@
   * Defines x86 CPU feature bits
   */
  #define NCAPINTS                      21         /* N 32-bit words worth of info */
 -#define NBUGINTS                      1          /* N 32-bit bug flags */
 +#define NBUGINTS                      2          /* N 32-bit bug flags */
  
  /*
   * Note: If the comment begins with a quoted string, that string is used
  #define X86_FEATURE_SMBA              (11*32+21) /* "" Slow Memory Bandwidth Allocation */
  #define X86_FEATURE_BMEC              (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
  
 +#define X86_FEATURE_SRSO              (11*32+24) /* "" AMD BTB untrain RETs */
 +#define X86_FEATURE_SRSO_ALIAS                (11*32+25) /* "" AMD BTB untrain RETs through aliasing */
 +#define X86_FEATURE_IBPB_ON_VMEXIT    (11*32+26) /* "" Issue an IBPB only on VMEXIT */
 +
  /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
  #define X86_FEATURE_AVX_VNNI          (12*32+ 4) /* AVX VNNI instructions */
  #define X86_FEATURE_AVX512_BF16               (12*32+ 5) /* AVX512 BFLOAT16 instructions */
  #define X86_FEATURE_SEV_ES            (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
  #define X86_FEATURE_V_TSC_AUX         (19*32+ 9) /* "" Virtual TSC_AUX */
  #define X86_FEATURE_SME_COHERENT      (19*32+10) /* "" AMD hardware-enforced cache coherency */
+ #define X86_FEATURE_DEBUG_SWAP                (19*32+14) /* AMD SEV-ES full debug state swap support */
  
  /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
  #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
  #define X86_FEATURE_AUTOIBRS          (20*32+ 8) /* "" Automatic IBRS */
  #define X86_FEATURE_NO_SMM_CTL_MSR    (20*32+ 9) /* "" SMM_CTL MSR is not present */
  
 +#define X86_FEATURE_SBPB              (20*32+27) /* "" Selective Branch Prediction Barrier */
 +#define X86_FEATURE_IBPB_BRTYPE               (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
 +#define X86_FEATURE_SRSO_NO           (20*32+29) /* "" CPU is not affected by SRSO */
 +
  /*
   * BUG word(s)
   */
  #define X86_BUG_RETBLEED              X86_BUG(27) /* CPU is affected by RETBleed */
  #define X86_BUG_EIBRS_PBRSB           X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
  #define X86_BUG_SMT_RSB                       X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
 +#define X86_BUG_GDS                   X86_BUG(30) /* CPU is affected by Gather Data Sampling */
  
 +/* BUG word 2 */
 +#define X86_BUG_SRSO                  X86_BUG(1*32 + 0) /* AMD SRSO bug */
 +#define X86_BUG_DIV0                  X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
  #endif /* _ASM_X86_CPUFEATURES_H */
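Word 19 above is the AMD memory-encryption feature word, which is generally populated from CPUID leaf 0x8000001F EAX, so the new X86_FEATURE_DEBUG_SWAP at (19*32+14) should correspond to bit 14 of that leaf; this mapping is inferred from the word-19 definitions, not spelled out in this diff. A minimal user-space probe under that assumption:

/* Hedged sketch: assumes SEV-ES is CPUID 0x8000001F EAX bit 3 and DebugSwap
 * is bit 14, matching the word-19 #defines above.  Build: gcc -O2 probe.c */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(0x8000001f, 0, &eax, &ebx, &ecx, &edx))
                return 1;               /* leaf not implemented */

        printf("SEV-ES:    %s\n", (eax & (1u << 3))  ? "yes" : "no");
        printf("DebugSwap: %s\n", (eax & (1u << 14)) ? "yes" : "no");
        return 0;
}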
diff --combined arch/x86/kvm/svm/sev.c
index d3aec1f2cad20f0507a02b3987b42c33c4419bef,85d1abdf7d7d3cdf4e13ee423cfb790aea28438e..b9a0a939d59f3826edb3d6839413e5dfd7ae66b8
@@@ -23,6 -23,7 +23,7 @@@
  #include <asm/pkru.h>
  #include <asm/trapnr.h>
  #include <asm/fpu/xcr.h>
+ #include <asm/debugreg.h>
  
  #include "mmu.h"
  #include "x86.h"
@@@ -54,9 -55,14 +55,14 @@@ module_param_named(sev, sev_enabled, bo
  /* enable/disable SEV-ES support */
  static bool sev_es_enabled = true;
  module_param_named(sev_es, sev_es_enabled, bool, 0444);
+ /* enable/disable SEV-ES DebugSwap support */
+ static bool sev_es_debug_swap_enabled = true;
+ module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
  #else
  #define sev_enabled false
  #define sev_es_enabled false
+ #define sev_es_debug_swap_enabled false
  #endif /* CONFIG_KVM_AMD_SEV */
  
  static u8 sev_enc_bit;
@@@ -606,6 -612,9 +612,9 @@@ static int sev_es_sync_vmsa(struct vcpu
        save->xss  = svm->vcpu.arch.ia32_xss;
        save->dr6  = svm->vcpu.arch.dr6;
  
+       if (sev_es_debug_swap_enabled)
+               save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP;
        pr_debug("Virtual Machine Save Area (VMSA):\n");
        print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
  
@@@ -619,6 -628,11 +628,11 @@@ static int __sev_launch_update_vmsa(str
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;
  
+       if (vcpu->guest_debug) {
+               pr_warn_once("KVM_SET_GUEST_DEBUG for SEV-ES guest is not supported");
+               return -EINVAL;
+       }
        /* Perform some pre-encryption checks against the VMSA */
        ret = sev_es_sync_vmsa(svm);
        if (ret)
@@@ -1725,7 -1739,7 +1739,7 @@@ static void sev_migrate_from(struct kv
                 * Note, the source is not required to have the same number of
                 * vCPUs as the destination when migrating a vanilla SEV VM.
                 */
-               src_vcpu = kvm_get_vcpu(dst_kvm, i);
+               src_vcpu = kvm_get_vcpu(src_kvm, i);
                src_svm = to_svm(src_vcpu);
  
                /*
@@@ -2171,7 -2185,7 +2185,7 @@@ void __init sev_hardware_setup(void
        bool sev_es_supported = false;
        bool sev_supported = false;
  
-       if (!sev_enabled || !npt_enabled)
+       if (!sev_enabled || !npt_enabled || !nrips)
                goto out;
  
        /*
@@@ -2256,6 -2270,9 +2270,9 @@@ out
  
        sev_enabled = sev_supported;
        sev_es_enabled = sev_es_supported;
+       if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) ||
+           !cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP))
+               sev_es_debug_swap_enabled = false;
  #endif
  }
  
@@@ -2417,18 -2434,15 +2434,18 @@@ static void sev_es_sync_from_ghcb(struc
         */
        memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
  
 -      vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
 -      vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
 -      vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
 -      vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
 -      vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
 +      BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
 +      memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
  
 -      svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
 +      vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
 +      vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
 +      vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
 +      vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
 +      vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
  
 -      if (ghcb_xcr0_is_valid(ghcb)) {
 +      svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
 +
 +      if (kvm_ghcb_xcr0_is_valid(svm)) {
                vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
                kvm_update_cpuid_runtime(vcpu);
        }
        control->exit_code_hi = upper_32_bits(exit_code);
        control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
        control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
 +      svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
  
        /* Clear the valid entries fields */
        memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
  }
  
 +static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
 +{
 +      return (((u64)control->exit_code_hi) << 32) | control->exit_code;
 +}
 +
  static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
  {
 -      struct kvm_vcpu *vcpu;
 -      struct ghcb *ghcb;
 +      struct vmcb_control_area *control = &svm->vmcb->control;
 +      struct kvm_vcpu *vcpu = &svm->vcpu;
        u64 exit_code;
        u64 reason;
  
 -      ghcb = svm->sev_es.ghcb;
 -
        /*
         * Retrieve the exit code now even though it may not be marked valid
         * as it could help with debugging.
         */
 -      exit_code = ghcb_get_sw_exit_code(ghcb);
 +      exit_code = kvm_ghcb_get_sw_exit_code(control);
  
        /* Only GHCB Usage code 0 is supported */
 -      if (ghcb->ghcb_usage) {
 +      if (svm->sev_es.ghcb->ghcb_usage) {
                reason = GHCB_ERR_INVALID_USAGE;
                goto vmgexit_err;
        }
  
        reason = GHCB_ERR_MISSING_INPUT;
  
 -      if (!ghcb_sw_exit_code_is_valid(ghcb) ||
 -          !ghcb_sw_exit_info_1_is_valid(ghcb) ||
 -          !ghcb_sw_exit_info_2_is_valid(ghcb))
 +      if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
 +          !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
 +          !kvm_ghcb_sw_exit_info_2_is_valid(svm))
                goto vmgexit_err;
  
 -      switch (ghcb_get_sw_exit_code(ghcb)) {
 +      switch (exit_code) {
        case SVM_EXIT_READ_DR7:
                break;
        case SVM_EXIT_WRITE_DR7:
 -              if (!ghcb_rax_is_valid(ghcb))
 +              if (!kvm_ghcb_rax_is_valid(svm))
                        goto vmgexit_err;
                break;
        case SVM_EXIT_RDTSC:
                break;
        case SVM_EXIT_RDPMC:
 -              if (!ghcb_rcx_is_valid(ghcb))
 +              if (!kvm_ghcb_rcx_is_valid(svm))
                        goto vmgexit_err;
                break;
        case SVM_EXIT_CPUID:
 -              if (!ghcb_rax_is_valid(ghcb) ||
 -                  !ghcb_rcx_is_valid(ghcb))
 +              if (!kvm_ghcb_rax_is_valid(svm) ||
 +                  !kvm_ghcb_rcx_is_valid(svm))
                        goto vmgexit_err;
 -              if (ghcb_get_rax(ghcb) == 0xd)
 -                      if (!ghcb_xcr0_is_valid(ghcb))
 +              if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
 +                      if (!kvm_ghcb_xcr0_is_valid(svm))
                                goto vmgexit_err;
                break;
        case SVM_EXIT_INVD:
                break;
        case SVM_EXIT_IOIO:
 -              if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
 -                      if (!ghcb_sw_scratch_is_valid(ghcb))
 +              if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
 +                      if (!kvm_ghcb_sw_scratch_is_valid(svm))
                                goto vmgexit_err;
                } else {
 -                      if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
 -                              if (!ghcb_rax_is_valid(ghcb))
 +                      if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
 +                              if (!kvm_ghcb_rax_is_valid(svm))
                                        goto vmgexit_err;
                }
                break;
        case SVM_EXIT_MSR:
 -              if (!ghcb_rcx_is_valid(ghcb))
 +              if (!kvm_ghcb_rcx_is_valid(svm))
                        goto vmgexit_err;
 -              if (ghcb_get_sw_exit_info_1(ghcb)) {
 -                      if (!ghcb_rax_is_valid(ghcb) ||
 -                          !ghcb_rdx_is_valid(ghcb))
 +              if (control->exit_info_1) {
 +                      if (!kvm_ghcb_rax_is_valid(svm) ||
 +                          !kvm_ghcb_rdx_is_valid(svm))
                                goto vmgexit_err;
                }
                break;
        case SVM_EXIT_VMMCALL:
 -              if (!ghcb_rax_is_valid(ghcb) ||
 -                  !ghcb_cpl_is_valid(ghcb))
 +              if (!kvm_ghcb_rax_is_valid(svm) ||
 +                  !kvm_ghcb_cpl_is_valid(svm))
                        goto vmgexit_err;
                break;
        case SVM_EXIT_RDTSCP:
        case SVM_EXIT_WBINVD:
                break;
        case SVM_EXIT_MONITOR:
 -              if (!ghcb_rax_is_valid(ghcb) ||
 -                  !ghcb_rcx_is_valid(ghcb) ||
 -                  !ghcb_rdx_is_valid(ghcb))
 +              if (!kvm_ghcb_rax_is_valid(svm) ||
 +                  !kvm_ghcb_rcx_is_valid(svm) ||
 +                  !kvm_ghcb_rdx_is_valid(svm))
                        goto vmgexit_err;
                break;
        case SVM_EXIT_MWAIT:
 -              if (!ghcb_rax_is_valid(ghcb) ||
 -                  !ghcb_rcx_is_valid(ghcb))
 +              if (!kvm_ghcb_rax_is_valid(svm) ||
 +                  !kvm_ghcb_rcx_is_valid(svm))
                        goto vmgexit_err;
                break;
        case SVM_VMGEXIT_MMIO_READ:
        case SVM_VMGEXIT_MMIO_WRITE:
 -              if (!ghcb_sw_scratch_is_valid(ghcb))
 +              if (!kvm_ghcb_sw_scratch_is_valid(svm))
                        goto vmgexit_err;
                break;
        case SVM_VMGEXIT_NMI_COMPLETE:
        return 0;
  
  vmgexit_err:
 -      vcpu = &svm->vcpu;
 -
        if (reason == GHCB_ERR_INVALID_USAGE) {
                vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
 -                          ghcb->ghcb_usage);
 +                          svm->sev_es.ghcb->ghcb_usage);
        } else if (reason == GHCB_ERR_INVALID_EVENT) {
                vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
                            exit_code);
                dump_ghcb(svm);
        }
  
 -      /* Clear the valid entries fields */
 -      memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 -
 -      ghcb_set_sw_exit_info_1(ghcb, 2);
 -      ghcb_set_sw_exit_info_2(ghcb, reason);
 +      ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
 +      ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
  
        /* Resume the guest to "return" the error code. */
        return 1;
@@@ -2588,7 -2603,7 +2605,7 @@@ void sev_es_unmap_ghcb(struct vcpu_svm 
                 */
                if (svm->sev_es.ghcb_sa_sync) {
                        kvm_write_guest(svm->vcpu.kvm,
 -                                      ghcb_get_sw_scratch(svm->sev_es.ghcb),
 +                                      svm->sev_es.sw_scratch,
                                        svm->sev_es.ghcb_sa,
                                        svm->sev_es.ghcb_sa_len);
                        svm->sev_es.ghcb_sa_sync = false;
@@@ -2634,11 -2649,12 +2651,11 @@@ void pre_sev_run(struct vcpu_svm *svm, 
  static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
  {
        struct vmcb_control_area *control = &svm->vmcb->control;
 -      struct ghcb *ghcb = svm->sev_es.ghcb;
        u64 ghcb_scratch_beg, ghcb_scratch_end;
        u64 scratch_gpa_beg, scratch_gpa_end;
        void *scratch_va;
  
 -      scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
 +      scratch_gpa_beg = svm->sev_es.sw_scratch;
        if (!scratch_gpa_beg) {
                pr_err("vmgexit: scratch gpa not provided\n");
                goto e_scratch;
        return 0;
  
  e_scratch:
 -      ghcb_set_sw_exit_info_1(ghcb, 2);
 -      ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
 +      ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
 +      ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
  
        return 1;
  }
@@@ -2823,6 -2839,7 +2840,6 @@@ int sev_handle_vmgexit(struct kvm_vcpu 
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        u64 ghcb_gpa, exit_code;
 -      struct ghcb *ghcb;
        int ret;
  
        /* Validate the GHCB */
        }
  
        svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
 -      ghcb = svm->sev_es.ghcb_map.hva;
  
 -      trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
 -
 -      exit_code = ghcb_get_sw_exit_code(ghcb);
 +      trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);
  
 +      sev_es_sync_from_ghcb(svm);
        ret = sev_es_validate_vmgexit(svm);
        if (ret)
                return ret;
  
 -      sev_es_sync_from_ghcb(svm);
 -      ghcb_set_sw_exit_info_1(ghcb, 0);
 -      ghcb_set_sw_exit_info_2(ghcb, 0);
 +      ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
 +      ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
  
 +      exit_code = kvm_ghcb_get_sw_exit_code(control);
        switch (exit_code) {
        case SVM_VMGEXIT_MMIO_READ:
                ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
                                            svm->sev_es.ghcb_sa);
                break;
        case SVM_VMGEXIT_NMI_COMPLETE:
-               ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
+               ++vcpu->stat.nmi_window_exits;
+               svm->nmi_masked = false;
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
+               ret = 1;
                break;
        case SVM_VMGEXIT_AP_HLT_LOOP:
                ret = kvm_emulate_ap_reset_hold(vcpu);
                        break;
                case 1:
                        /* Get AP jump table address */
 -                      ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
 +                      ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
                        break;
                default:
                        pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
                               control->exit_info_1);
 -                      ghcb_set_sw_exit_info_1(ghcb, 2);
 -                      ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
 +                      ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
 +                      ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
                }
  
                ret = 1;
@@@ -2944,6 -2966,7 +2964,7 @@@ int sev_es_string_io(struct vcpu_svm *s
  
  static void sev_es_init_vmcb(struct vcpu_svm *svm)
  {
+       struct vmcb *vmcb = svm->vmcb01.ptr;
        struct kvm_vcpu *vcpu = &svm->vcpu;
  
        svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
        /*
         * An SEV-ES guest requires a VMSA area that is a separate from the
         * VMCB page. Do not include the encryption mask on the VMSA physical
-        * address since hardware will access it using the guest key.
+        * address since hardware will access it using the guest key.  Note,
+        * the VMSA will be NULL if this vCPU is the destination for intrahost
+        * migration, and will be copied later.
         */
-       svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
+       if (svm->sev_es.vmsa)
+               svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
  
        /* Can't intercept CR register access, HV can't modify CR registers */
        svm_clr_intercept(svm, INTERCEPT_CR0_READ);
        svm_set_intercept(svm, TRAP_CR4_WRITE);
        svm_set_intercept(svm, TRAP_CR8_WRITE);
  
-       /* No support for enable_vmware_backdoor */
-       clr_exception_intercept(svm, GP_VECTOR);
+       vmcb->control.intercepts[INTERCEPT_DR] = 0;
+       if (!sev_es_debug_swap_enabled) {
+               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
+               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
+               recalc_intercepts(svm);
+       } else {
+               /*
+                * Disable #DB intercept iff DebugSwap is enabled.  KVM doesn't
+                * allow debugging SEV-ES guests, and enables DebugSwap iff
+                * NO_NESTED_DATA_BP is supported, so there's no reason to
+                * intercept #DB when DebugSwap is enabled.  For simplicity
+                * with respect to guest debug, intercept #DB for other VMs
+                * even if NO_NESTED_DATA_BP is supported, i.e. even if the
+                * guest can't DoS the CPU with infinite #DB vectoring.
+                */
+               clr_exception_intercept(svm, DB_VECTOR);
+       }
  
        /* Can't intercept XSETBV, HV can't modify XCR0 directly */
        svm_clr_intercept(svm, INTERCEPT_XSETBV);
@@@ -3000,6 -3041,12 +3039,12 @@@ void sev_init_vmcb(struct vcpu_svm *svm
        svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
        clr_exception_intercept(svm, UD_VECTOR);
  
+       /*
+        * Don't intercept #GP for SEV guests, e.g. for the VMware backdoor, as
+        * KVM can't decrypt guest memory to decode the faulting instruction.
+        */
+       clr_exception_intercept(svm, GP_VECTOR);
        if (sev_es_guest(svm->vcpu.kvm))
                sev_es_init_vmcb(svm);
  }
@@@ -3018,20 -3065,41 +3063,41 @@@ void sev_es_vcpu_reset(struct vcpu_svm 
  void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa)
  {
        /*
-        * As an SEV-ES guest, hardware will restore the host state on VMEXIT,
-        * of which one step is to perform a VMLOAD.  KVM performs the
-        * corresponding VMSAVE in svm_prepare_guest_switch for both
-        * traditional and SEV-ES guests.
+        * All host state for SEV-ES guests is categorized into three swap types
+        * based on how it is handled by hardware during a world switch:
+        *
+        * A: VMRUN:   Host state saved in host save area
+        *    VMEXIT:  Host state loaded from host save area
+        *
+        * B: VMRUN:   Host state _NOT_ saved in host save area
+        *    VMEXIT:  Host state loaded from host save area
+        *
+        * C: VMRUN:   Host state _NOT_ saved in host save area
+        *    VMEXIT:  Host state initialized to default(reset) values
+        *
+        * Manually save type-B state, i.e. state that is loaded by VMEXIT but
+        * isn't saved by VMRUN, that isn't already saved by VMSAVE (performed
+        * by common SVM code).
         */
-       /* XCR0 is restored on VMEXIT, save the current host value */
        hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-       /* PKRU is restored on VMEXIT, save the current host value */
        hostsa->pkru = read_pkru();
-       /* MSR_IA32_XSS is restored on VMEXIT, save the currnet host value */
        hostsa->xss = host_xss;
+       /*
+        * If DebugSwap is enabled, debug registers are loaded but NOT saved by
+        * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
+        * saves and loads debug registers (Type-A).
+        */
+       if (sev_es_debug_swap_enabled) {
+               hostsa->dr0 = native_get_debugreg(0);
+               hostsa->dr1 = native_get_debugreg(1);
+               hostsa->dr2 = native_get_debugreg(2);
+               hostsa->dr3 = native_get_debugreg(3);
+               hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
+               hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
+               hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
+               hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3);
+       }
  }
  
  void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
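Because debug_swap is registered via module_param_named(..., 0444) in the hunk above, the setting should be readable (but not writable) at runtime through sysfs; the exact path below is inferred from the parameter definition and the kvm_amd module name, not stated anywhere in this commit. Disabling the feature at load time would then amount to passing debug_swap=0 to kvm_amd.

/* Sketch, under the assumption stated above: report whether KVM enabled
 * SEV-ES DebugSwap on this host.  Bool module parameters read back as "Y"
 * or "N" through sysfs. */
#include <stdio.h>

int main(void)
{
        char val[8] = "";
        FILE *f = fopen("/sys/module/kvm_amd/parameters/debug_swap", "r");

        if (!f)
                return 1;       /* kvm_amd not loaded, or parameter absent */
        if (fgets(val, sizeof(val), f))
                printf("debug_swap: %s", val);
        fclose(f);
        return 0;
}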
diff --combined arch/x86/kvm/svm/svm.c
index d4bfdc607fe7f3257c9c68a4ee34c6a2c3a7e5c9,5cf2380c89dd66fef243671f485a509efa3939b3..488814e919ca0ef3b51574e1a5a51ec320c6170f
@@@ -203,7 -203,7 +203,7 @@@ static int nested = true
  module_param(nested, int, S_IRUGO);
  
  /* enable/disable Next RIP Save */
static int nrips = true;
+ int nrips = true;
  module_param(nrips, int, 0444);
  
  /* enable/disable Virtual VMLOAD VMSAVE */
@@@ -365,6 -365,8 +365,8 @@@ static void svm_set_interrupt_shadow(st
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
  
  }
+ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+                                       void *insn, int insn_len);
  
  static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
                                           bool commit_side_effects)
        }
  
        if (!svm->next_rip) {
+               /*
+                * FIXME: Drop this when kvm_emulate_instruction() does the
+                * right thing and treats "can't emulate" as outright failure
+                * for EMULTYPE_SKIP.
+                */
+               if (!svm_can_emulate_instruction(vcpu, EMULTYPE_SKIP, NULL, 0))
+                       return 0;
                if (unlikely(!commit_side_effects))
                        old_rflags = svm->vmcb->save.rflags;
  
@@@ -677,6 -687,39 +687,39 @@@ free_save_area
  
  }
  
+ static void set_dr_intercepts(struct vcpu_svm *svm)
+ {
+       struct vmcb *vmcb = svm->vmcb01.ptr;
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
+       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
+       recalc_intercepts(svm);
+ }
+ static void clr_dr_intercepts(struct vcpu_svm *svm)
+ {
+       struct vmcb *vmcb = svm->vmcb01.ptr;
+       vmcb->control.intercepts[INTERCEPT_DR] = 0;
+       recalc_intercepts(svm);
+ }
  static int direct_access_msr_slot(u32 msr)
  {
        u32 i;
@@@ -947,50 -990,24 +990,24 @@@ static void svm_disable_lbrv(struct kvm
                svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
  }
  
- static int svm_get_lbr_msr(struct vcpu_svm *svm, u32 index)
+ static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
  {
        /*
-        * If the LBR virtualization is disabled, the LBR msrs are always
-        * kept in the vmcb01 to avoid copying them on nested guest entries.
-        *
-        * If nested, and the LBR virtualization is enabled/disabled, the msrs
-        * are moved between the vmcb01 and vmcb02 as needed.
+        * If LBR virtualization is disabled, the LBR MSRs are always kept in
+        * vmcb01.  If LBR virtualization is enabled and L1 is running VMs of
+        * its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
         */
-       struct vmcb *vmcb =
-               (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) ?
-                       svm->vmcb : svm->vmcb01.ptr;
-       switch (index) {
-       case MSR_IA32_DEBUGCTLMSR:
-               return vmcb->save.dbgctl;
-       case MSR_IA32_LASTBRANCHFROMIP:
-               return vmcb->save.br_from;
-       case MSR_IA32_LASTBRANCHTOIP:
-               return vmcb->save.br_to;
-       case MSR_IA32_LASTINTFROMIP:
-               return vmcb->save.last_excp_from;
-       case MSR_IA32_LASTINTTOIP:
-               return vmcb->save.last_excp_to;
-       default:
-               KVM_BUG(false, svm->vcpu.kvm,
-                       "%s: Unknown MSR 0x%x", __func__, index);
-               return 0;
-       }
+       return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :
+                                                                  svm->vmcb01.ptr;
  }
  
  void svm_update_lbrv(struct kvm_vcpu *vcpu)
  {
        struct vcpu_svm *svm = to_svm(vcpu);
-       bool enable_lbrv = svm_get_lbr_msr(svm, MSR_IA32_DEBUGCTLMSR) &
-                                          DEBUGCTLMSR_LBR;
-       bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
-                                     LBR_CTL_ENABLE_MASK);
-       if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled))
-               if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))
-                       enable_lbrv = true;
+       bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
+       bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
+                          (is_guest_mode(vcpu) && svm->lbrv_enabled &&
+                           (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
  
        if (enable_lbrv == current_enable_lbrv)
                return;
@@@ -1201,10 -1218,9 +1218,9 @@@ static void init_vmcb(struct kvm_vcpu *
         * Guest access to VMware backdoor ports could legitimately
         * trigger #GP because of TSS I/O permission bitmap.
         * We intercept those #GP and allow access to them anyway
-        * as VMware does.  Don't intercept #GP for SEV guests as KVM can't
-        * decrypt guest memory to decode the faulting instruction.
+        * as VMware does.
         */
-       if (enable_vmware_backdoor && !sev_guest(vcpu->kvm))
+       if (enable_vmware_backdoor)
                set_exception_intercept(svm, GP_VECTOR);
  
        svm_set_intercept(svm, INTERCEPT_INTR);
@@@ -1498,9 -1514,7 +1514,9 @@@ static void svm_vcpu_load(struct kvm_vc
  
        if (sd->current_vmcb != svm->vmcb) {
                sd->current_vmcb = svm->vmcb;
 -              indirect_branch_prediction_barrier();
 +
 +              if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
 +                      indirect_branch_prediction_barrier();
        }
        if (kvm_vcpu_apicv_active(vcpu))
                avic_vcpu_load(vcpu, cpu);
@@@ -1788,11 -1802,6 +1804,11 @@@ static void sev_post_set_cr3(struct kvm
        }
  }
  
 +static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 +{
 +      return true;
 +}
 +
  void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
  {
        struct vcpu_svm *svm = to_svm(vcpu);
@@@ -1949,7 -1958,7 +1965,7 @@@ static void svm_sync_dirty_debug_regs(s
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
-       if (vcpu->arch.guest_state_protected)
+       if (WARN_ON_ONCE(sev_es_guest(vcpu->kvm)))
                return;
  
        get_debugreg(vcpu->arch.db[0], 0);
@@@ -2510,12 -2519,13 +2526,13 @@@ static int iret_interception(struct kvm
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
+       WARN_ON_ONCE(sev_es_guest(vcpu->kvm));
        ++vcpu->stat.nmi_window_exits;
        svm->awaiting_iret_completion = true;
  
        svm_clr_iret_intercept(svm);
-       if (!sev_es_guest(vcpu->kvm))
-               svm->nmi_iret_rip = kvm_rip_read(vcpu);
+       svm->nmi_iret_rip = kvm_rip_read(vcpu);
  
        kvm_make_request(KVM_REQ_EVENT, vcpu);
        return 1;
@@@ -2680,6 -2690,13 +2697,13 @@@ static int dr_interception(struct kvm_v
        unsigned long val;
        int err = 0;
  
+       /*
+        * SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT
+        * for DR7 write only. KVM cannot change DR7 (always swapped as type 'A') so return early.
+        */
+       if (sev_es_guest(vcpu->kvm))
+               return 1;
        if (vcpu->guest_debug == 0) {
                /*
                 * No more DR vmexits; force a reload of the debug registers
@@@ -2802,11 -2819,19 +2826,19 @@@ static int svm_get_msr(struct kvm_vcpu 
                msr_info->data = svm->tsc_aux;
                break;
        case MSR_IA32_DEBUGCTLMSR:
+               msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl;
+               break;
        case MSR_IA32_LASTBRANCHFROMIP:
+               msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from;
+               break;
        case MSR_IA32_LASTBRANCHTOIP:
+               msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to;
+               break;
        case MSR_IA32_LASTINTFROMIP:
+               msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from;
+               break;
        case MSR_IA32_LASTINTTOIP:
-               msr_info->data = svm_get_lbr_msr(svm, msr_info->index);
+               msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to;
                break;
        case MSR_VM_HSAVE_PA:
                msr_info->data = svm->nested.hsave_msr;
@@@ -3037,13 -3062,8 +3069,8 @@@ static int svm_set_msr(struct kvm_vcpu 
                if (data & DEBUGCTL_RESERVED_BITS)
                        return 1;
  
-               if (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)
-                       svm->vmcb->save.dbgctl = data;
-               else
-                       svm->vmcb01.ptr->save.dbgctl = data;
+               svm_get_lbr_vmcb(svm)->save.dbgctl = data;
                svm_update_lbrv(vcpu);
                break;
        case MSR_VM_HSAVE_PA:
                /*
@@@ -3769,6 -3789,19 +3796,19 @@@ static void svm_enable_nmi_window(struc
        if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
                return; /* IRET will cause a vm exit */
  
+       /*
+        * SEV-ES guests are responsible for signaling when a vCPU is ready to
+        * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e.
+        * KVM can't intercept and single-step IRET to detect when NMIs are
+        * unblocked (architecturally speaking).  See SVM_VMGEXIT_NMI_COMPLETE.
+        *
+        * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware
+        * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not
+        * supported NAEs in the GHCB protocol.
+        */
+       if (sev_es_guest(vcpu->kvm))
+               return;
        if (!gif_set(svm)) {
                if (vgif)
                        svm_set_intercept(svm, INTERCEPT_STGI);
@@@ -3918,12 -3951,11 +3958,11 @@@ static void svm_complete_interrupts(str
        svm->soft_int_injected = false;
  
        /*
-        * If we've made progress since setting HF_IRET_MASK, we've
+        * If we've made progress since setting awaiting_iret_completion, we've
         * executed an IRET and can allow NMI injection.
         */
        if (svm->awaiting_iret_completion &&
-           (sev_es_guest(vcpu->kvm) ||
-            kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
+           kvm_rip_read(vcpu) != svm->nmi_iret_rip) {
                svm->awaiting_iret_completion = false;
                svm->nmi_masked = false;
                kvm_make_request(KVM_REQ_EVENT, vcpu);
@@@ -3993,8 -4025,14 +4032,8 @@@ static int svm_vcpu_pre_run(struct kvm_
  
  static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
  {
 -      struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
 -
 -      /*
 -       * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM
 -       * can't read guest memory (dereference memslots) to decode the WRMSR.
 -       */
 -      if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 &&
 -          nrips && control->next_rip)
 +      if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
 +          to_svm(vcpu)->vmcb->control.exit_info_1)
                return handle_fastpath_set_msr_irqoff(vcpu);
  
        return EXIT_FASTPATH_NONE;
@@@ -4006,8 -4044,6 +4045,8 @@@ static noinstr void svm_vcpu_enter_exit
  
        guest_state_enter_irqoff();
  
 +      amd_clear_divider();
 +
        if (sev_es_guest(vcpu->kvm))
                __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
        else
@@@ -4651,16 -4687,25 +4690,25 @@@ static bool svm_can_emulate_instruction
         * and cannot be decrypted by KVM, i.e. KVM would read cyphertext and
         * decode garbage.
         *
-        * Inject #UD if KVM reached this point without an instruction buffer.
-        * In practice, this path should never be hit by a well-behaved guest,
-        * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path
-        * is still theoretically reachable, e.g. via unaccelerated fault-like
-        * AVIC access, and needs to be handled by KVM to avoid putting the
-        * guest into an infinite loop.   Injecting #UD is somewhat arbitrary,
-        * but its the least awful option given lack of insight into the guest.
+        * If KVM is NOT trying to simply skip an instruction, inject #UD if
+        * KVM reached this point without an instruction buffer.  In practice,
+        * this path should never be hit by a well-behaved guest, e.g. KVM
+        * doesn't intercept #UD or #GP for SEV guests, but this path is still
+        * theoretically reachable, e.g. via unaccelerated fault-like AVIC
+        * access, and needs to be handled by KVM to avoid putting the guest
+        * into an infinite loop.   Injecting #UD is somewhat arbitrary, but
+        * its the least awful option given lack of insight into the guest.
+        *
+        * If KVM is trying to skip an instruction, simply resume the guest.
+        * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
+        * will attempt to re-inject the INT3/INTO and skip the instruction.
+        * In that scenario, retrying the INT3/INTO and hoping the guest will
+        * make forward progress is the only option that has a chance of
+        * success (and in practice it will work the vast majority of the time).
         */
        if (unlikely(!insn)) {
-               kvm_queue_exception(vcpu, UD_VECTOR);
+               if (!(emul_type & EMULTYPE_SKIP))
+                       kvm_queue_exception(vcpu, UD_VECTOR);
                return false;
        }
  
@@@ -4818,7 -4863,6 +4866,7 @@@ static struct kvm_x86_ops svm_x86_ops _
        .set_segment = svm_set_segment,
        .get_cpl = svm_get_cpl,
        .get_cs_db_l_bits = svm_get_cs_db_l_bits,
 +      .is_valid_cr0 = svm_is_valid_cr0,
        .set_cr0 = svm_set_cr0,
        .post_set_cr3 = sev_post_set_cr3,
        .is_valid_cr4 = svm_is_valid_cr4,
@@@ -5112,9 -5156,11 +5160,11 @@@ static __init int svm_hardware_setup(vo
  
        svm_adjust_mmio_mask();
  
+       nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
        /*
         * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
-        * may be modified by svm_adjust_mmio_mask()).
+        * may be modified by svm_adjust_mmio_mask()), as well as nrips.
         */
        sev_hardware_setup();
  
                        goto err;
        }
  
-       if (nrips) {
-               if (!boot_cpu_has(X86_FEATURE_NRIPS))
-                       nrips = false;
-       }
        enable_apicv = avic = avic && avic_hardware_setup();
  
        if (!enable_apicv) {
diff --combined arch/x86/kvm/svm/svm.h
index 8239c8de45acfd3c6117acd8dbe5c14d2b9039d7,1498956a589fe588f4cf66b7775f3c7bd376c945..ef508042a5536aedd6f45d48ab2bf3651eb9f67e
@@@ -33,6 -33,7 +33,7 @@@
  #define MSRPM_OFFSETS 32
  extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
  extern bool npt_enabled;
+ extern int nrips;
  extern int vgif;
  extern bool intercept_smi;
  extern bool x2avic_enabled;
@@@ -190,12 -191,10 +191,12 @@@ struct vcpu_sev_es_state 
        /* SEV-ES support */
        struct sev_es_save_area *vmsa;
        struct ghcb *ghcb;
 +      u8 valid_bitmap[16];
        struct kvm_host_map ghcb_map;
        bool received_first_sipi;
  
        /* SEV-ES scratch area support */
 +      u64 sw_scratch;
        void *ghcb_sa;
        u32 ghcb_sa_len;
        bool ghcb_sa_sync;
@@@ -406,48 -405,6 +407,6 @@@ static inline bool vmcb12_is_intercept(
        return test_bit(bit, (unsigned long *)&control->intercepts);
  }
  
- static inline void set_dr_intercepts(struct vcpu_svm *svm)
- {
-       struct vmcb *vmcb = svm->vmcb01.ptr;
-       if (!sev_es_guest(svm->vcpu.kvm)) {
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
-       }
-       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
-       vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
-       recalc_intercepts(svm);
- }
- static inline void clr_dr_intercepts(struct vcpu_svm *svm)
- {
-       struct vmcb *vmcb = svm->vmcb01.ptr;
-       vmcb->control.intercepts[INTERCEPT_DR] = 0;
-       /* DR7 access must remain intercepted for an SEV-ES guest */
-       if (sev_es_guest(svm->vcpu.kvm)) {
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
-               vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
-       }
-       recalc_intercepts(svm);
- }
  static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
  {
        struct vmcb *vmcb = svm->vmcb01.ptr;
@@@ -746,28 -703,4 +705,28 @@@ void sev_es_unmap_ghcb(struct vcpu_svm 
  void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
  void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
  
 +#define DEFINE_KVM_GHCB_ACCESSORS(field)                                              \
 +      static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
 +      {                                                                       \
 +              return test_bit(GHCB_BITMAP_IDX(field),                         \
 +                              (unsigned long *)&svm->sev_es.valid_bitmap);    \
 +      }                                                                       \
 +                                                                              \
 +      static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
 +      {                                                                       \
 +              return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \
 +      }                                                                       \
 +
 +DEFINE_KVM_GHCB_ACCESSORS(cpl)
 +DEFINE_KVM_GHCB_ACCESSORS(rax)
 +DEFINE_KVM_GHCB_ACCESSORS(rcx)
 +DEFINE_KVM_GHCB_ACCESSORS(rdx)
 +DEFINE_KVM_GHCB_ACCESSORS(rbx)
 +DEFINE_KVM_GHCB_ACCESSORS(rsi)
 +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
 +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
 +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
 +DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
 +DEFINE_KVM_GHCB_ACCESSORS(xcr0)
 +
  #endif
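For reference, this is what DEFINE_KVM_GHCB_ACCESSORS(rax) above expands to by hand; GHCB_BITMAP_IDX(), used by the macro, maps a ghcb_save_area field name to its bit index in the valid bitmap.

static __always_inline bool kvm_ghcb_rax_is_valid(const struct vcpu_svm *svm)
{
        return test_bit(GHCB_BITMAP_IDX(rax),
                        (unsigned long *)&svm->sev_es.valid_bitmap);
}

static __always_inline u64 kvm_ghcb_get_rax_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb)
{
        return kvm_ghcb_rax_is_valid(svm) ? ghcb->save.rax : 0;
}

With sev_es_sync_from_ghcb() now snapshotting the guest's valid bitmap (and register values) before sev_es_validate_vmgexit() runs, the validity checks operate on KVM's cached copy rather than on guest-writable GHCB memory.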
diff --combined tools/arch/x86/include/asm/cpufeatures.h
index 1f6d904c6481dd4414d67be8e0acec46ed337c57,31c862d79fae220edf84b233a583ea4b495ceac8..798e60b5454b7e108a07550cb354a8ee27c0de8f
@@@ -14,7 -14,7 +14,7 @@@
   * Defines x86 CPU feature bits
   */
  #define NCAPINTS                      21         /* N 32-bit words worth of info */
 -#define NBUGINTS                      1          /* N 32-bit bug flags */
 +#define NBUGINTS                      2          /* N 32-bit bug flags */
  
  /*
   * Note: If the comment begins with a quoted string, that string is used
  #define X86_FEATURE_SEV_ES            (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
  #define X86_FEATURE_V_TSC_AUX         (19*32+ 9) /* "" Virtual TSC_AUX */
  #define X86_FEATURE_SME_COHERENT      (19*32+10) /* "" AMD hardware-enforced cache coherency */
+ #define X86_FEATURE_DEBUG_SWAP                (19*32+14) /* AMD SEV-ES full debug state swap support */
  
  /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
  #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */