Merge tag 'kvm-x86-svm-6.12' of https://github.com/kvm-x86/linux into HEAD
author Paolo Bonzini <[email protected]>
Sat, 14 Sep 2024 13:56:04 +0000 (09:56 -0400)
committer Paolo Bonzini <[email protected]>
Tue, 17 Sep 2024 16:41:13 +0000 (12:41 -0400)
KVM SVM changes for 6.12:

 - Don't stuff the RSB after VM-Exit when RETPOLINE=y and AutoIBRS is enabled,
   i.e. when the CPU has already flushed the RSB.

 - Trace the per-CPU host save area as a VMCB pointer to improve readability
   and clean up the retrieval of the SEV-ES host save area; a small standalone
   sketch of this change follows the changelog.

 - Remove unnecessary accounting of temporary allocations related to nested VMCBs.
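
The save-area change in the second item is easiest to see with a small,
standalone sketch. This is a hedged userspace approximation, not the kernel's
real definitions: struct vmcb_sketch, struct svm_cpu_data_sketch and the
old_/new_sev_es_host_save_area() helpers are made-up stand-ins; only the
host_sev_es_save member name and the 0x400 offset are taken from the diff
below.

  /*
   * Userspace sketch only: illustrates replacing raw "page + 0x400"
   * arithmetic with a typed member access for the SEV-ES host save area.
   */
  #include <assert.h>
  #include <stdlib.h>

  struct sev_es_save_area {
      unsigned char state[1024];          /* hardware-defined layout, elided */
  };

  /* Stand-in for struct vmcb: the SEV-ES host state lives 0x400 bytes in. */
  struct vmcb_sketch {
      unsigned char control_and_save[0x400];
      struct sev_es_save_area host_sev_es_save;
  };

  struct svm_cpu_data_sketch {
      struct vmcb_sketch *save_area;      /* previously an untyped page */
  };

  /* Old style: raw pointer arithmetic on the backing page. */
  static struct sev_es_save_area *old_sev_es_host_save_area(void *page)
  {
      return (struct sev_es_save_area *)((unsigned char *)page + 0x400);
  }

  /* New style: typed member access, as sev_es_host_save_area() now does. */
  static struct sev_es_save_area *
  new_sev_es_host_save_area(struct svm_cpu_data_sketch *sd)
  {
      return &sd->save_area->host_sev_es_save;
  }

  int main(void)
  {
      struct svm_cpu_data_sketch sd;

      sd.save_area = calloc(1, sizeof(*sd.save_area));
      if (!sd.save_area)
          return 1;

      /* Both accessors resolve to the same address. */
      assert(old_sev_es_host_save_area(sd.save_area) ==
             new_sev_es_host_save_area(&sd));

      free(sd.save_area);
      return 0;
  }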

arch/x86/kvm/svm/svm.c

diff --combined arch/x86/kvm/svm/svm.c
index c1062e8c3f50999a4a0d1999b880c3b5643c6f8a,cac9e36960cad1d40c3baf28e996e8ac93d4500f..9df3e1e5ae81a1346409632edd693cb7e0740f72
@@@ -573,7 -573,7 +573,7 @@@ static void __svm_write_tsc_multiplier(
  
  static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
  {
-       return page_address(sd->save_area) + 0x400;
+       return &sd->save_area->host_sev_es_save;
  }
  
  static inline void kvm_cpu_svm_disable(void)
        }
  }
  
 -static void svm_emergency_disable(void)
 +static void svm_emergency_disable_virtualization_cpu(void)
  {
        kvm_rebooting = true;
  
        kvm_cpu_svm_disable();
  }
  
 -static void svm_hardware_disable(void)
 +static void svm_disable_virtualization_cpu(void)
  {
        /* Make sure we clean up behind us */
        if (tsc_scaling)
        amd_pmu_disable_virt();
  }
  
 -static int svm_hardware_enable(void)
 +static int svm_enable_virtualization_cpu(void)
  {
  
        struct svm_cpu_data *sd;
@@@ -696,7 -696,7 +696,7 @@@ static void svm_cpu_uninit(int cpu
                return;
  
        kfree(sd->sev_vmcbs);
-       __free_page(sd->save_area);
+       __free_page(__sme_pa_to_page(sd->save_area_pa));
        sd->save_area_pa = 0;
        sd->save_area = NULL;
  }
  static int svm_cpu_init(int cpu)
  {
        struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
+       struct page *save_area_page;
        int ret = -ENOMEM;
  
        memset(sd, 0, sizeof(struct svm_cpu_data));
-       sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
-       if (!sd->save_area)
+       save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
+       if (!save_area_page)
                return ret;
  
        ret = sev_cpu_init(sd);
        if (ret)
                goto free_save_area;
  
-       sd->save_area_pa = __sme_page_pa(sd->save_area);
+       sd->save_area = page_address(save_area_page);
+       sd->save_area_pa = __sme_page_pa(save_area_page);
        return 0;
  
  free_save_area:
-       __free_page(sd->save_area);
-       sd->save_area = NULL;
+       __free_page(save_area_page);
        return ret;
  
  }
@@@ -1124,8 -1125,7 +1125,7 @@@ static void svm_hardware_unsetup(void
        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);
  
-       __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
-       get_order(IOPM_SIZE));
+       __free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE));
        iopm_base = 0;
  }
  
@@@ -1301,7 -1301,7 +1301,7 @@@ static void init_vmcb(struct kvm_vcpu *
        if (!kvm_hlt_in_guest(vcpu->kvm))
                svm_set_intercept(svm, INTERCEPT_HLT);
  
-       control->iopm_base_pa = __sme_set(iopm_base);
+       control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
        control->int_ctl = V_INTR_MASKING_MASK;
  
@@@ -1503,7 -1503,7 +1503,7 @@@ static void svm_vcpu_free(struct kvm_vc
  
        sev_free_vcpu(vcpu);
  
-       __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
+       __free_page(__sme_pa_to_page(svm->vmcb01.pa));
        __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
  }
  
@@@ -1533,7 -1533,7 +1533,7 @@@ static void svm_prepare_switch_to_guest
         * TSC_AUX is always virtualized for SEV-ES guests when the feature is
         * available. The user return MSR support is not required in this case
         * because TSC_AUX is restored on #VMEXIT from the host save area
 -       * (which has been initialized in svm_hardware_enable()).
 +       * (which has been initialized in svm_enable_virtualization_cpu()).
         */
        if (likely(tsc_aux_uret_slot >= 0) &&
            (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
@@@ -2825,17 -2825,17 +2825,17 @@@ static int efer_trap(struct kvm_vcpu *v
        return kvm_complete_insn_gp(vcpu, ret);
  }
  
 -static int svm_get_msr_feature(struct kvm_msr_entry *msr)
 +static int svm_get_feature_msr(u32 msr, u64 *data)
  {
 -      msr->data = 0;
 +      *data = 0;
  
 -      switch (msr->index) {
 +      switch (msr) {
        case MSR_AMD64_DE_CFG:
                if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
 -                      msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
 +                      *data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
                break;
        default:
 -              return KVM_MSR_RET_INVALID;
 +              return KVM_MSR_RET_UNSUPPORTED;
        }
  
        return 0;
@@@ -2876,12 -2876,6 +2876,12 @@@ static int svm_get_msr(struct kvm_vcpu 
        case MSR_CSTAR:
                msr_info->data = svm->vmcb01.ptr->save.cstar;
                break;
 +      case MSR_GS_BASE:
 +              msr_info->data = svm->vmcb01.ptr->save.gs.base;
 +              break;
 +      case MSR_FS_BASE:
 +              msr_info->data = svm->vmcb01.ptr->save.fs.base;
 +              break;
        case MSR_KERNEL_GS_BASE:
                msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
                break;
@@@ -3107,12 -3101,6 +3107,12 @@@ static int svm_set_msr(struct kvm_vcpu 
        case MSR_CSTAR:
                svm->vmcb01.ptr->save.cstar = data;
                break;
 +      case MSR_GS_BASE:
 +              svm->vmcb01.ptr->save.gs.base = data;
 +              break;
 +      case MSR_FS_BASE:
 +              svm->vmcb01.ptr->save.fs.base = data;
 +              break;
        case MSR_KERNEL_GS_BASE:
                svm->vmcb01.ptr->save.kernel_gs_base = data;
                break;
                 * feature is available. The user return MSR support is not
                 * required in this case because TSC_AUX is restored on #VMEXIT
                 * from the host save area (which has been initialized in
 -               * svm_hardware_enable()).
 +               * svm_enable_virtualization_cpu()).
                 */
                if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
                        break;
                kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
                break;
        case MSR_AMD64_DE_CFG: {
 -              struct kvm_msr_entry msr_entry;
 +              u64 supported_de_cfg;
  
 -              msr_entry.index = msr->index;
 -              if (svm_get_msr_feature(&msr_entry))
 +              if (svm_get_feature_msr(ecx, &supported_de_cfg))
                        return 1;
  
 -              /* Check the supported bits */
 -              if (data & ~msr_entry.data)
 +              if (data & ~supported_de_cfg)
                        return 1;
  
 -              /* Don't allow the guest to change a bit, #GP */
 -              if (!msr->host_initiated && (data ^ msr_entry.data))
 +              /*
 +               * Don't let the guest change the host-programmed value.  The
 +               * MSR is very model specific, i.e. contains multiple bits that
 +               * are completely unknown to KVM, and the one bit known to KVM
 +               * is simply a reflection of hardware capabilities.
 +               */
 +              if (!msr->host_initiated && data != svm->msr_decfg)
                        return 1;
  
                svm->msr_decfg = data;
@@@ -4159,21 -4144,12 +4159,21 @@@ static int svm_vcpu_pre_run(struct kvm_
  
  static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
  {
 +      struct vcpu_svm *svm = to_svm(vcpu);
 +
        if (is_guest_mode(vcpu))
                return EXIT_FASTPATH_NONE;
  
 -      if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
 -          to_svm(vcpu)->vmcb->control.exit_info_1)
 +      switch (svm->vmcb->control.exit_code) {
 +      case SVM_EXIT_MSR:
 +              if (!svm->vmcb->control.exit_info_1)
 +                      break;
                return handle_fastpath_set_msr_irqoff(vcpu);
 +      case SVM_EXIT_HLT:
 +              return handle_fastpath_hlt(vcpu);
 +      default:
 +              break;
 +      }
  
        return EXIT_FASTPATH_NONE;
  }
@@@ -5004,9 -4980,8 +5004,9 @@@ static struct kvm_x86_ops svm_x86_ops _
        .check_processor_compatibility = svm_check_processor_compat,
  
        .hardware_unsetup = svm_hardware_unsetup,
 -      .hardware_enable = svm_hardware_enable,
 -      .hardware_disable = svm_hardware_disable,
 +      .enable_virtualization_cpu = svm_enable_virtualization_cpu,
 +      .disable_virtualization_cpu = svm_disable_virtualization_cpu,
 +      .emergency_disable_virtualization_cpu = svm_emergency_disable_virtualization_cpu,
        .has_emulated_msr = svm_has_emulated_msr,
  
        .vcpu_create = svm_vcpu_create,
        .vcpu_unblocking = avic_vcpu_unblocking,
  
        .update_exception_bitmap = svm_update_exception_bitmap,
 -      .get_msr_feature = svm_get_msr_feature,
 +      .get_feature_msr = svm_get_feature_msr,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
        .enable_nmi_window = svm_enable_nmi_window,
        .enable_irq_window = svm_enable_irq_window,
        .update_cr8_intercept = svm_update_cr8_intercept,
 +
 +      .x2apic_icr_is_split = true,
        .set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
        .refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
        .apicv_post_state_restore = avic_apicv_post_state_restore,
@@@ -5251,9 -5224,6 +5251,9 @@@ static __init void svm_set_cpu_caps(voi
  
        /* CPUID 0x8000001F (SME/SEV features) */
        sev_set_cpu_caps();
 +
 +      /* Don't advertise Bus Lock Detect to guest if SVM support is absent */
 +      kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
  }
  
  static __init int svm_hardware_setup(void)
  
        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
-       iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
+       iopm_base = __sme_page_pa(iopm_pages);
  
        init_msrpm_offsets();
  
@@@ -5440,6 -5410,8 +5440,6 @@@ static struct kvm_x86_init_ops svm_init
  static void __svm_exit(void)
  {
        kvm_x86_vendor_exit();
 -
 -      cpu_emergency_unregister_virt_callback(svm_emergency_disable);
  }
  
  static int __init svm_init(void)
        if (r)
                return r;
  
 -      cpu_emergency_register_virt_callback(svm_emergency_disable);
 -
        /*
         * Common KVM initialization _must_ come last, after this, /dev/kvm is
         * exposed to userspace!