Merge tag 'kvm-x86-svm-6.5' of https://github.com/kvm-x86/linux into HEAD
author	Paolo Bonzini <[email protected]>
Sat, 1 Jul 2023 11:19:42 +0000 (07:19 -0400)
committer	Paolo Bonzini <[email protected]>
Sat, 1 Jul 2023 11:19:42 +0000 (07:19 -0400)
KVM SVM changes for 6.5:

 - Drop manual TR/TSS load after VM-Exit now that KVM uses VMLOAD for host state

 - Fix a not-yet-problematic missing call to trace_kvm_exit() for VM-Exits that
   are handled in the fastpath

 - Print more descriptive information about the status of SEV and SEV-ES during
   module load

 - Assert that misc_cg_set_capacity() doesn't fail to avoid should-be-impossible
   memory leaks (a rough sketch of these two sev.c changes follows this list)
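
The last two items land in arch/x86/kvm/svm/sev.c rather than in the file shown in the
combined diff below. As a rough, non-authoritative sketch of the idea only (the placement
at the end of sev_hardware_setup(), the local names sev_asid_count/sev_es_asid_count, and
the exact message wording are assumptions, not the literal patch):

	/*
	 * Sketch only: SEV-ES owns ASIDs 1..min_sev_asid-1, SEV owns
	 * min_sev_asid..max_sev_asid.
	 */
	unsigned int sev_asid_count = max_sev_asid - min_sev_asid + 1;
	unsigned int sev_es_asid_count = min_sev_asid - 1;

	/*
	 * Registering the ASID capacity with the misc cgroup controller should
	 * never fail; WARN instead of unwinding so a should-be-impossible error
	 * path can't leak the already-allocated ASID tracking state.
	 */
	WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
	WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));

	/* Report whether SEV/SEV-ES actually came up, and with how many ASIDs. */
	pr_info("SEV %s (ASIDs %u - %u)\n",
		sev_enabled ? "enabled" : "disabled", min_sev_asid, max_sev_asid);
	pr_info("SEV-ES %s (ASIDs %u - %u)\n",
		sev_es_enabled ? "enabled" : "disabled", 1, min_sev_asid - 1);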

arch/x86/kvm/svm/svm.c

diff --combined arch/x86/kvm/svm/svm.c
index c75497f67593fada5ea71f3db5467db44ee0c52a,3a29273d070e0711cba144a394e7d6388e92024e..d381ad4245542c2d30805433fe2a381f2f5cddf0
@@@ -244,15 -244,6 +244,6 @@@ static u8 rsm_ins_bytes[] = "\x0f\xaa"
  
  static unsigned long iopm_base;
  
- struct kvm_ldttss_desc {
-       u16 limit0;
-       u16 base0;
-       unsigned base1:8, type:5, dpl:2, p:1;
-       unsigned limit1:4, zero0:3, g:1, base2:8;
-       u32 base3;
-       u32 zero1;
- } __attribute__((packed));
  DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
  
  /*
@@@ -588,7 -579,6 +579,6 @@@ static int svm_hardware_enable(void
  
        struct svm_cpu_data *sd;
        uint64_t efer;
-       struct desc_struct *gdt;
        int me = raw_smp_processor_id();
  
        rdmsrl(MSR_EFER, efer);
        sd->next_asid = sd->max_asid + 1;
        sd->min_asid = max_sev_asid + 1;
  
-       gdt = get_current_gdt_rw();
-       sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
        wrmsrl(MSR_EFER, efer | EFER_SVME);
  
        wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);
@@@ -752,7 -739,7 +739,7 @@@ static bool msr_write_intercepted(struc
  
        BUG_ON(offset == MSR_INVALID);
  
 -      return !!test_bit(bit_write,  &tmp);
 +      return test_bit(bit_write, &tmp);
  }
  
  static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
@@@ -2939,10 -2926,9 +2926,10 @@@ static int svm_set_msr(struct kvm_vcpu 
  
                break;
        case MSR_IA32_CR_PAT:
 -              if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
 -                      return 1;
 -              vcpu->arch.pat = data;
 +              ret = kvm_set_msr_common(vcpu, msr);
 +              if (ret)
 +                      break;
 +
                svm->vmcb01.ptr->save.g_pat = data;
                if (is_guest_mode(vcpu))
                        nested_vmcb02_compute_g_pat(svm);
@@@ -3419,8 -3405,6 +3406,6 @@@ static int svm_handle_exit(struct kvm_v
        struct kvm_run *kvm_run = vcpu->run;
        u32 exit_code = svm->vmcb->control.exit_code;
  
-       trace_kvm_exit(vcpu, KVM_ISA_SVM);
        /* SEV-ES guests must use the CR write traps to track CR registers. */
        if (!sev_es_guest(vcpu->kvm)) {
                if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
        return svm_invoke_exit_handler(vcpu, exit_code);
  }
  
- static void reload_tss(struct kvm_vcpu *vcpu)
- {
-       struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
-       sd->tss_desc->type = 9; /* available 32/64-bit TSS */
-       load_TR_desc();
- }
  static void pre_svm_run(struct kvm_vcpu *vcpu)
  {
        struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
@@@ -3511,7 -3487,7 +3488,7 @@@ static bool svm_is_vnmi_pending(struct 
        if (!is_vnmi_enabled(svm))
                return false;
  
 -      return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK);
 +      return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
  }
  
  static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
@@@ -4100,9 -4076,6 +4077,6 @@@ static __no_kcsan fastpath_t svm_vcpu_r
  
        svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);
  
-       if (!sev_es_guest(vcpu->kvm))
-               reload_tss(vcpu);
        if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
                x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);
  
                     SVM_EXIT_EXCP_BASE + MC_VECTOR))
                svm_handle_mce(vcpu);
  
+       trace_kvm_exit(vcpu, KVM_ISA_SVM);
        svm_complete_interrupts(vcpu);
  
        if (is_guest_mode(vcpu))
@@@ -5026,22 -5001,9 +5002,22 @@@ static __init void svm_set_cpu_caps(voi
            boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
  
 -      /* AMD PMU PERFCTR_CORE CPUID */
 -      if (enable_pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
 -              kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE);
 +      if (enable_pmu) {
 +              /*
 +               * Enumerate support for PERFCTR_CORE if and only if KVM has
 +               * access to enough counters to virtualize "core" support,
 +               * otherwise limit vPMU support to the legacy number of counters.
 +               */
 +              if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
 +                      kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
 +                                                        kvm_pmu_cap.num_counters_gp);
 +              else
 +                      kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE);
 +
 +              if (kvm_pmu_cap.version != 2 ||
 +                  !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
 +                      kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2);
 +      }
  
        /* CPUID 0x8000001F (SME/SEV features) */
        sev_set_cpu_caps();