x86/kvm/vmx: Move guest enter/exit into .noinstr.text
author     Thomas Gleixner <[email protected]>
           Wed, 8 Jul 2020 19:51:57 +0000 (21:51 +0200)
committer  Paolo Bonzini <[email protected]>
           Thu, 9 Jul 2020 11:08:40 +0000 (07:08 -0400)
Move the functions which are inside the RCU-off region into the
non-instrumentable text section.
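
For reference, the noinstr annotation used throughout this patch places a
function into the .noinstr.text section and disables the usual instrumentation.
Its definition in include/linux/compiler_types.h around this series was roughly
the following; the exact attribute list has varied between releases, so treat
this as a sketch rather than the authoritative definition:

	/* Section for code which can't be instrumented at all */
	#define noinstr							\
		noinline notrace __attribute((__section__(".noinstr.text")))	\
		__no_kcsan __no_sanitize_address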

Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Alexandre Chartre <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Acked-by: Paolo Bonzini <[email protected]>
Message-Id: <20200708195322.037311579@linutronix.de>
Signed-off-by: Paolo Bonzini <[email protected]>
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/vmx/ops.h
arch/x86/kvm/vmx/vmenter.S
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

index 07533795b8d2aa3e54fe15797bfb5017927a4f03..275e7fd20310f7a237d2d22f790a2c0928c170a9 100644 (file)
@@ -67,12 +67,12 @@ static inline void kvm_set_cpu_l1tf_flush_l1d(void)
        __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
 }
 
-static inline void kvm_clear_cpu_l1tf_flush_l1d(void)
+static __always_inline void kvm_clear_cpu_l1tf_flush_l1d(void)
 {
        __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
 }
 
-static inline bool kvm_get_cpu_l1tf_flush_l1d(void)
+static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
 {
        return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
 }
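
The switch from inline to __always_inline matters because these helpers are
called from the noinstr VM-enter path added below: plain inline is only a hint,
and an out-of-line copy would land in instrumentable .text, which objtool's
noinstr validation rejects. A minimal illustration (example_enter_exit() is a
hypothetical caller, not kernel code):

	static noinstr void example_enter_exit(struct kvm_vcpu *vcpu)
	{
		/*
		 * Must be inlined into .noinstr.text; a call to an
		 * out-of-line copy in .text would violate noinstr rules.
		 */
		kvm_clear_cpu_l1tf_flush_l1d();
		/* ... VM-Enter ... */
	}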
index 86c719d2b75519f292621c0508a97517e84da232..3d7d818a282cb480252094049894034ba8f1d3c8 100644 (file)
@@ -1598,7 +1598,15 @@ asmlinkage void kvm_spurious_fault(void);
        insn "\n\t"                                                     \
        "jmp    668f \n\t"                                              \
        "667: \n\t"                                                     \
+       "1: \n\t"                                                       \
+       ".pushsection .discard.instr_begin \n\t"                        \
+       ".long 1b - . \n\t"                                             \
+       ".popsection \n\t"                                              \
        "call   kvm_spurious_fault \n\t"                                \
+       "1: \n\t"                                                       \
+       ".pushsection .discard.instr_end \n\t"                          \
+       ".long 1b - . \n\t"                                             \
+       ".popsection \n\t"                                              \
        "668: \n\t"                                                     \
        _ASM_EXTABLE(666b, 667b)
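
The .pushsection .discard.instr_begin/.discard.instr_end records added above
are the asm-level equivalent of instrumentation_begin()/instrumentation_end():
they tell objtool that the call to kvm_spurious_fault() from this (possibly
noinstr) inline asm is a deliberate, annotated exception. The C-side macro,
used in the ops.h hunk below, emits the same section records; sketched from the
v5.8-era include/linux/instrumentation.h, so the exact form may differ:

	#define instrumentation_begin() ({				\
		asm volatile("%c0: nop\n\t"				\
			     ".pushsection .discard.instr_begin\n\t"	\
			     ".long %c0b - .\n\t"			\
			     ".popsection\n\t" : : "i" (__COUNTER__));	\
	})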
 
index 5f1ac002b4b676232a326e993be25e75b59238e8..692b0c31c9c82d1bbf1f48b7e4c5362be543720b 100644 (file)
@@ -146,7 +146,9 @@ do {                                                                        \
                          : : op1 : "cc" : error, fault);               \
        return;                                                         \
 error:                                                                 \
+       instrumentation_begin();                                        \
        insn##_error(error_args);                                       \
+       instrumentation_end();                                          \
        return;                                                         \
 fault:                                                                 \
        kvm_spurious_fault();                                           \
@@ -161,7 +163,9 @@ do {                                                                        \
                          : : op1, op2 : "cc" : error, fault);          \
        return;                                                         \
 error:                                                                 \
+       instrumentation_begin();                                        \
        insn##_error(error_args);                                       \
+       instrumentation_end();                                          \
        return;                                                         \
 fault:                                                                 \
        kvm_spurious_fault();                                           \
index e0a182cb3cdddd662af694ce06544d5c01b0ae32..799db084a336b40e126f03f0003e28373e153ac9 100644 (file)
@@ -27,7 +27,7 @@
 #define VCPU_R15       __VCPU_REGS_R15 * WORD_SIZE
 #endif
 
-       .text
+.section .noinstr.text, "ax"
 
 /**
  * vmx_vmenter - VM-Enter the current loaded VMCS
@@ -234,6 +234,9 @@ SYM_FUNC_START(__vmx_vcpu_run)
        jmp 1b
 SYM_FUNC_END(__vmx_vcpu_run)
 
+
+.section .text, "ax"
+
 /**
  * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
  * @field:     VMCS field encoding that failed
index 421cff3aa5b5e7a3217504d3e148674b744fe1e8..e71a3d9827810e63b8a9c0e01a799f2c1132ceae 100644 (file)
@@ -6116,7 +6116,7 @@ unexpected_vmexit:
  * information but as all relevant affected CPUs have 32KiB L1D cache size
  * there is no point in doing so.
  */
-static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 {
        int size = PAGE_SIZE << L1D_CACHE_ORDER;
 
@@ -6149,7 +6149,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
        vcpu->stat.l1d_flush++;
 
        if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
-               wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+               native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
                return;
        }
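
wrmsrl() is replaced here because it can bounce through the paravirt layer and
the MSR tracepoints, neither of which is permitted in noinstr code, whereas
native_wrmsrl() boils down to the bare instruction. A rough equivalent
(raw_wrmsrl() is an illustrative name, not the kernel's exact definition):

	static __always_inline void raw_wrmsrl(unsigned int msr, u64 val)
	{
		/* Bare WRMSR: ECX = MSR index, EDX:EAX = 64-bit value. */
		asm volatile("wrmsr"
			     : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32))
			     : "memory");
	}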
 
@@ -6635,7 +6635,7 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
        }
 }
 
-void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
+void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 {
        if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
                vmx->loaded_vmcs->host_state.rsp = host_rsp;
@@ -6657,6 +6657,63 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
+static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+                                       struct vcpu_vmx *vmx)
+{
+       /*
+        * VMENTER enables interrupts (host state), but the kernel state is
+        * interrupts disabled when this is invoked. Also tell RCU about
+        * it. This is the same logic as for exit_to_user_mode().
+        *
+        * This ensures that e.g. latency analysis on the host observes
+        * guest mode as interrupt enabled.
+        *
+        * guest_enter_irqoff() informs context tracking about the
+        * transition to guest mode and if enabled adjusts RCU state
+        * accordingly.
+        */
+       instrumentation_begin();
+       trace_hardirqs_on_prepare();
+       lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+       instrumentation_end();
+
+       guest_enter_irqoff();
+       lockdep_hardirqs_on(CALLER_ADDR0);
+
+       /* L1D Flush includes CPU buffer clear to mitigate MDS */
+       if (static_branch_unlikely(&vmx_l1d_should_flush))
+               vmx_l1d_flush(vcpu);
+       else if (static_branch_unlikely(&mds_user_clear))
+               mds_clear_cpu_buffers();
+
+       if (vcpu->arch.cr2 != read_cr2())
+               write_cr2(vcpu->arch.cr2);
+
+       vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
+                                  vmx->loaded_vmcs->launched);
+
+       vcpu->arch.cr2 = read_cr2();
+
+       /*
+        * VMEXIT disables interrupts (host state), but tracing and lockdep
+        * have them in state 'on' as recorded before entering guest mode.
+        * Same as enter_from_user_mode().
+        *
+        * guest_exit_irqoff() restores host context and reinstates RCU if
+        * enabled and required.
+        *
+        * This needs to be done before the below as native_read_msr()
+        * contains a tracepoint and x86_spec_ctrl_restore_host() calls
+        * into world and some more.
+        */
+       lockdep_hardirqs_off(CALLER_ADDR0);
+       guest_exit_irqoff();
+
+       instrumentation_begin();
+       trace_hardirqs_off_finish();
+       instrumentation_end();
+}
+
 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
        fastpath_t exit_fastpath;
@@ -6731,52 +6788,8 @@ reenter_guest:
         */
        x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
 
-       /*
-        * VMENTER enables interrupts (host state), but the kernel state is
-        * interrupts disabled when this is invoked. Also tell RCU about
-        * it. This is the same logic as for exit_to_user_mode().
-        *
-        * This ensures that e.g. latency analysis on the host observes
-        * guest mode as interrupt enabled.
-        *
-        * guest_enter_irqoff() informs context tracking about the
-        * transition to guest mode and if enabled adjusts RCU state
-        * accordingly.
-        */
-       trace_hardirqs_on_prepare();
-       lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-       guest_enter_irqoff();
-       lockdep_hardirqs_on(CALLER_ADDR0);
-
-       /* L1D Flush includes CPU buffer clear to mitigate MDS */
-       if (static_branch_unlikely(&vmx_l1d_should_flush))
-               vmx_l1d_flush(vcpu);
-       else if (static_branch_unlikely(&mds_user_clear))
-               mds_clear_cpu_buffers();
-
-       if (vcpu->arch.cr2 != read_cr2())
-               write_cr2(vcpu->arch.cr2);
-
-       vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
-                                  vmx->loaded_vmcs->launched);
-
-       vcpu->arch.cr2 = read_cr2();
-
-       /*
-        * VMEXIT disables interrupts (host state), but tracing and lockdep
-        * have them in state 'on' as recorded before entering guest mode.
-        * Same as enter_from_user_mode().
-        *
-        * guest_exit_irqoff() restores host context and reinstates RCU if
-        * enabled and required.
-        *
-        * This needs to be done before the below as native_read_msr()
-        * contains a tracepoint and x86_spec_ctrl_restore_host() calls
-        * into world and some more.
-        */
-       lockdep_hardirqs_off(CALLER_ADDR0);
-       guest_exit_irqoff();
-       trace_hardirqs_off_finish();
+       /* The actual VMENTER/EXIT is in the .noinstr.text section. */
+       vmx_vcpu_enter_exit(vcpu, vmx);
 
        /*
         * We do not use IBRS in the kernel. If this vCPU has used the
index d7d82b3c0e4ce3bb342055f2320c2be8c1f4c4a3..e27d3db7e43f67e071e16306d6e8d694ea3cfb9f 100644 (file)
@@ -402,7 +402,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-asmlinkage __visible void kvm_spurious_fault(void)
+asmlinkage __visible noinstr void kvm_spurious_fault(void)
 {
        /* Fault while not rebooting.  We want the trace. */
        BUG_ON(!kvm_rebooting);
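
Taken together, the patch establishes the following ordering contract around
the guest transition, mirroring exit_to_user_mode()/enter_from_user_mode().
The sketch below is a condensed restatement of vmx_vcpu_enter_exit() above,
not additional kernel code:

	instrumentation_begin();
	trace_hardirqs_on_prepare();		/* tracing, while still instrumentable */
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();
	guest_enter_irqoff();			/* context tracking / RCU -> guest */
	lockdep_hardirqs_on(CALLER_ADDR0);
	/* ... noinstr-only work, __vmx_vcpu_run() ... */
	lockdep_hardirqs_off(CALLER_ADDR0);	/* first, before any traced call */
	guest_exit_irqoff();			/* reinstate RCU / host context */
	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();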