Git Repo - J-linux.git/commitdiff
x86/retpoline: Ensure default return thunk isn't used at runtime
authorJosh Poimboeuf <[email protected]>
Wed, 3 Jan 2024 18:36:26 +0000 (19:36 +0100)
committerBorislav Petkov (AMD) <[email protected]>
Mon, 12 Feb 2024 10:42:15 +0000 (11:42 +0100)
Make sure the default return thunk is not used after all return
instructions have been patched by the alternatives because the default
return thunk is insufficient when it comes to mitigating Retbleed or
SRSO.

Fix based on an earlier version by David Kaplan <[email protected]>.

  [ bp: Fix the compilation error of warn_thunk_thunk being an invisible
        symbol, hoist thunk macro into calling.h ]

Signed-off-by: Josh Poimboeuf <[email protected]>
Co-developed-by: Borislav Petkov (AMD) <[email protected]>
Signed-off-by: Borislav Petkov (AMD) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Link: https://lore.kernel.org/r/20240104132446.GEZZaxnrIgIyat0pqf@fat_crate.local
arch/x86/entry/calling.h
arch/x86/entry/entry.S
arch/x86/entry/thunk_32.S
arch/x86/entry/thunk_64.S
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs.c
arch/x86/lib/retpoline.S

index 39e069b68c6eeaa24540cb8065da32e859a5865c..bd31b253405388854e0db323cf63235af4cc57c8 100644 (file)
@@ -426,3 +426,63 @@ For 32-bit we have the following conventions - kernel is built with
 .endm
 
 #endif /* CONFIG_SMP */
+
+#ifdef CONFIG_X86_64
+
+/* rdi:        arg1 ... normal C conventions. rax is saved/restored. */
+.macro THUNK name, func
+SYM_FUNC_START(\name)
+       pushq %rbp
+       movq %rsp, %rbp
+
+       pushq %rdi
+       pushq %rsi
+       pushq %rdx
+       pushq %rcx
+       pushq %rax
+       pushq %r8
+       pushq %r9
+       pushq %r10
+       pushq %r11
+
+       call \func
+
+       popq %r11
+       popq %r10
+       popq %r9
+       popq %r8
+       popq %rax
+       popq %rcx
+       popq %rdx
+       popq %rsi
+       popq %rdi
+       popq %rbp
+       RET
+SYM_FUNC_END(\name)
+       _ASM_NOKPROBE(\name)
+.endm
+
+#else /* CONFIG_X86_32 */
+
+/* put return address in eax (arg1) */
+.macro THUNK name, func, put_ret_addr_in_eax=0
+SYM_CODE_START_NOALIGN(\name)
+       pushl %eax
+       pushl %ecx
+       pushl %edx
+
+       .if \put_ret_addr_in_eax
+       /* Place EIP in the arg1 */
+       movl 3*4(%esp), %eax
+       .endif
+
+       call \func
+       popl %edx
+       popl %ecx
+       popl %eax
+       RET
+       _ASM_NOKPROBE(\name)
+SYM_CODE_END(\name)
+       .endm
+
+#endif
index 8c8d38f0cb1df0ee959e09c9f912ec1ab2afce40..582731f74dc87d497ddddb408c920abb9c8c4e72 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/linkage.h>
 #include <asm/msr-index.h>
 
+#include "calling.h"
+
 .pushsection .noinstr.text, "ax"
 
 SYM_FUNC_START(entry_ibpb)
@@ -20,3 +22,5 @@ SYM_FUNC_END(entry_ibpb)
 EXPORT_SYMBOL_GPL(entry_ibpb);
 
 .popsection
+
+THUNK warn_thunk_thunk, __warn_thunk
index 0103e103a6573adab040c4e41b588edcdf7a7ecd..da37f42f45498d8282f2ea805d04eec1bfbfaa44 100644 (file)
@@ -4,33 +4,15 @@
  * Copyright 2008 by Steven Rostedt, Red Hat, Inc
  *  (inspired by Andi Kleen's thunk_64.S)
  */
-       #include <linux/export.h>
-       #include <linux/linkage.h>
-       #include <asm/asm.h>
 
-       /* put return address in eax (arg1) */
-       .macro THUNK name, func, put_ret_addr_in_eax=0
-SYM_CODE_START_NOALIGN(\name)
-       pushl %eax
-       pushl %ecx
-       pushl %edx
+#include <linux/export.h>
+#include <linux/linkage.h>
+#include <asm/asm.h>
 
-       .if \put_ret_addr_in_eax
-       /* Place EIP in the arg1 */
-       movl 3*4(%esp), %eax
-       .endif
+#include "calling.h"
 
-       call \func
-       popl %edx
-       popl %ecx
-       popl %eax
-       RET
-       _ASM_NOKPROBE(\name)
-SYM_CODE_END(\name)
-       .endm
-
-       THUNK preempt_schedule_thunk, preempt_schedule
-       THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
-       EXPORT_SYMBOL(preempt_schedule_thunk)
-       EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
+THUNK preempt_schedule_thunk, preempt_schedule
+THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
+EXPORT_SYMBOL(preempt_schedule_thunk)
+EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
 
index 416b400f39dbb6b16694a6e76b22bb03e8834bc5..119ebdc3d362398092736e7132f16c0c89e97c5a 100644 (file)
@@ -9,39 +9,6 @@
 #include "calling.h"
 #include <asm/asm.h>
 
-       /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
-       .macro THUNK name, func
-SYM_FUNC_START(\name)
-       pushq %rbp
-       movq %rsp, %rbp
-
-       pushq %rdi
-       pushq %rsi
-       pushq %rdx
-       pushq %rcx
-       pushq %rax
-       pushq %r8
-       pushq %r9
-       pushq %r10
-       pushq %r11
-
-       call \func
-
-       popq %r11
-       popq %r10
-       popq %r9
-       popq %r8
-       popq %rax
-       popq %rcx
-       popq %rdx
-       popq %rsi
-       popq %rdi
-       popq %rbp
-       RET
-SYM_FUNC_END(\name)
-       _ASM_NOKPROBE(\name)
-       .endm
-
 THUNK preempt_schedule_thunk, preempt_schedule
 THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
 EXPORT_SYMBOL(preempt_schedule_thunk)
index 2c0679ebe9146a32e66bb69f366edeef404c7f26..55754617eaee88406217a0af6bff5b6c3a5cd97c 100644 (file)
@@ -357,6 +357,8 @@ extern void entry_ibpb(void);
 
 extern void (*x86_return_thunk)(void);
 
+extern void __warn_thunk(void);
+
 #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
 extern void call_depth_return_thunk(void);
 
index f2775417bda26a949699f85d8652a764a7d66f66..a78892b0f82312211787841b18068551f3f37adc 100644 (file)
@@ -2850,3 +2850,8 @@ ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *bu
        return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
 }
 #endif
+
+void __warn_thunk(void)
+{
+       WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
+}
index 0045153ba2224f40623c07d7960c4353879ab6cf..721b528da9acee3e4eb2168bbd65ab7303a4db3a 100644 (file)
@@ -369,19 +369,16 @@ SYM_FUNC_END(call_depth_return_thunk)
  * 'JMP __x86_return_thunk' sites are changed to something else by
  * apply_returns().
  *
- * This should be converted eventually to call a warning function which
- * should scream loudly when the default return thunk is called after
- * alternatives have been applied.
- *
- * That warning function cannot BUG() because the bug splat cannot be
- * displayed in all possible configurations, leading to users not really
- * knowing why the machine froze.
+ * The ALTERNATIVE below adds a really loud warning to catch the case
+ * where the insufficient default return thunk ends up getting used for
+ * whatever reason like miscompilation or failure of
+ * objtool/alternatives/etc to patch all the return sites.
  */
 SYM_CODE_START(__x86_return_thunk)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR
-       ANNOTATE_UNRET_SAFE
-       ret
+       ALTERNATIVE __stringify(ANNOTATE_UNRET_SAFE; ret), \
+                  "jmp warn_thunk_thunk", X86_FEATURE_ALWAYS
        int3
 SYM_CODE_END(__x86_return_thunk)
 EXPORT_SYMBOL(__x86_return_thunk)
This page took 0.066629 seconds and 4 git commands to generate.