/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - SYM_FUNC_START/END:Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
SYM_CODE_END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_FLAGS flags:req
#ifdef CONFIG_TRACE_IRQFLAGS
	btl	$9, \flags		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

.macro TRACE_IRQS_IRETQ
	TRACE_IRQS_FLAGS EFLAGS(%rsp)
.endm
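
/*
 * For reference: bit 9 of RFLAGS is IF, so the "btl $9, \flags" in
 * TRACE_IRQS_FLAGS copies the saved interrupt-enable bit into CF and the
 * "jnc 1f" skips TRACE_IRQS_ON when interrupts were off.  E.g. saved flags
 * of 0x246 (IF set) take the TRACE_IRQS_ON path, 0x046 (IF clear) do not.
 */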

/*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. In this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	btl	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When user can change pt_regs->foo always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
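
/*
 * Illustrative caller-side sequence (a minimal sketch, not part of the
 * entry code; "buf" and the byte count are placeholders) showing the
 * register mapping above for write(1, buf, 14):
 *
 *	movl	$1, %eax		# __NR_write
 *	movl	$1, %edi		# arg0: fd
 *	leaq	buf(%rip), %rsi		# arg1: buffer
 *	movl	$14, %edx		# arg2: count
 *	syscall				# rcx/r11 clobbered by the CPU
 */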

SYM_CODE_START(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	/* tss.sp2 is scratch space. */
	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS				/* pt_regs->ss */
	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
	pushq	%r11					/* pt_regs->flags */
	pushq	$__USER_CS				/* pt_regs->cs */
	pushq	%rcx					/* pt_regs->ip */
SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
	pushq	%rax					/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	TRACE_IRQS_OFF

	/* IRQs are off. */
	movq	%rax, %rdi
	movq	%rsp, %rsi
	call	do_syscall_64		/* returns with IRQs disabled */

	TRACE_IRQS_ON			/* return enables interrupts */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
#ifdef CONFIG_X86_5LEVEL
	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
#else
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
#endif

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode
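
	/*
	 * Worked example of the canonical check above, for 4-level paging
	 * (__VIRTUAL_MASK_SHIFT == 47, so the shifts are by 16): the
	 * "shl; sar" pair replicates bit 47 into bits 63:48.  A canonical
	 * return address such as 0x00007f1234567890 is unchanged and the
	 * cmpq above still matches %r11; a non-canonical value such as
	 * 0x00407f1234567890 comes back changed, the compare fails, and we
	 * fall back to the IRET path instead of SYSRET.
	 */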

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	POP_REGS pop_rdi=0 skip_r11rcx=1

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	USERGS_SYSRET64
SYM_CODE_END(entry_SYSCALL_64)

/*
 * %rdi: prev task
 * %rsi: next task
 */
SYM_FUNC_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
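	/*
	 * After the six pushes above, the new frame from the lowest address
	 * (the current %rsp) upward is: r15, r14, r13, r12, rbx, rbp,
	 * return address -- the reverse of the push order, which is the
	 * layout struct inactive_task_frame is expected to mirror.
	 */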

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
SYM_FUNC_END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
SYM_CODE_START(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	TRACE_IRQS_ON				/* user mode is traced as IRQS on */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	UNWIND_HINT_EMPTY
	movq	%r12, %rdi
	CALL_NOSPEC rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
SYM_CODE_END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
SYM_CODE_START(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
SYM_CODE_END(irq_entries_start)
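
/*
 * Vector encoding, worked through for the first external vector (0x20):
 * ~0x20 is -33 and -33 + 0x80 is 0x5f, which fits in a signed byte, so each
 * stub's pushq stays two bytes and the stub packs into its 8-byte slot.
 * common_interrupt/common_spurious later do "addq $-0x80, (%rsp)", turning
 * 0x5f back into -33, i.e. ~vector in the [-256, -1] range, in orig_ax.
 */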

	.align 8
SYM_CODE_START(spurious_entries_start)
    vector=FIRST_SYSTEM_VECTOR
    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_spurious
	.align	8
	vector=vector+1
    .endr
SYM_CODE_END(spurious_entries_start)

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it.  NMI-safe.  Clobbers
 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
	DEBUG_ENTRY_ASSERT_IRQS_OFF

	.if \save_ret
	/*
	 * If save_ret is set, the original stack contains one additional
	 * entry -- the return address. Therefore, move the address one
	 * entry below %rsp to \old_rsp.
	 */
	leaq	8(%rsp), \old_rsp
	.else
	movq	%rsp, \old_rsp
	.endif

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage.  This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack.  For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(hardirq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here.  Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif

	.if \save_ret
	/*
	 * Push the return address to the stack. This return address can
	 * be found at the "real" original RSP, which was offset by 8 at
	 * the beginning of this macro.
	 */
	pushq	-8(\old_rsp)
	.endif
.endm

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
	 * the irq stack but we're not on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm

/*
 * Interrupt entry helper function.
 *
 * Entry runs with interrupts off. Stack layout at entry:
 * +----------------------------------------------------+
 * | regs->ss						|
 * | regs->rsp						|
 * | regs->eflags					|
 * | regs->cs						|
 * | regs->ip						|
 * +----------------------------------------------------+
 * | regs->orig_ax = ~(interrupt number)		|
 * +----------------------------------------------------+
 * | return address					|
 * +----------------------------------------------------+
 */
SYM_CODE_START(interrupt_entry)
	UNWIND_HINT_IRET_REGS offset=16
	ASM_CLAC
	cld

	testb	$3, CS-ORIG_RAX+8(%rsp)
	jz	1f
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/*
	 * Switch to the thread stack. The IRET frame and orig_ax are
	 * on the stack, as well as the return address. RDI..R12 are
	 * not (yet) on the stack and space has not (yet) been
	 * allocated for them.
	 */
	pushq	%rdi

	/* Need to switch before accessing the thread stack. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/*
	 * We have RDI, return address, and orig_ax on the stack on
	 * top of the IRET frame. That means offset=24
	 */
	UNWIND_HINT_IRET_REGS base=%rdi offset=24

	pushq	7*8(%rdi)		/* regs->ss */
	pushq	6*8(%rdi)		/* regs->rsp */
	pushq	5*8(%rdi)		/* regs->eflags */
	pushq	4*8(%rdi)		/* regs->cs */
	pushq	3*8(%rdi)		/* regs->ip */
	UNWIND_HINT_IRET_REGS
	pushq	2*8(%rdi)		/* regs->orig_ax */
	pushq	8(%rdi)			/* return address */

	movq	(%rdi), %rdi
	jmp	2f
1:
	FENCE_SWAPGS_KERNEL_ENTRY
2:
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8

	testb	$3, CS+8(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.
	 *
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode.  There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	ret
SYM_CODE_END(interrupt_entry)
_ASM_NOKPROBE(interrupt_entry)


/* Interrupt entry/exit. */

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_spurious/interrupt.
 */
SYM_CODE_START_LOCAL(common_spurious)
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	smp_spurious_interrupt		/* rdi points to pt_regs */
	jmp	ret_from_intr
SYM_CODE_END(common_spurious)
_ASM_NOKPROBE(common_spurious)

/* common_interrupt is a hotpath. Align it */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
SYM_CODE_START_LOCAL(common_interrupt)
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	do_IRQ				/* rdi points to pt_regs */
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
.Lretint_user:
	mov	%rsp,%rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_ON

SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN


/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPTION
	/* Interrupts are off */
	/* Check if we need preemption */
	btl	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
	 * when returning from IPI handler.
	 */
	INTERRUPT_RETURN

SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax
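	/*
	 * For example: if the user RSP copied into RAX above is
	 * 0x00007ffd1234abcd, the andl leaves 0x12340000 and the orq yields
	 * espfix_stack | 0x12340000 -- an address whose bits 31:16 carry the
	 * user RSP's bits 31:16 while still mapping the same read-only
	 * ESPFIX page.
	 */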

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	SWAPGS					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
SYM_CODE_END(common_interrupt)
_ASM_NOKPROBE(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
SYM_CODE_START(\sym)
	UNWIND_HINT_IRET_REGS
	pushq	$~(\num)
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	\do_sym				/* rdi points to pt_regs */
	jmp	ret_from_intr
SYM_CODE_END(\sym)
_ASM_NOKPROBE(\sym)
.endm

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
#define POP_SECTION_IRQENTRY	.popsection

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
POP_SECTION_IRQENTRY
.endm
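
/*
 * As an illustration of the macros above,
 * "apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt"
 * emits an apic_timer_interrupt stub in .irqentry.text that pushes
 * $~(LOCAL_TIMER_VECTOR), calls interrupt_entry, calls
 * smp_apic_timer_interrupt() with %rdi pointing at pt_regs, and then jumps
 * to ret_from_intr.
 */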

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR	irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR		reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE		uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR	apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR	x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR	kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_NESTED_VECTOR	kvm_posted_intr_nested_ipi	smp_kvm_posted_intr_nested_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR	threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR	deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR	thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR	call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR		reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR		error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR	spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR		irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + (x) * 8)

.macro idtentry_part do_sym, has_error_code:req, read_cr2:req, paranoid:req, shift_ist=-1, ist_offset=0

	.if \paranoid
	call	paranoid_entry
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
	.else
	call	error_entry
	.endif
	UNWIND_HINT_REGS

	.if \read_cr2
	/*
	 * Store CR2 early so subsequent faults cannot clobber it. Use R12 as
	 * intermediate storage as RDX can be clobbered in enter_from_user_mode().
	 * GET_CR2_INTO can clobber RAX.
	 */
	GET_CR2_INTO(%r12);
	.endif

	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif

	.if \paranoid == 0
	testb	$3, CS(%rsp)
	jz	.Lfrom_kernel_no_context_tracking_\@
	CALL_enter_from_user_mode
.Lfrom_kernel_no_context_tracking_\@:
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$\ist_offset, CPU_TSS_IST(\shift_ist)
	.endif

	.if \read_cr2
	movq	%r12, %rdx			/* Move CR2 into 3rd argument */
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$\ist_offset, CPU_TSS_IST(\shift_ist)
	.endif

	.if \paranoid
	/* this procedure expects the "no swapgs" flag in ebx */
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

.endm
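
/*
 * Net effect of idtentry_part: the C handler is invoked as
 * \do_sym(regs, error_code) via %rdi/%rsi, with the saved CR2 value added
 * as a third argument in %rdx when read_cr2=1 (e.g. do_page_fault receives
 * the pt_regs pointer, the error code and the faulting address).
 */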

/**
 * idtentry - Generate an IDT entry stub
 * @sym:		Name of the generated entry point
 * @do_sym:		C function to be called
 * @has_error_code:	True if this IDT vector has an error code on the stack
 * @paranoid:		non-zero means that this vector may be invoked from
 *			kernel mode with user GSBASE and/or user CR3.
 *			2 is special -- see below.
 * @shift_ist:		Set to an IST index if entries from kernel mode should
 *			decrement the IST stack so that nested entries get a
 *			fresh stack.  (This is for #DB, which has a nasty habit
 *			of recursing.)
 * @create_gap:		create a 6-word stack gap when coming from kernel mode.
 * @read_cr2:		load CR2 into the 3rd argument; done before calling any C code
 *
 * idtentry generates an IDT stub that sets up a usable kernel context,
 * creates struct pt_regs, and calls @do_sym.  The stub has the following
 * special behaviors:
 *
 * On an entry from user mode, the stub switches from the trampoline or
 * IST stack to the normal thread stack.  On an exit to user mode, the
 * normal exit-to-usermode path is invoked.
 *
 * On an exit to kernel mode, if @paranoid == 0, we check for preemption,
 * whereas we omit the preemption check if @paranoid != 0.  This is purely
 * because the implementation is simpler this way.  The kernel only needs
 * to check for asynchronous kernel preemption when IRQ handlers return.
 *
 * If @paranoid == 0, then the stub will handle IRET faults by pretending
 * that the fault came from user mode.  It will handle gs_change faults by
 * pretending that the fault happened with kernel GSBASE.  Since this handling
 * is omitted for @paranoid != 0, the #GP, #SS, and #NP stubs must have
 * @paranoid == 0.  This special handling will do the wrong thing for
 * espfix-induced #DF on IRET, so #DF must not use @paranoid == 0.
 *
 * @paranoid == 2 is special: the stub will never switch stacks.  This is for
 * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
 */
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0 read_cr2=0
SYM_CODE_START(\sym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8

	/* Sanity check */
	.if \shift_ist != -1 && \paranoid != 1
	.error "using shift_ist requires paranoid=1"
	.endif

	.if \create_gap && \paranoid
	.error "using create_gap requires paranoid=0"
	.endif

	ASM_CLAC

	.if \has_error_code == 0
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	.if \paranoid == 1
	testb	$3, CS-ORIG_RAX(%rsp)		/* If coming from userspace, switch stacks */
	jnz	.Lfrom_usermode_switch_stack_\@
	.endif

	.if \create_gap == 1
	/*
	 * If coming from kernel space, create a 6-word gap to allow the
	 * int3 handler to emulate a call instruction.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_no_gap_\@
	.rept	6
	pushq	5*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
	.endif

	idtentry_part \do_sym, \has_error_code, \read_cr2, \paranoid, \shift_ist, \ist_offset

	.if \paranoid == 1
	/*
	 * Entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
.Lfrom_usermode_switch_stack_\@:
	idtentry_part \do_sym, \has_error_code, \read_cr2, paranoid=0
	.endif

	_ASM_NOKPROBE(\sym)
SYM_CODE_END(\sym)
.endm

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2 read_cr2=1
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0
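
/*
 * For example, "idtentry divide_error do_divide_error has_error_code=0"
 * above expands to a divide_error stub that pushes $-1 as the fake
 * orig_ax/error code (since #DE supplies none), calls error_entry to build
 * pt_regs, invokes do_divide_error(regs, 0), and exits through error_exit.
 */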


/*
 * Reload gs selector with exception handling
 * edi:  new selector
 */
SYM_FUNC_START(native_load_gs_index)
	FRAME_BEGIN
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	TRACE_IRQS_OFF
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	TRACE_IRQS_FLAGS (%rsp)
	popfq
	FRAME_END
	ret
SYM_FUNC_END(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
SYM_CODE_END(.Lbad_gs)
	.previous
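
/*
 * The _ASM_EXTABLE entry above pairs the potentially faulting
 * "movl %edi, %gs" at .Lgs_change with the .Lbad_gs fixup: if the selector
 * load raises an exception, the handler resumes execution at .Lbad_gs,
 * which switches back to user GS, zeroes %gs (first loading __USER_DS on
 * parts with X86_BUG_NULL_SEG), and jumps back to label 2 as if the load
 * had succeeded.
 */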

/* Call softirq on interrupt stack. Interrupts are off. */
SYM_FUNC_START(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	ENTER_IRQ_STACK regs=0 old_rsp=%r11
	call	__do_softirq
	LEAVE_IRQ_STACK regs=0
	leaveq
	ret
SYM_FUNC_END(do_softirq_own_stack)
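
/*
 * Note on regs=0 above: unlike the interrupt path there is no struct
 * pt_regs on the stack here, so ENTER_IRQ_STACK/LEAVE_IRQ_STACK skip their
 * UNWIND_HINT_REGS annotations; %r11 only receives the old stack pointer
 * so it can be pushed onto, and later popped from, the IRQ stack.
 */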

#ifdef CONFIG_XEN_PV
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
/* do_hypervisor_callback(struct *pt_regs) */
SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)

	/*
	 * Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
	 * see the correct pointer to the pt_regs
	 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	ENTER_IRQ_STACK old_rsp=%r10
	call	xen_evtchn_do_upcall
	LEAVE_IRQ_STACK

#ifndef CONFIG_PREEMPTION
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
SYM_CODE_END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
SYM_CODE_START(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment).  Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1 /* orig_ax = -1 => not a system call */
	PUSH_AND_CLEAR_REGS
	ENCODE_FRAME_POINTER
	jmp	error_exit
SYM_CODE_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */

#ifdef CONFIG_XEN_PVHVM
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall
#endif


#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler

apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
	hyperv_reenlightenment_vector hyperv_reenlightenment_intr

apicinterrupt3 HYPERV_STIMER0_VECTOR \
	hv_stimer0_callback_vector hv_stimer0_vector_handler
#endif /* CONFIG_HYPERV */

#if IS_ENABLED(CONFIG_ACRN_GUEST)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	acrn_hv_callback_vector acrn_hv_vector_handler
#endif

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
idtentry int3			do_int3			has_error_code=0	create_gap=1
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN_PV
idtentry xennmi			do_nmi			has_error_code=0
idtentry xendebug		do_debug		has_error_code=0
#endif

idtentry general_protection	do_general_protection	has_error_code=1
idtentry page_fault		do_page_fault		has_error_code=1	read_cr2=1

#ifdef CONFIG_X86_MCE
idtentry machine_check		do_mce			has_error_code=0	paranoid=1
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use slow, but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
SYM_CODE_START_LOCAL(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx

1:
	/*
	 * Always stash CR3 in %r14.  This value will be restored,
	 * verbatim, at exit.  Needed if paranoid_entry interrupted
	 * another entry that already switched to the user CR3 value
	 * but has not yet returned to userspace.
	 *
	 * This is also why CS (stashed in the "iret frame" by the
	 * hardware at entry) can not be used: this may be a return
	 * to kernel code, but with a user CR3 value.
	 */
	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14

	/*
	 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
	 * unconditional CR3 write, even in the PTI case.  So do an lfence
	 * to prevent GS speculation, regardless of whether PTI is enabled.
	 */
	FENCE_SWAPGS_KERNEL_ENTRY

	ret
SYM_CODE_END(paranoid_entry)
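
/*
 * About the rdmsr check above: rdmsr returns MSR_GS_BASE in EDX:EAX.  A
 * kernel GSBASE (e.g. a per-CPU area at 0xffff888040000000) has bit 63
 * set, so EDX tests negative and SWAPGS is skipped; a user GSBASE (e.g. 0
 * or a TLS pointer like 0x00007f0000000000) leaves EDX non-negative, so we
 * SWAPGS and clear %ebx to request the matching swap on exit.
 */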

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason
 * to try to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
SYM_CODE_START_LOCAL(paranoid_exit)
	UNWIND_HINT_REGS
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	.Lparanoid_exit_no_swapgs
	TRACE_IRQS_IRETQ
	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
	SWAPGS_UNSAFE_STACK
	jmp	restore_regs_and_return_to_kernel
.Lparanoid_exit_no_swapgs:
	TRACE_IRQS_IRETQ_DEBUG
	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(paranoid_exit)
ddeb8f21 AH |
1278 | |
1279 | /* | |
9e809d15 | 1280 | * Save all registers in pt_regs, and switch GS if needed. |
ddeb8f21 | 1281 | */ |
ef1e0315 | 1282 | SYM_CODE_START_LOCAL(error_entry) |
9e809d15 | 1283 | UNWIND_HINT_FUNC |
ddeb8f21 | 1284 | cld |
9e809d15 DB |
1285 | PUSH_AND_CLEAR_REGS save_ret=1 |
1286 | ENCODE_FRAME_POINTER 8 | |
03335e95 | 1287 | testb $3, CS+8(%rsp) |
cb6f64ed | 1288 | jz .Lerror_kernelspace |
539f5113 | 1289 | |
cb6f64ed AL |
1290 | /* |
1291 | * We entered from user mode or we're pretending to have entered | |
1292 | * from user mode due to an IRET fault. | |
1293 | */ | |
ddeb8f21 | 1294 | SWAPGS |
18ec54fd | 1295 | FENCE_SWAPGS_USER_ENTRY |
8a09317b DH |
1296 | /* We have user CR3. Change to kernel CR3. */ |
1297 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rax | |
539f5113 | 1298 | |
cb6f64ed | 1299 | .Lerror_entry_from_usermode_after_swapgs: |
7f2590a1 AL |
1300 | /* Put us onto the real thread stack. */ |
1301 | popq %r12 /* save return addr in %12 */ | |
1302 | movq %rsp, %rdi /* arg0 = pt_regs pointer */ | |
1303 | call sync_regs | |
1304 | movq %rax, %rsp /* switch stack */ | |
1305 | ENCODE_FRAME_POINTER | |
1306 | pushq %r12 | |
f1075053 | 1307 | ret |
02bc7768 | 1308 | |
18ec54fd JP |
1309 | .Lerror_entry_done_lfence: |
1310 | FENCE_SWAPGS_KERNEL_ENTRY | |
cb6f64ed | 1311 | .Lerror_entry_done: |
ddeb8f21 | 1312 | ret |
ddeb8f21 | 1313 | |
ebfc453e DV |
1314 | /* |
1315 | * There are two places in the kernel that can potentially fault with | |
1316 | * usergs. Handle them here. B stepping K8s sometimes report a | |
1317 | * truncated RIP for IRET exceptions returning to compat mode. Check | |
1318 | * for these here too. | |
1319 | */ | |
cb6f64ed | 1320 | .Lerror_kernelspace: |
4d732138 IM |
1321 | leaq native_irq_return_iret(%rip), %rcx |
1322 | cmpq %rcx, RIP+8(%rsp) | |
cb6f64ed | 1323 | je .Lerror_bad_iret |
4d732138 IM |
1324 | movl %ecx, %eax /* zero extend */ |
1325 | cmpq %rax, RIP+8(%rsp) | |
cb6f64ed | 1326 | je .Lbstep_iret |
42c748bb | 1327 | cmpq $.Lgs_change, RIP+8(%rsp) |
18ec54fd | 1328 | jne .Lerror_entry_done_lfence |
539f5113 AL |
1329 | |
1330 | /* | |
42c748bb | 1331 | * hack: .Lgs_change can fail with user gsbase. If this happens, fix up |
539f5113 | 1332 | * gsbase and proceed. We'll fix up the exception and land in |
42c748bb | 1333 | * .Lgs_change's error handler with kernel gsbase. |
539f5113 | 1334 | */ |
2fa5f04f | 1335 | SWAPGS |
18ec54fd | 1336 | FENCE_SWAPGS_USER_ENTRY |
2fa5f04f | 1337 | jmp .Lerror_entry_done |
ae24ffe5 | 1338 | |
cb6f64ed | 1339 | .Lbstep_iret: |
ae24ffe5 | 1340 | /* Fix truncated RIP */ |
4d732138 | 1341 | movq %rcx, RIP+8(%rsp) |
b645af2d AL |
1342 | /* fall through */ |
1343 | ||
cb6f64ed | 1344 | .Lerror_bad_iret: |
539f5113 | 1345 | /* |
8a09317b DH |
1346 | * We came from an IRET to user mode, so we have user |
1347 | * gsbase and CR3. Switch to kernel gsbase and CR3: | |
539f5113 | 1348 | */ |
b645af2d | 1349 | SWAPGS |
18ec54fd | 1350 | FENCE_SWAPGS_USER_ENTRY |
8a09317b | 1351 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rax |
539f5113 AL |
1352 | |
1353 | /* | |
1354 | * Pretend that the exception came from user mode: set up pt_regs | |
b3681dd5 | 1355 | * as if we faulted immediately after IRET. |
539f5113 | 1356 | */ |
4d732138 IM |
1357 | mov %rsp, %rdi |
1358 | call fixup_bad_iret | |
1359 | mov %rax, %rsp | |
cb6f64ed | 1360 | jmp .Lerror_entry_from_usermode_after_swapgs |
ef1e0315 | 1361 | SYM_CODE_END(error_entry) |
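
Editor's note: the comment above says the goal of the fixup_bad_iret step is to make the fault look like it hit immediately after IRET in user mode. A rough C sketch of that idea, with invented types, and without the relocation to a safe stack that the real helper also performs.

#include <stdint.h>
#include <string.h>

struct iret_frame_sketch { uint64_t ip, cs, flags, sp, ss; };
struct regs_sketch { uint64_t gpregs[16]; struct iret_frame_sketch frame; };

/*
 * When the IRET at native_irq_return_iret faults, the saved SP still points
 * at the user frame IRET was consuming. Copying that frame into the pt_regs'
 * own IP/CS/FLAGS/SP/SS slots makes it look as if the exception happened
 * "immediately after IRET", i.e. in user mode.
 */
static void fixup_bad_iret_sketch(struct regs_sketch *regs)
{
	const struct iret_frame_sketch *target =
		(const struct iret_frame_sketch *)regs->frame.sp;

	memcpy(&regs->frame, target, sizeof(*target));
}
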
ddeb8f21 | 1362 | |
ef1e0315 | 1363 | SYM_CODE_START_LOCAL(error_exit) |
8c1f7558 | 1364 | UNWIND_HINT_REGS |
2140a994 | 1365 | DISABLE_INTERRUPTS(CLBR_ANY) |
ddeb8f21 | 1366 | TRACE_IRQS_OFF |
b3681dd5 AL |
1367 | testb $3, CS(%rsp) |
1368 | jz retint_kernel | |
30a2441c | 1369 | jmp .Lretint_user |
ef1e0315 | 1370 | SYM_CODE_END(error_exit) |
ddeb8f21 | 1371 | |
929bacec AL |
1372 | /* |
1373 | * Runs on exception stack. Xen PV does not go through this path at all, | |
1374 | * so we can use real assembly here. | |
8a09317b DH |
1375 | * |
1376 | * Registers: | |
1377 | * %r14: Used to save/restore the CR3 of the interrupted context | |
1378 | * when PAGE_TABLE_ISOLATION is in use. Do not clobber. | |
929bacec | 1379 | */ |
bc7b11c0 | 1380 | SYM_CODE_START(nmi) |
8c1f7558 | 1381 | UNWIND_HINT_IRET_REGS |
929bacec | 1382 | |
3f3c8b8c SR |
1383 | /* |
1384 | * We allow breakpoints in NMIs. If a breakpoint occurs, then | |
1385 | * the iretq it performs will take us out of NMI context. | |
1386 | * This means that we can have nested NMIs where the next | |
1387 | * NMI is using the top of the stack of the previous NMI. We | |
1388 | * can't let it execute because the nested NMI will corrupt the | |
1389 | * stack of the previous NMI. NMI handlers are not re-entrant | |
1390 | * anyway. | |
1391 | * | |
1392 | * To handle this case we do the following: | |
1393 | * Check a special location on the stack that contains |
1394 | * a variable that is set when NMIs are executing. | |
1395 | * The interrupted task's stack is also checked to see if it | |
1396 | * is an NMI stack. | |
1397 | * If the variable is not set and the stack is not the NMI | |
1398 | * stack then: | |
1399 | * o Set the special variable on the stack | |
0b22930e AL |
1400 | * o Copy the interrupt frame into an "outermost" location on the |
1401 | * stack | |
1402 | * o Copy the interrupt frame into an "iret" location on the stack | |
3f3c8b8c SR |
1403 | * o Continue processing the NMI |
1404 | * If the variable is set or the previous stack is the NMI stack: | |
0b22930e | 1405 | * o Modify the "iret" location to jump to the repeat_nmi |
3f3c8b8c SR |
1406 | * o return back to the first NMI |
1407 | * | |
1408 | * Now, on exit of the first NMI, we first clear the stack variable. |
1409 | * The NMI-stack check will then tell any nested NMIs at that point that |
1410 | * they are nested. Then we pop the stack normally with iret, and if there was |
1411 | * a nested NMI that updated the copied "iret" frame, a |
1412 | * jump will be made to the repeat_nmi code that will handle the second | |
1413 | * NMI. | |
9b6e6a83 AL |
1414 | * |
1415 | * However, espfix prevents us from directly returning to userspace | |
1416 | * with a single IRET instruction. Similarly, IRET to user mode | |
1417 | * can fault. We therefore handle NMIs from user space like | |
1418 | * other IST entries. | |
3f3c8b8c SR |
1419 | */ |
1420 | ||
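
Editor's note: the nesting test described in the comment above (and implemented in the assembly further down) can be summarized in C. Names are invented; the real code additionally treats an interrupted RIP inside repeat_nmi..end_repeat_nmi as nested before it looks at either condition below.

#include <stdbool.h>
#include <stdint.h>

struct nmi_ctx_sketch {
	bool nmi_executing;	/* the on-stack "NMI executing" variable   */
	bool df_set;		/* RFLAGS.DF of the interrupted context    */
	uint64_t prev_rsp;	/* RSP of the interrupted context          */
	uint64_t nmi_stack_top;	/* top of this CPU's NMI IST stack         */
	uint64_t nmi_stack_size;/* EXCEPTION_STKSZ                         */
};

static bool nmi_is_nested_sketch(const struct nmi_ctx_sketch *c)
{
	bool on_nmi_stack = c->prev_rsp <= c->nmi_stack_top &&
			    c->prev_rsp >= c->nmi_stack_top - c->nmi_stack_size;

	/*
	 * DF guards against userspace pointing RSP into the NMI stack:
	 * SYSCALL masks DF, and the exit path below sets DF before it
	 * clears "NMI executing".
	 */
	return c->nmi_executing || (on_nmi_stack && c->df_set);
}
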
e93c1730 AL |
1421 | ASM_CLAC |
1422 | ||
146b2b09 | 1423 | /* Use %rdx as our temp variable throughout */ |
4d732138 | 1424 | pushq %rdx |
3f3c8b8c | 1425 | |
9b6e6a83 AL |
1426 | testb $3, CS-RIP+8(%rsp) |
1427 | jz .Lnmi_from_kernel | |
1428 | ||
1429 | /* | |
1430 | * NMI from user mode. We need to run on the thread stack, but we | |
1431 | * can't go through the normal entry paths: NMIs are masked, and | |
1432 | * we don't want to enable interrupts, because then we'll end | |
1433 | * up in an awkward situation in which IRQs are on but NMIs | |
1434 | * are off. | |
83c133cf AL |
1435 | * |
1436 | * We also must not push anything to the stack before switching | |
1437 | * stacks lest we corrupt the "NMI executing" variable. | |
9b6e6a83 AL |
1438 | */ |
1439 | ||
929bacec | 1440 | swapgs |
9b6e6a83 | 1441 | cld |
18ec54fd | 1442 | FENCE_SWAPGS_USER_ENTRY |
8a09317b | 1443 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx |
9b6e6a83 AL |
1444 | movq %rsp, %rdx |
1445 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | |
8c1f7558 | 1446 | UNWIND_HINT_IRET_REGS base=%rdx offset=8 |
9b6e6a83 AL |
1447 | pushq 5*8(%rdx) /* pt_regs->ss */ |
1448 | pushq 4*8(%rdx) /* pt_regs->rsp */ | |
1449 | pushq 3*8(%rdx) /* pt_regs->flags */ | |
1450 | pushq 2*8(%rdx) /* pt_regs->cs */ | |
1451 | pushq 1*8(%rdx) /* pt_regs->rip */ | |
8c1f7558 | 1452 | UNWIND_HINT_IRET_REGS |
9b6e6a83 | 1453 | pushq $-1 /* pt_regs->orig_ax */ |
30907fd1 | 1454 | PUSH_AND_CLEAR_REGS rdx=(%rdx) |
946c1911 | 1455 | ENCODE_FRAME_POINTER |
9b6e6a83 AL |
1456 | |
1457 | /* | |
1458 | * At this point we no longer need to worry about stack damage | |
1459 | * due to nesting -- we're on the normal thread stack and we're | |
1460 | * done with the NMI stack. | |
1461 | */ | |
1462 | ||
1463 | movq %rsp, %rdi | |
1464 | movq $-1, %rsi | |
1465 | call do_nmi | |
1466 | ||
45d5a168 | 1467 | /* |
9b6e6a83 | 1468 | * Return back to user mode. We must *not* do the normal exit |
946c1911 | 1469 | * work, because we don't want to enable interrupts. |
45d5a168 | 1470 | */ |
8a055d7f | 1471 | jmp swapgs_restore_regs_and_return_to_usermode |
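
Editor's note: the five pushq instructions above copy the hardware iret frame from the NMI stack, addressed through the old RSP saved in %rdx, onto the thread stack. A hedged C equivalent with placeholder types follows.

#include <stdint.h>
#include <string.h>

struct hw_frame_sketch { uint64_t rip, cs, rflags, rsp, ss; };

static uint64_t switch_to_thread_stack_sketch(uint64_t old_rsp,
					      uint64_t thread_stack_top)
{
	/* (%rdx) holds the saved %rdx; the hardware frame starts 8 bytes up. */
	const struct hw_frame_sketch *src =
		(const struct hw_frame_sketch *)(old_rsp + 8);
	struct hw_frame_sketch *dst =
		(struct hw_frame_sketch *)thread_stack_top - 1;

	memcpy(dst, src, sizeof(*dst));	/* the five pushq 1*8..5*8(%rdx) above */
	return (uint64_t)dst;		/* new RSP on the thread stack */
}
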
45d5a168 | 1472 | |
9b6e6a83 | 1473 | .Lnmi_from_kernel: |
3f3c8b8c | 1474 | /* |
0b22930e AL |
1475 | * Here's what our stack frame will look like: |
1476 | * +---------------------------------------------------------+ | |
1477 | * | original SS | | |
1478 | * | original Return RSP | | |
1479 | * | original RFLAGS | | |
1480 | * | original CS | | |
1481 | * | original RIP | | |
1482 | * +---------------------------------------------------------+ | |
1483 | * | temp storage for rdx | | |
1484 | * +---------------------------------------------------------+ | |
1485 | * | "NMI executing" variable | | |
1486 | * +---------------------------------------------------------+ | |
1487 | * | iret SS } Copied from "outermost" frame | | |
1488 | * | iret Return RSP } on each loop iteration; overwritten | | |
1489 | * | iret RFLAGS } by a nested NMI to force another | | |
1490 | * | iret CS } iteration if needed. | | |
1491 | * | iret RIP } | | |
1492 | * +---------------------------------------------------------+ | |
1493 | * | outermost SS } initialized in first_nmi; | | |
1494 | * | outermost Return RSP } will not be changed before | | |
1495 | * | outermost RFLAGS } NMI processing is done. | | |
1496 | * | outermost CS } Copied to "iret" frame on each | | |
1497 | * | outermost RIP } iteration. | | |
1498 | * +---------------------------------------------------------+ | |
1499 | * | pt_regs | | |
1500 | * +---------------------------------------------------------+ | |
1501 | * | |
1502 | * The "original" frame is used by hardware. Before re-enabling | |
1503 | * NMIs, we need to be done with it, and we need to leave enough | |
1504 | * space for the asm code here. | |
1505 | * | |
1506 | * We return by executing IRET while RSP points to the "iret" frame. | |
1507 | * That will either return for real or it will loop back into NMI | |
1508 | * processing. | |
1509 | * | |
1510 | * The "outermost" frame is copied to the "iret" frame on each | |
1511 | * iteration of the loop, so each iteration starts with the "iret" | |
1512 | * frame pointing to the final return target. | |
1513 | */ | |
1514 | ||
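
Editor's note: the word offsets implied by the diagram above, counted upward from the "outermost RIP" slot, written out as an illustrative enum (names invented). Index 10 matches the "movq $1, 10*8(%rsp)" in repeat_nmi; on the exit path, after POP_REGS and "addq $6*8, %rsp", RSP sits at IRET_RIP and "movq $0, 5*8(%rsp)" clears the same slot.

/* Slot indices (8-byte words) counted up from "outermost RIP". */
enum nmi_frame_slot_sketch {
	OUTERMOST_RIP = 0, OUTERMOST_CS, OUTERMOST_RFLAGS, OUTERMOST_RSP, OUTERMOST_SS,
	IRET_RIP,          IRET_CS,      IRET_RFLAGS,      IRET_RSP,      IRET_SS,
	NMI_EXECUTING,		/* the "NMI executing" variable             */
	SAVED_RDX,		/* temp storage for rdx                     */
	ORIGINAL_RIP, ORIGINAL_CS, ORIGINAL_RFLAGS, ORIGINAL_RSP, ORIGINAL_SS,
};
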
45d5a168 | 1515 | /* |
0b22930e AL |
1516 | * Determine whether we're a nested NMI. |
1517 | * | |
a27507ca AL |
1518 | * If we interrupted kernel code between repeat_nmi and |
1519 | * end_repeat_nmi, then we are a nested NMI. We must not | |
1520 | * modify the "iret" frame because it's being written by | |
1521 | * the outer NMI. That's okay; the outer NMI handler is | |
1522 | * about to call do_nmi anyway, so we can just |
1523 | * resume the outer NMI. | |
45d5a168 | 1524 | */ |
a27507ca AL |
1525 | |
1526 | movq $repeat_nmi, %rdx | |
1527 | cmpq 8(%rsp), %rdx | |
1528 | ja 1f | |
1529 | movq $end_repeat_nmi, %rdx | |
1530 | cmpq 8(%rsp), %rdx | |
1531 | ja nested_nmi_out | |
1532 | 1: | |
45d5a168 | 1533 | |
3f3c8b8c | 1534 | /* |
a27507ca | 1535 | * Now check "NMI executing". If it's set, then we're nested. |
0b22930e AL |
1536 | * This will not detect if we interrupted an outer NMI just |
1537 | * before IRET. | |
3f3c8b8c | 1538 | */ |
4d732138 IM |
1539 | cmpl $1, -8(%rsp) |
1540 | je nested_nmi | |
3f3c8b8c SR |
1541 | |
1542 | /* | |
0b22930e AL |
1543 | * Now test if the previous stack was an NMI stack. This covers |
1544 | * the case where we interrupt an outer NMI after it clears | |
810bc075 AL |
1545 | * "NMI executing" but before IRET. We need to be careful, though: |
1546 | * there is one case in which RSP could point to the NMI stack | |
1547 | * despite there being no NMI active: naughty userspace controls | |
1548 | * RSP at the very beginning of the SYSCALL targets. We can | |
1549 | * pull a fast one on naughty userspace, though: we program | |
1550 | * SYSCALL to mask DF, so userspace cannot cause DF to be set | |
1551 | * if it controls the kernel's RSP. We set DF before we clear | |
1552 | * "NMI executing". | |
3f3c8b8c | 1553 | */ |
0784b364 DV |
1554 | lea 6*8(%rsp), %rdx |
1555 | /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ | |
1556 | cmpq %rdx, 4*8(%rsp) | |
1557 | /* If the stack pointer is above the NMI stack, this is a normal NMI */ | |
1558 | ja first_nmi | |
4d732138 | 1559 | |
0784b364 DV |
1560 | subq $EXCEPTION_STKSZ, %rdx |
1561 | cmpq %rdx, 4*8(%rsp) | |
1562 | /* If it is below the NMI stack, it is a normal NMI */ | |
1563 | jb first_nmi | |
810bc075 AL |
1564 | |
1565 | /* Ah, it is within the NMI stack. */ | |
1566 | ||
1567 | testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) | |
1568 | jz first_nmi /* RSP was user controlled. */ | |
1569 | ||
1570 | /* This is a nested NMI. */ | |
0784b364 | 1571 | |
3f3c8b8c SR |
1572 | nested_nmi: |
1573 | /* | |
0b22930e AL |
1574 | * Modify the "iret" frame to point to repeat_nmi, forcing another |
1575 | * iteration of NMI handling. | |
3f3c8b8c | 1576 | */ |
23a781e9 | 1577 | subq $8, %rsp |
4d732138 IM |
1578 | leaq -10*8(%rsp), %rdx |
1579 | pushq $__KERNEL_DS | |
1580 | pushq %rdx | |
131484c8 | 1581 | pushfq |
4d732138 IM |
1582 | pushq $__KERNEL_CS |
1583 | pushq $repeat_nmi | |
3f3c8b8c SR |
1584 | |
1585 | /* Put stack back */ | |
4d732138 | 1586 | addq $(6*8), %rsp |
3f3c8b8c SR |
1587 | |
1588 | nested_nmi_out: | |
4d732138 | 1589 | popq %rdx |
3f3c8b8c | 1590 | |
0b22930e | 1591 | /* We are returning to kernel mode, so this cannot result in a fault. */ |
929bacec | 1592 | iretq |
3f3c8b8c SR |
1593 | |
1594 | first_nmi: | |
0b22930e | 1595 | /* Restore rdx. */ |
4d732138 | 1596 | movq (%rsp), %rdx |
62610913 | 1597 | |
36f1a77b AL |
1598 | /* Make room for "NMI executing". */ |
1599 | pushq $0 | |
3f3c8b8c | 1600 | |
0b22930e | 1601 | /* Leave room for the "iret" frame */ |
4d732138 | 1602 | subq $(5*8), %rsp |
28696f43 | 1603 | |
0b22930e | 1604 | /* Copy the "original" frame to the "outermost" frame */ |
3f3c8b8c | 1605 | .rept 5 |
4d732138 | 1606 | pushq 11*8(%rsp) |
3f3c8b8c | 1607 | .endr |
8c1f7558 | 1608 | UNWIND_HINT_IRET_REGS |
62610913 | 1609 | |
79fb4ad6 SR |
1610 | /* Everything up to here is safe from nested NMIs */ |
1611 | ||
a97439aa AL |
1612 | #ifdef CONFIG_DEBUG_ENTRY |
1613 | /* | |
1614 | * For ease of testing, unmask NMIs right away. Disabled by | |
1615 | * default because IRET is very expensive. | |
1616 | */ | |
1617 | pushq $0 /* SS */ | |
1618 | pushq %rsp /* RSP (minus 8 because of the previous push) */ | |
1619 | addq $8, (%rsp) /* Fix up RSP */ | |
1620 | pushfq /* RFLAGS */ | |
1621 | pushq $__KERNEL_CS /* CS */ | |
1622 | pushq $1f /* RIP */ | |
929bacec | 1623 | iretq /* continues at repeat_nmi below */ |
8c1f7558 | 1624 | UNWIND_HINT_IRET_REGS |
a97439aa AL |
1625 | 1: |
1626 | #endif | |
1627 | ||
0b22930e | 1628 | repeat_nmi: |
62610913 JB |
1629 | /* |
1630 | * If there was a nested NMI, the first NMI's iret will return | |
1631 | * here. But NMIs are still enabled and we can take another | |
1632 | * nested NMI. The nested NMI checks the interrupted RIP to see | |
1633 | * if it is between repeat_nmi and end_repeat_nmi, and if so | |
1634 | * it will just return, as we are about to repeat an NMI anyway. | |
1635 | * This makes it safe to copy to the stack frame that a nested | |
1636 | * NMI will update. | |
0b22930e AL |
1637 | * |
1638 | * RSP is pointing to "outermost RIP". gsbase is unknown, but, if | |
1639 | * we're repeating an NMI, gsbase has the same value that it had on | |
1640 | * the first iteration. paranoid_entry will load the kernel | |
36f1a77b AL |
1641 | * gsbase if needed before we call do_nmi. "NMI executing" |
1642 | * is zero. | |
62610913 | 1643 | */ |
36f1a77b | 1644 | movq $1, 10*8(%rsp) /* Set "NMI executing". */ |
3f3c8b8c | 1645 | |
62610913 | 1646 | /* |
0b22930e AL |
1647 | * Copy the "outermost" frame to the "iret" frame. NMIs that nest |
1648 | * here must not modify the "iret" frame while we're writing to | |
1649 | * it or it will end up containing garbage. | |
62610913 | 1650 | */ |
4d732138 | 1651 | addq $(10*8), %rsp |
3f3c8b8c | 1652 | .rept 5 |
4d732138 | 1653 | pushq -6*8(%rsp) |
3f3c8b8c | 1654 | .endr |
4d732138 | 1655 | subq $(5*8), %rsp |
62610913 | 1656 | end_repeat_nmi: |
3f3c8b8c SR |
1657 | |
1658 | /* | |
0b22930e AL |
1659 | * Everything below this point can be preempted by a nested NMI. |
1660 | * If this happens, then the inner NMI will change the "iret" | |
1661 | * frame to point back to repeat_nmi. | |
3f3c8b8c | 1662 | */ |
4d732138 | 1663 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
76f5df43 | 1664 | |
1fd466ef | 1665 | /* |
ebfc453e | 1666 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit |
1fd466ef SR |
1667 | * as we should not be calling schedule in NMI context, |
1668 | * even with normal interrupts enabled. An NMI should not be |
1669 | * setting NEED_RESCHED or doing anything else that normal interrupts |
1670 | * and exceptions might do. |
1671 | */ | |
4d732138 | 1672 | call paranoid_entry |
8c1f7558 | 1673 | UNWIND_HINT_REGS |
7fbb98c5 | 1674 | |
ddeb8f21 | 1675 | /* paranoid_entry do_nmi, 0; without TRACE_IRQS_OFF */ |
4d732138 IM |
1676 | movq %rsp, %rdi |
1677 | movq $-1, %rsi | |
1678 | call do_nmi | |
7fbb98c5 | 1679 | |
16561f27 | 1680 | /* Always restore stashed CR3 value (see paranoid_entry) */ |
21e94459 | 1681 | RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 |
8a09317b | 1682 | |
4d732138 IM |
1683 | testl %ebx, %ebx /* swapgs needed? */ |
1684 | jnz nmi_restore | |
ddeb8f21 AH |
1685 | nmi_swapgs: |
1686 | SWAPGS_UNSAFE_STACK | |
1687 | nmi_restore: | |
502af0d7 | 1688 | POP_REGS |
0b22930e | 1689 | |
471ee483 AL |
1690 | /* |
1691 | * Skip orig_ax and the "outermost" frame to point RSP |
1692 | * at the "iret" frame. |
1693 | */ | |
1694 | addq $6*8, %rsp | |
28696f43 | 1695 | |
810bc075 AL |
1696 | /* |
1697 | * Clear "NMI executing". Set DF first so that we can easily | |
1698 | * distinguish the remaining code between here and IRET from | |
929bacec AL |
1699 | * the SYSCALL entry and exit paths. |
1700 | * | |
1701 | * We arguably should just inspect RIP instead, but I (Andy) wrote | |
1702 | * this code when I had the misapprehension that Xen PV supported | |
1703 | * NMIs, and Xen PV would break that approach. | |
810bc075 AL |
1704 | */ |
1705 | std | |
1706 | movq $0, 5*8(%rsp) /* clear "NMI executing" */ | |
0b22930e AL |
1707 | |
1708 | /* | |
929bacec AL |
1709 | * iretq reads the "iret" frame and exits the NMI stack in a |
1710 | * single instruction. We are returning to kernel mode, so this | |
1711 | * cannot result in a fault. Similarly, we don't need to worry | |
1712 | * about espfix64 on the way back to kernel mode. | |
0b22930e | 1713 | */ |
929bacec | 1714 | iretq |
bc7b11c0 | 1715 | SYM_CODE_END(nmi) |
ddeb8f21 | 1716 | |
dffb3f9d AL |
1717 | #ifndef CONFIG_IA32_EMULATION |
1718 | /* | |
1719 | * This handles SYSCALL from 32-bit code. There is no way to program | |
1720 | * MSRs to fully disable 32-bit SYSCALL. | |
1721 | */ | |
bc7b11c0 | 1722 | SYM_CODE_START(ignore_sysret) |
8c1f7558 | 1723 | UNWIND_HINT_EMPTY |
4d732138 | 1724 | mov $-ENOSYS, %eax |
b2b1d94c | 1725 | sysretl |
bc7b11c0 | 1726 | SYM_CODE_END(ignore_sysret) |
dffb3f9d | 1727 | #endif |
2deb4be2 | 1728 | |
bc7b11c0 | 1729 | SYM_CODE_START(rewind_stack_do_exit) |
8c1f7558 | 1730 | UNWIND_HINT_FUNC |
2deb4be2 AL |
1731 | /* Prevent any naive code from trying to unwind to our caller. */ |
1732 | xorl %ebp, %ebp | |
1733 | ||
1734 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rax | |
8c1f7558 | 1735 | leaq -PTREGS_SIZE(%rax), %rsp |
f977df7b | 1736 | UNWIND_HINT_REGS |
2deb4be2 AL |
1737 | |
1738 | call do_exit | |
bc7b11c0 | 1739 | SYM_CODE_END(rewind_stack_do_exit) |