/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <[email protected]>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
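
/*
 * For orientation: a "full stack frame" is just struct pt_regs laid out
 * on the kernel stack. In rough C (a sketch of the field order as saved
 * here, not the canonical asm/ptrace.h definition):
 *
 *	struct pt_regs {
 *		unsigned long r15, r14, r13, r12, rbp, rbx, r11, r10;
 *		unsigned long r9, r8, rax, rcx, rdx, rsi, rdi, orig_rax;
 *		unsigned long rip, cs, eflags, rsp, ss;
 *	};
 *
 * The R15..RIP etc. offsets used by the CFI annotations below index
 * into this layout.
 */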

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000

	.code64

#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
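/*
 * gcc -pg inserts a call to mcount at each function entry. The
 * trampolines below save the caller-clobbered argument registers,
 * point %rdi at the call site (the return address minus
 * MCOUNT_INSN_SIZE, the size of the call instruction) and hand
 * control to the tracer.
 */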
ENTRY(mcount)

	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl mcount_call
mcount_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	retq
END(mcount)

ENTRY(ftrace_caller)

	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
.globl ftrace_stub
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FTRACE */

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc 1f
	TRACE_IRQS_ON
1:
#endif
.endm
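
/*
 * Bit 9 of EFLAGS is IF, the interrupt-enable flag, so the test above
 * is roughly this C (a sketch, using the usual flag name):
 *
 *	if (regs->eflags & X86_EFLAGS_IF)
 *		trace_hardirqs_on();
 */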

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq %gs:pda_oldrsp,\tmp
	movq \tmp,RSP(%rsp)
	movq $__USER_DS,SS(%rsp)
	movq $__USER_CS,CS(%rsp)
	movq $-1,RCX(%rsp)
	movq R11(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq RSP-\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS-\offset(%rsp),\tmp
	movq \tmp,R11-\offset(%rsp)
	.endm
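
/*
 * In rough C, FIXUP_TOP_OF_STACK reconstructs the frame fields SYSCALL
 * never saved (a sketch; pda_oldrsp holds the user %rsp stashed at entry):
 *
 *	regs->rsp    = pda.oldrsp;
 *	regs->ss     = __USER_DS;
 *	regs->cs     = __USER_CS;
 *	regs->rcx    = -1;		(poison the SYSRET return address)
 *	regs->eflags = regs->r11;	(SYSCALL stashed rflags in %r11)
 */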

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET ss,0*/
	pushq %rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rsp,0
	pushq $(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET rflags,0*/
	pushq $__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET cs,0*/
	pushq \child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip,0
	pushq %rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET -(6*8)
	.endm

	.macro CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET r15,R15
	CFI_REL_OFFSET r14,R14
	CFI_REL_OFFSET r13,R13
	CFI_REL_OFFSET r12,R12
	CFI_REL_OFFSET rbp,RBP
	CFI_REL_OFFSET rbx,RBX
	CFI_REL_OFFSET r11,R11
	CFI_REL_OFFSET r10,R10
	CFI_REL_OFFSET r9,R9
	CFI_REL_OFFSET r8,R8
	CFI_REL_OFFSET rax,RAX
	CFI_REL_OFFSET rcx,RCX
	CFI_REL_OFFSET rdx,RDX
	CFI_REL_OFFSET rsi,RSI
	CFI_REL_OFFSET rdi,RDI
	CFI_REL_OFFSET rip,RIP
	/*CFI_REL_OFFSET cs,CS*/
	/*CFI_REL_OFFSET rflags,EFLAGS*/
	CFI_REL_OFFSET rsp,RSP
	/*CFI_REL_OFFSET ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 * stack frame and report it properly in ps. Unfortunately we don't.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
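
/*
 * For example, a user-space write(1, buf, count) reaches system_call
 * with (illustration only):
 *
 *	rax = __NR_write (1), rdi = 1, rsi = buf, rdx = count
 *
 * and the kernel moves r10 into rcx before indirecting into
 * sys_call_table, because SYSCALL itself clobbered rcx with the
 * user return address.
 */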

ENTRY(system_call)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,PDA_STACKOFFSET
	CFI_REGISTER rip,rcx
	/*CFI_REGISTER rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq %rsp,%gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
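	/*
	 * In rough C, the fast path above is (a sketch only):
	 *
	 *	if (nr > __NR_syscall_max)
	 *		regs->rax = -ENOSYS;
	 *	else
	 *		regs->rax = sys_call_table[nr](rdi, rsi, rdx,
	 *					       r10, r8, r9);
	 */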
/*
 * Syscall return path ending with SYSRET (fast path).
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER rflags,r11*/
	movq %gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx: work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx: work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because the user could have changed the frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
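	/*
	 * The register moves below line up with a C prototype of
	 * roughly this shape (argument names illustrative):
	 *
	 *	void audit_syscall_entry(int arch, int major,
	 *				 unsigned long a1, unsigned long a2,
	 *				 unsigned long a3, unsigned long a4);
	 */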
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0			/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit. Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi		/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
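	/*
	 * Equivalent C for the flag computation above (a sketch):
	 *
	 *	audit_syscall_exit(ret < 0 ? AUDITSC_FAILURE
	 *				   : AUDITSC_SUCCESS, ret);
	 */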
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx			/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because the user could have changed the frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx: work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls that need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq \func(%rip),%rax
	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp ptregscall_common
END(\label)
	.endm

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME	_frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME	_frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the
 * fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	/*
	 * Save rbp twice: one is for marking the stack frame, as usual, and the
	 * other, to fill pt_regs properly. This is because bx comes right
	 * before the last saved register in that structure, and not bp. If the
	 * base pointer were in the place bx is today, this would not be needed.
	 */
	movq %rbp, -8(%rsp)
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl %gs:pda_irqcount
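	/*
	 * pda_irqcount is initialized to -1, so the incl above sets ZF
	 * exactly on the first (outermost) interrupt; only then does the
	 * cmoveq below actually switch %rsp to the per-CPU interrupt
	 * stack. Nested interrupts keep running on the stack they are on.
	 */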
	cmoveq %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame.
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt $TIF_NEED_RESCHED,%edx
	jnc retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz retint_restore_args
	bt $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc retint_restore_args
	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
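	/*
	 * The kernel GS base is a kernel address, so its high half
	 * (%edx after the rdmsr) has the sign bit set. If %edx is
	 * positive we interrupted user mode and still owe a swapgs.
	 */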
	testl %edx,%edx
	js 1f
	SWAPGS
	xorl %ebx,%ebx
1:
	.if \ist
	movq %gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm

	/*
	 * "Paranoid" exit path from exception stack.
	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */
	.macro paranoidexit trace=1
	/* ebx: no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET (14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je error_swapgs
	jmp error_sti
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi: new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp 2b
	.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
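/*
 * Typical in-kernel usage (illustration; this is how init/main.c
 * spawns kthreadd):
 *
 *	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
 */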
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not rescheduling the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
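/*
 * Typical caller (illustration): run_init_process() in init/main.c
 * boils down to
 *
 *	kernel_execve(init_filename, argv_init, envp_init);
 */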
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp,0
	mov %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp		# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)
3d75e1b8
JF
1378
1379#ifdef CONFIG_XEN
1380ENTRY(xen_hypervisor_callback)
1381 zeroentry xen_do_hypervisor_callback
1382END(xen_hypervisor_callback)
1383
1384/*
1385# A note on the "critical region" in our callback handler.
1386# We want to avoid stacking callback handlers due to events occurring
1387# during handling of the last event. To do this, we keep events disabled
1388# until we've done all processing. HOWEVER, we must enable events before
1389# popping the stack frame (can't be done atomically) and so it would still
1390# be possible to get enough handler activations to overflow the stack.
1391# Although unlikely, bugs of that kind are hard to track down, so we'd
1392# like to avoid the possibility.
1393# So, on entry to the handler we detect whether we interrupted an
1394# existing activation in its critical region -- if so, we pop the current
1395# activation and restart the handler using the previous one.
1396*/
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
	CFI_STARTPROC
/* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
   see the correct pointer to the pt_regs */
	movq %rdi, %rsp		# we don't return, adjust the stack frame
	CFI_ENDPROC
	CFI_DEFAULT_STACK
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp		# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp error_exit
	CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
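
/*
 * Frame layout on entry, as implied by the compares and the final
 * addq $0x30 below (each slot 8 bytes):
 *
 *	0x00(%rsp) rcx    0x08 r11
 *	0x10(%rsp) ds     0x18 es     0x20 fs     0x28 gs
 */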
ENTRY(xen_failsafe_callback)
	framesz = (RIP-0x30)	/* workaround buggy gas */
	_frame framesz
	CFI_REL_OFFSET rcx, 0
	CFI_REL_OFFSET r11, 8
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rcx
	CFI_ADJUST_CFA_OFFSET 8
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */