Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/x86_64/entry.S | |
3 | * | |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | |
5 | * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs | |
6 | * Copyright (C) 2000 Pavel Machek <[email protected]> | |
1da177e4 LT |
7 | */ |
8 | ||
9 | /* | |
10 | * entry.S contains the system-call and fault low-level handling routines. | |
11 | * | |
12 | * NOTE: This code handles signal-recognition, which happens every time | |
13 | * after an interrupt and after each system call. | |
0bd7b798 AH |
14 | * |
15 | * Normal syscalls and interrupts don't save a full stack frame; this is |
1da177e4 | 16 | * only done for syscall tracing, signals or fork/exec et al. |
0bd7b798 AH |
17 | * |
18 | * A note on terminology: | |
19 | * - top of stack: Architecture defined interrupt frame from SS to RIP | |
20 | * at the top of the kernel process stack. | |
1da177e4 | 21 | * - partial stack frame: partially saved registers up to R11. |
0bd7b798 | 22 | * - full stack frame: Like partial stack frame, but all registers saved. |
2e91a17b AK |
23 | * |
24 | * Some macro usage: | |
25 | * - CFI macros are used to generate dwarf2 unwind information for better | |
26 | * backtraces. They don't change any code. | |
27 | * - SAVE_ALL/RESTORE_ALL - Save/restore all registers | |
28 | * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify. | |
29 | * There are unfortunately lots of special cases where some registers are |
30 | * not touched. The macro is a big mess that should be cleaned up. |
31 | * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS. | |
32 | * Gives a full stack frame. | |
33 | * - ENTRY/END - Define functions in the symbol table. |
34 | * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack | |
35 | * frame that is otherwise undefined after a SYSCALL | |
36 | * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging. |
37 | * - errorentry/paranoidentry/zeroentry - Define exception entry points. | |
1da177e4 LT |
38 | */ |
39 | ||
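/*
 * Rough sketch of the kernel stack frame the macros in this file build
 * (the R15..SS offset constants used below are authoritative; this is only
 * for orientation).  Higher addresses first:
 *
 *	SS, RSP, EFLAGS, CS, RIP	hardware frame ("top of stack")
 *	ORIG_RAX			syscall nr / error code / -1
 *	RDI RSI RDX RCX RAX R8-R11	partial frame (SAVE_ARGS)
 *	RBX RBP R12-R15			rest (SAVE_REST) - full frame only
 */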
1da177e4 LT |
40 | #include <linux/linkage.h> |
41 | #include <asm/segment.h> | |
1da177e4 LT |
42 | #include <asm/cache.h> |
43 | #include <asm/errno.h> | |
44 | #include <asm/dwarf2.h> | |
45 | #include <asm/calling.h> | |
e2d5df93 | 46 | #include <asm/asm-offsets.h> |
1da177e4 LT |
47 | #include <asm/msr.h> |
48 | #include <asm/unistd.h> | |
49 | #include <asm/thread_info.h> | |
50 | #include <asm/hw_irq.h> | |
5f8efbb9 | 51 | #include <asm/page.h> |
2601e64d | 52 | #include <asm/irqflags.h> |
72fe4858 | 53 | #include <asm/paravirt.h> |
395a59d0 | 54 | #include <asm/ftrace.h> |
9939ddaf | 55 | #include <asm/percpu.h> |
1da177e4 | 56 | |
86a1c34a RM |
57 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ |
58 | #include <linux/elf-em.h> | |
59 | #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) | |
60 | #define __AUDIT_ARCH_64BIT 0x80000000 | |
61 | #define __AUDIT_ARCH_LE 0x40000000 | |
62 | ||
1da177e4 | 63 | .code64 |
606576ce | 64 | #ifdef CONFIG_FUNCTION_TRACER |
d61f82d0 SR |
65 | #ifdef CONFIG_DYNAMIC_FTRACE |
66 | ENTRY(mcount) | |
d61f82d0 SR |
67 | retq |
68 | END(mcount) | |
69 | ||
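/*
 * Argument setup below, as a sketch: once MCOUNT_SAVE_FRAME has made room
 * and saved the argument registers, 0x38(%rsp) is the return address back
 * into the instrumented function, i.e. the address just after its
 * "call mcount"; subtracting MCOUNT_INSN_SIZE points %rdi at the call site
 * itself.  8(%rbp) is the instrumented function's own return address (the
 * parent ip), read through the frame pointer it set up before calling mcount.
 */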
70 | ENTRY(ftrace_caller) | |
60a7ecf4 SR |
71 | cmpl $0, function_trace_stop |
72 | jne ftrace_stub | |
d61f82d0 | 73 | |
d680fe44 | 74 | MCOUNT_SAVE_FRAME |
d61f82d0 SR |
75 | |
76 | movq 0x38(%rsp), %rdi | |
77 | movq 8(%rbp), %rsi | |
395a59d0 | 78 | subq $MCOUNT_INSN_SIZE, %rdi |
d61f82d0 SR |
79 | |
80 | .globl ftrace_call | |
81 | ftrace_call: | |
82 | call ftrace_stub | |
83 | ||
d680fe44 | 84 | MCOUNT_RESTORE_FRAME |
d61f82d0 | 85 | |
48d68b20 FW |
86 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
87 | .globl ftrace_graph_call | |
88 | ftrace_graph_call: | |
89 | jmp ftrace_stub | |
90 | #endif | |
d61f82d0 SR |
91 | |
92 | .globl ftrace_stub | |
93 | ftrace_stub: | |
94 | retq | |
95 | END(ftrace_caller) | |
96 | ||
97 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | |
16444a8a | 98 | ENTRY(mcount) |
60a7ecf4 SR |
99 | cmpl $0, function_trace_stop |
100 | jne ftrace_stub | |
101 | ||
16444a8a ACM |
102 | cmpq $ftrace_stub, ftrace_trace_function |
103 | jnz trace | |
48d68b20 FW |
104 | |
105 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
106 | cmpq $ftrace_stub, ftrace_graph_return | |
107 | jnz ftrace_graph_caller | |
e49dc19c SR |
108 | |
109 | cmpq $ftrace_graph_entry_stub, ftrace_graph_entry | |
110 | jnz ftrace_graph_caller | |
48d68b20 FW |
111 | #endif |
112 | ||
16444a8a ACM |
113 | .globl ftrace_stub |
114 | ftrace_stub: | |
115 | retq | |
116 | ||
117 | trace: | |
d680fe44 | 118 | MCOUNT_SAVE_FRAME |
16444a8a ACM |
119 | |
120 | movq 0x38(%rsp), %rdi | |
121 | movq 8(%rbp), %rsi | |
395a59d0 | 122 | subq $MCOUNT_INSN_SIZE, %rdi |
16444a8a ACM |
123 | |
124 | call *ftrace_trace_function | |
125 | ||
d680fe44 | 126 | MCOUNT_RESTORE_FRAME |
16444a8a ACM |
127 | |
128 | jmp ftrace_stub | |
129 | END(mcount) | |
d61f82d0 | 130 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
606576ce | 131 | #endif /* CONFIG_FUNCTION_TRACER */ |
16444a8a | 132 | |
48d68b20 FW |
133 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
134 | ENTRY(ftrace_graph_caller) | |
135 | cmpl $0, function_trace_stop | |
136 | jne ftrace_stub | |
137 | ||
d680fe44 | 138 | MCOUNT_SAVE_FRAME |
48d68b20 FW |
139 | |
140 | leaq 8(%rbp), %rdi | |
141 | movq 0x38(%rsp), %rsi | |
bb4304c7 | 142 | subq $MCOUNT_INSN_SIZE, %rsi |
48d68b20 FW |
143 | |
144 | call prepare_ftrace_return | |
145 | ||
d680fe44 CG |
146 | MCOUNT_RESTORE_FRAME |
147 | ||
48d68b20 FW |
148 | retq |
149 | END(ftrace_graph_caller) | |
150 | ||
151 | ||
152 | .globl return_to_handler | |
153 | return_to_handler: | |
154 | subq $80, %rsp | |
155 | ||
16444a8a ACM |
156 | movq %rax, (%rsp) |
157 | movq %rcx, 8(%rsp) | |
158 | movq %rdx, 16(%rsp) | |
159 | movq %rsi, 24(%rsp) | |
160 | movq %rdi, 32(%rsp) | |
161 | movq %r8, 40(%rsp) | |
162 | movq %r9, 48(%rsp) | |
48d68b20 FW |
163 | movq %r10, 56(%rsp) |
164 | movq %r11, 64(%rsp) | |
16444a8a | 165 | |
48d68b20 | 166 | call ftrace_return_to_handler |
16444a8a | 167 | |
48d68b20 FW |
168 | movq %rax, 72(%rsp) |
169 | movq 64(%rsp), %r11 | |
170 | movq 56(%rsp), %r10 | |
16444a8a ACM |
171 | movq 48(%rsp), %r9 |
172 | movq 40(%rsp), %r8 | |
173 | movq 32(%rsp), %rdi | |
174 | movq 24(%rsp), %rsi | |
175 | movq 16(%rsp), %rdx | |
176 | movq 8(%rsp), %rcx | |
177 | movq (%rsp), %rax | |
48d68b20 FW |
178 | addq $72, %rsp |
179 | retq | |
180 | #endif | |
16444a8a | 181 | |
16444a8a | 182 | |
dc37db4d | 183 | #ifndef CONFIG_PREEMPT |
1da177e4 | 184 | #define retint_kernel retint_restore_args |
0bd7b798 | 185 | #endif |
2601e64d | 186 | |
72fe4858 | 187 | #ifdef CONFIG_PARAVIRT |
2be29982 | 188 | ENTRY(native_usergs_sysret64) |
72fe4858 GOC |
189 | swapgs |
190 | sysretq | |
191 | #endif /* CONFIG_PARAVIRT */ | |
192 | ||
2601e64d IM |
193 | |
194 | .macro TRACE_IRQS_IRETQ offset=ARGOFFSET | |
195 | #ifdef CONFIG_TRACE_IRQFLAGS | |
196 | bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */ | |
197 | jnc 1f | |
198 | TRACE_IRQS_ON | |
199 | 1: | |
200 | #endif | |
201 | .endm | |
202 | ||
1da177e4 | 203 | /* |
0bd7b798 AH |
204 | * C code is not supposed to know about undefined top of stack. Every time |
205 | * a C function with a pt_regs argument is called from the SYSCALL based |
1da177e4 LT |
206 | * fast path FIXUP_TOP_OF_STACK is needed. |
207 | * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs | |
208 | * manipulation. | |
0bd7b798 AH |
209 | */ |
210 | ||
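/*
 * Sketch of what the two macros below do: SYSCALL stashed the user RIP in
 * %rcx and rflags in %r11 and never wrote the iret-frame slots, so
 * FIXUP_TOP_OF_STACK fills RSP from pda_oldrsp, CS/SS with the user
 * selectors, copies the saved flags from the R11 slot into EFLAGS, and sets
 * RCX to -1 (its user value was destroyed by SYSCALL).
 * RESTORE_TOP_OF_STACK propagates any RSP/EFLAGS edits back the other way
 * for the sysret fast path.
 */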
211 | /* %rsp: at FRAMEEND */ |
c002a1e6 AH |
212 | .macro FIXUP_TOP_OF_STACK tmp offset=0 |
213 | movq %gs:pda_oldrsp,\tmp | |
214 | movq \tmp,RSP+\offset(%rsp) | |
215 | movq $__USER_DS,SS+\offset(%rsp) | |
216 | movq $__USER_CS,CS+\offset(%rsp) | |
217 | movq $-1,RCX+\offset(%rsp) | |
218 | movq R11+\offset(%rsp),\tmp /* get eflags */ | |
219 | movq \tmp,EFLAGS+\offset(%rsp) | |
1da177e4 LT |
220 | .endm |
221 | ||
c002a1e6 AH |
222 | .macro RESTORE_TOP_OF_STACK tmp offset=0 |
223 | movq RSP+\offset(%rsp),\tmp | |
224 | movq \tmp,%gs:pda_oldrsp | |
225 | movq EFLAGS+\offset(%rsp),\tmp | |
226 | movq \tmp,R11+\offset(%rsp) | |
1da177e4 LT |
227 | .endm |
228 | ||
229 | .macro FAKE_STACK_FRAME child_rip | |
230 | /* push in order ss, rsp, eflags, cs, rip */ | |
3829ee6b | 231 | xorl %eax, %eax |
e04e0a63 | 232 | pushq $__KERNEL_DS /* ss */ |
1da177e4 | 233 | CFI_ADJUST_CFA_OFFSET 8 |
7effaa88 | 234 | /*CFI_REL_OFFSET ss,0*/ |
1da177e4 LT |
235 | pushq %rax /* rsp */ |
236 | CFI_ADJUST_CFA_OFFSET 8 | |
7effaa88 | 237 | CFI_REL_OFFSET rsp,0 |
33454539 | 238 | pushq $X86_EFLAGS_IF /* eflags - interrupts on */ |
1da177e4 | 239 | CFI_ADJUST_CFA_OFFSET 8 |
7effaa88 | 240 | /*CFI_REL_OFFSET rflags,0*/ |
1da177e4 LT |
241 | pushq $__KERNEL_CS /* cs */ |
242 | CFI_ADJUST_CFA_OFFSET 8 | |
7effaa88 | 243 | /*CFI_REL_OFFSET cs,0*/ |
1da177e4 LT |
244 | pushq \child_rip /* rip */ |
245 | CFI_ADJUST_CFA_OFFSET 8 | |
7effaa88 | 246 | CFI_REL_OFFSET rip,0 |
1da177e4 LT |
247 | pushq %rax /* orig rax */ |
248 | CFI_ADJUST_CFA_OFFSET 8 | |
249 | .endm | |
250 | ||
251 | .macro UNFAKE_STACK_FRAME | |
252 | addq $8*6, %rsp | |
253 | CFI_ADJUST_CFA_OFFSET -(6*8) | |
254 | .endm | |
255 | ||
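/*
 * The two macros above build and tear down a fake interrupt frame (ss, rsp,
 * eflags, cs, rip plus a zero orig_rax).  Roughly speaking, this gives
 * kernel_thread()/kernel_execve() a complete pt_regs to hand to
 * do_fork()/sys_execve() and lets them leave through the normal return paths.
 */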
dcd072e2 AH |
256 | /* |
257 | * initial frame state for interrupts (and exceptions without error code) | |
258 | */ | |
259 | .macro EMPTY_FRAME start=1 offset=0 | |
7effaa88 | 260 | .if \start |
dcd072e2 | 261 | CFI_STARTPROC simple |
adf14236 | 262 | CFI_SIGNAL_FRAME |
dcd072e2 | 263 | CFI_DEF_CFA rsp,8+\offset |
7effaa88 | 264 | .else |
dcd072e2 | 265 | CFI_DEF_CFA_OFFSET 8+\offset |
7effaa88 | 266 | .endif |
1da177e4 | 267 | .endm |
d99015b1 AH |
268 | |
269 | /* | |
dcd072e2 | 270 | * initial frame state for interrupts (and exceptions without error code) |
d99015b1 | 271 | */ |
dcd072e2 | 272 | .macro INTR_FRAME start=1 offset=0 |
e8a0e276 IM |
273 | EMPTY_FRAME \start, SS+8+\offset-RIP |
274 | /*CFI_REL_OFFSET ss, SS+\offset-RIP*/ | |
275 | CFI_REL_OFFSET rsp, RSP+\offset-RIP | |
276 | /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/ | |
277 | /*CFI_REL_OFFSET cs, CS+\offset-RIP*/ | |
278 | CFI_REL_OFFSET rip, RIP+\offset-RIP | |
d99015b1 AH |
279 | .endm |
280 | ||
d99015b1 AH |
281 | /* |
282 | * initial frame state for exceptions with error code (and interrupts | |
283 | * with vector already pushed) | |
284 | */ | |
dcd072e2 | 285 | .macro XCPT_FRAME start=1 offset=0 |
e8a0e276 | 286 | INTR_FRAME \start, RIP+\offset-ORIG_RAX |
dcd072e2 AH |
287 | /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/ |
288 | .endm | |
289 | ||
290 | /* | |
291 | * frame that enables calling into C. | |
292 | */ | |
293 | .macro PARTIAL_FRAME start=1 offset=0 | |
e8a0e276 IM |
294 | XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET |
295 | CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET | |
296 | CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET | |
297 | CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET | |
298 | CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET | |
299 | CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET | |
300 | CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET | |
301 | CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET | |
302 | CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET | |
303 | CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET | |
dcd072e2 AH |
304 | .endm |
305 | ||
306 | /* | |
307 | * frame that enables passing a complete pt_regs to a C function. | |
308 | */ | |
309 | .macro DEFAULT_FRAME start=1 offset=0 | |
e8a0e276 | 310 | PARTIAL_FRAME \start, R11+\offset-R15 |
dcd072e2 AH |
311 | CFI_REL_OFFSET rbx, RBX+\offset |
312 | CFI_REL_OFFSET rbp, RBP+\offset | |
313 | CFI_REL_OFFSET r12, R12+\offset | |
314 | CFI_REL_OFFSET r13, R13+\offset | |
315 | CFI_REL_OFFSET r14, R14+\offset | |
316 | CFI_REL_OFFSET r15, R15+\offset | |
317 | .endm | |
d99015b1 AH |
318 | |
319 | /* save partial stack frame */ | |
320 | ENTRY(save_args) | |
321 | XCPT_FRAME | |
322 | cld | |
14ae22ba IM |
323 | movq_cfi rdi, RDI+16-ARGOFFSET |
324 | movq_cfi rsi, RSI+16-ARGOFFSET | |
325 | movq_cfi rdx, RDX+16-ARGOFFSET | |
326 | movq_cfi rcx, RCX+16-ARGOFFSET | |
327 | movq_cfi rax, RAX+16-ARGOFFSET | |
328 | movq_cfi r8, R8+16-ARGOFFSET | |
329 | movq_cfi r9, R9+16-ARGOFFSET | |
330 | movq_cfi r10, R10+16-ARGOFFSET | |
331 | movq_cfi r11, R11+16-ARGOFFSET | |
332 | ||
d99015b1 | 333 | leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */ |
14ae22ba | 334 | movq_cfi rbp, 8 /* push %rbp */ |
d99015b1 AH |
335 | leaq 8(%rsp), %rbp /* mov %rsp, %rbp */ |
336 | testl $3, CS(%rdi) | |
337 | je 1f | |
338 | SWAPGS | |
339 | /* | |
340 | * irqcount is used to check if a CPU is already on an interrupt stack | |
341 | * or not. While this is essentially redundant with preempt_count it is | |
342 | * a little cheaper to use a separate counter in the PDA (short of | |
343 | * moving irq_enter into assembly, which would be too much work) | |
344 | */ | |
345 | 1: incl %gs:pda_irqcount | |
346 | jne 2f | |
14ae22ba | 347 | popq_cfi %rax /* move return address... */ |
26f80bd6 | 348 | mov PER_CPU_VAR(irq_stack_ptr),%rsp |
dcd072e2 | 349 | EMPTY_FRAME 0 |
14ae22ba | 350 | pushq_cfi %rax /* ... to the new stack */ |
d99015b1 AH |
351 | /* |
352 | * We entered an interrupt context - irqs are off: | |
353 | */ | |
354 | 2: TRACE_IRQS_OFF | |
355 | ret | |
356 | CFI_ENDPROC | |
357 | END(save_args) | |
358 | ||
c002a1e6 AH |
359 | ENTRY(save_rest) |
360 | PARTIAL_FRAME 1 REST_SKIP+8 | |
361 | movq 5*8+16(%rsp), %r11 /* save return address */ | |
362 | movq_cfi rbx, RBX+16 | |
363 | movq_cfi rbp, RBP+16 | |
364 | movq_cfi r12, R12+16 | |
365 | movq_cfi r13, R13+16 | |
366 | movq_cfi r14, R14+16 | |
367 | movq_cfi r15, R15+16 | |
368 | movq %r11, 8(%rsp) /* return address */ | |
369 | FIXUP_TOP_OF_STACK %r11, 16 | |
370 | ret | |
371 | CFI_ENDPROC | |
372 | END(save_rest) | |
373 | ||
e2f6bc25 AH |
374 | /* save complete stack frame */ |
375 | ENTRY(save_paranoid) | |
376 | XCPT_FRAME 1 RDI+8 | |
377 | cld | |
378 | movq_cfi rdi, RDI+8 | |
379 | movq_cfi rsi, RSI+8 | |
380 | movq_cfi rdx, RDX+8 | |
381 | movq_cfi rcx, RCX+8 | |
382 | movq_cfi rax, RAX+8 | |
383 | movq_cfi r8, R8+8 | |
384 | movq_cfi r9, R9+8 | |
385 | movq_cfi r10, R10+8 | |
386 | movq_cfi r11, R11+8 | |
387 | movq_cfi rbx, RBX+8 | |
388 | movq_cfi rbp, RBP+8 | |
389 | movq_cfi r12, R12+8 | |
390 | movq_cfi r13, R13+8 | |
391 | movq_cfi r14, R14+8 | |
392 | movq_cfi r15, R15+8 | |
393 | movl $1,%ebx | |
394 | movl $MSR_GS_BASE,%ecx | |
395 | rdmsr | |
396 | testl %edx,%edx | |
397 | js 1f /* negative -> in kernel */ | |
398 | SWAPGS | |
399 | xorl %ebx,%ebx | |
400 | 1: ret | |
401 | CFI_ENDPROC | |
402 | END(save_paranoid) | |
403 | ||
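/*
 * Note on the MSR_GS_BASE test above: a negative high half means the GS
 * base already points into the kernel half of the address space, so we are
 * already running on the kernel GS and %ebx stays 1 ("no swapgs needed on
 * exit"); otherwise we SWAPGS here and clear %ebx so paranoid_exit knows to
 * swap back before returning.
 */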
1da177e4 | 404 | /* |
5b3eec0c IM |
405 | * A newly forked process directly context switches into this address. |
406 | * | |
407 | * rdi: prev task we switched from | |
0bd7b798 | 408 | */ |
1da177e4 | 409 | ENTRY(ret_from_fork) |
dcd072e2 | 410 | DEFAULT_FRAME |
5b3eec0c | 411 | |
658fdbef | 412 | push kernel_eflags(%rip) |
e0a5a5d9 | 413 | CFI_ADJUST_CFA_OFFSET 8 |
5b3eec0c | 414 | popf # reset kernel eflags |
e0a5a5d9 | 415 | CFI_ADJUST_CFA_OFFSET -8 |
5b3eec0c IM |
416 | |
417 | call schedule_tail # rdi: 'prev' task parameter | |
418 | ||
1da177e4 | 419 | GET_THREAD_INFO(%rcx) |
5b3eec0c | 420 | |
1cbd8b3f | 421 | CFI_REMEMBER_STATE |
1da177e4 | 422 | RESTORE_REST |
5b3eec0c IM |
423 | |
424 | testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? | |
1da177e4 | 425 | je int_ret_from_sys_call |
5b3eec0c IM |
426 | |
427 | testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET | |
1da177e4 | 428 | jnz int_ret_from_sys_call |
5b3eec0c | 429 | |
c002a1e6 | 430 | RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET |
5b3eec0c IM |
431 | jmp ret_from_sys_call # go to the SYSRET fastpath |
432 | ||
1cbd8b3f | 433 | CFI_RESTORE_STATE |
1da177e4 | 434 | CFI_ENDPROC |
4b787e0b | 435 | END(ret_from_fork) |
1da177e4 LT |
436 | |
437 | /* | |
438 | * System call entry. Up to 6 arguments in registers are supported. |
439 | * | |
440 | * SYSCALL does not save anything on the stack and does not change the | |
441 | * stack pointer. | |
442 | */ | |
0bd7b798 | 443 | |
1da177e4 | 444 | /* |
0bd7b798 | 445 | * Register setup: |
1da177e4 LT |
446 | * rax system call number |
447 | * rdi arg0 | |
0bd7b798 | 448 | * rcx return address for syscall/sysret, C arg3 |
1da177e4 | 449 | * rsi arg1 |
0bd7b798 | 450 | * rdx arg2 |
1da177e4 LT |
451 | * r10 arg3 (--> moved to rcx for C) |
452 | * r8 arg4 | |
453 | * r9 arg5 | |
454 | * r11 eflags for syscall/sysret, temporary for C | |
0bd7b798 AH |
455 | * r12-r15,rbp,rbx saved by C code, not touched. |
456 | * | |
1da177e4 LT |
457 | * Interrupts are off on entry. |
458 | * Only called from user space. | |
459 | * | |
460 | * XXX if we had a free scratch register we could save the RSP into the stack frame | |
461 | * and report it properly in ps. Unfortunately we don't have one. |
7bf36bbc AK |
462 | * |
463 | * When the user can change the frames, always force IRET. That is because |
464 | * it deals with non-canonical addresses better. SYSRET has trouble |
465 | * with them due to bugs in both AMD and Intel CPUs. | |
0bd7b798 | 466 | */ |
1da177e4 LT |
467 | |
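/*
 * Example mapping (not taken from this file, just the standard x86-64
 * syscall ABI): write(fd, buf, count) arrives here with %rax = __NR_write,
 * %rdi = fd, %rsi = buf, %rdx = count, and %rcx = user return address,
 * %r11 = user rflags, both stashed by the SYSCALL instruction itself.
 */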
468 | ENTRY(system_call) | |
7effaa88 | 469 | CFI_STARTPROC simple |
adf14236 | 470 | CFI_SIGNAL_FRAME |
dffead4e | 471 | CFI_DEF_CFA rsp,PDA_STACKOFFSET |
7effaa88 JB |
472 | CFI_REGISTER rip,rcx |
473 | /*CFI_REGISTER rflags,r11*/ | |
72fe4858 GOC |
474 | SWAPGS_UNSAFE_STACK |
475 | /* | |
476 | * A hypervisor implementation might want to use a label | |
477 | * after the swapgs, so that it can do the swapgs | |
478 | * for the guest and jump here on syscall. | |
479 | */ | |
480 | ENTRY(system_call_after_swapgs) | |
481 | ||
0bd7b798 | 482 | movq %rsp,%gs:pda_oldrsp |
1da177e4 | 483 | movq %gs:pda_kernelstack,%rsp |
2601e64d IM |
484 | /* |
485 | * No need to follow this irqs off/on section - it's straight | |
486 | * and short: | |
487 | */ | |
72fe4858 | 488 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 | 489 | SAVE_ARGS 8,1 |
0bd7b798 | 490 | movq %rax,ORIG_RAX-ARGOFFSET(%rsp) |
7effaa88 JB |
491 | movq %rcx,RIP-ARGOFFSET(%rsp) |
492 | CFI_REL_OFFSET rip,RIP-ARGOFFSET | |
1da177e4 | 493 | GET_THREAD_INFO(%rcx) |
d4d67150 | 494 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx) |
1da177e4 | 495 | jnz tracesys |
86a1c34a | 496 | system_call_fastpath: |
1da177e4 LT |
497 | cmpq $__NR_syscall_max,%rax |
498 | ja badsys | |
499 | movq %r10,%rcx | |
500 | call *sys_call_table(,%rax,8) # XXX: rip relative | |
501 | movq %rax,RAX-ARGOFFSET(%rsp) | |
502 | /* | |
503 | * Syscall return path ending with SYSRET (fast path) | |
0bd7b798 AH |
504 | * Has incomplete stack frame and undefined top of stack. |
505 | */ | |
1da177e4 | 506 | ret_from_sys_call: |
11b854b2 | 507 | movl $_TIF_ALLWORK_MASK,%edi |
1da177e4 | 508 | /* edi: flagmask */ |
0bd7b798 | 509 | sysret_check: |
10cd706d | 510 | LOCKDEP_SYS_EXIT |
1da177e4 | 511 | GET_THREAD_INFO(%rcx) |
72fe4858 | 512 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 513 | TRACE_IRQS_OFF |
26ccb8a7 | 514 | movl TI_flags(%rcx),%edx |
1da177e4 | 515 | andl %edi,%edx |
0bd7b798 | 516 | jnz sysret_careful |
bcddc015 | 517 | CFI_REMEMBER_STATE |
2601e64d IM |
518 | /* |
519 | * sysretq will re-enable interrupts: | |
520 | */ | |
521 | TRACE_IRQS_ON | |
1da177e4 | 522 | movq RIP-ARGOFFSET(%rsp),%rcx |
7effaa88 | 523 | CFI_REGISTER rip,rcx |
1da177e4 | 524 | RESTORE_ARGS 0,-ARG_SKIP,1 |
7effaa88 | 525 | /*CFI_REGISTER rflags,r11*/ |
c7245da6 | 526 | movq %gs:pda_oldrsp, %rsp |
2be29982 | 527 | USERGS_SYSRET64 |
1da177e4 | 528 | |
bcddc015 | 529 | CFI_RESTORE_STATE |
1da177e4 | 530 | /* Handle reschedules */ |
0bd7b798 | 531 | /* edx: work, edi: workmask */ |
1da177e4 LT |
532 | sysret_careful: |
533 | bt $TIF_NEED_RESCHED,%edx | |
534 | jnc sysret_signal | |
2601e64d | 535 | TRACE_IRQS_ON |
72fe4858 | 536 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 | 537 | pushq %rdi |
7effaa88 | 538 | CFI_ADJUST_CFA_OFFSET 8 |
1da177e4 LT |
539 | call schedule |
540 | popq %rdi | |
7effaa88 | 541 | CFI_ADJUST_CFA_OFFSET -8 |
1da177e4 LT |
542 | jmp sysret_check |
543 | ||
0bd7b798 | 544 | /* Handle a signal */ |
1da177e4 | 545 | sysret_signal: |
2601e64d | 546 | TRACE_IRQS_ON |
72fe4858 | 547 | ENABLE_INTERRUPTS(CLBR_NONE) |
86a1c34a RM |
548 | #ifdef CONFIG_AUDITSYSCALL |
549 | bt $TIF_SYSCALL_AUDIT,%edx | |
550 | jc sysret_audit | |
551 | #endif | |
10ffdbb8 | 552 | /* edx: work flags (arg3) */ |
1da177e4 LT |
553 | leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 |
554 | xorl %esi,%esi # oldset -> arg2 | |
c8108411 AH |
555 | SAVE_REST |
556 | FIXUP_TOP_OF_STACK %r11 | |
557 | call do_notify_resume | |
558 | RESTORE_TOP_OF_STACK %r11 | |
559 | RESTORE_REST | |
15e8f348 | 560 | movl $_TIF_WORK_MASK,%edi |
7bf36bbc AK |
561 | /* Use IRET because the user could have changed the frame. This |
562 | works because ptregscall_common has called FIXUP_TOP_OF_STACK. */ | |
72fe4858 | 563 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 564 | TRACE_IRQS_OFF |
7bf36bbc | 565 | jmp int_with_check |
0bd7b798 | 566 | |
7effaa88 JB |
567 | badsys: |
568 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) | |
569 | jmp ret_from_sys_call | |
570 | ||
86a1c34a RM |
571 | #ifdef CONFIG_AUDITSYSCALL |
572 | /* | |
573 | * Fast path for syscall audit without full syscall trace. | |
574 | * We just call audit_syscall_entry() directly, and then | |
575 | * jump back to the normal fast path. | |
576 | */ | |
577 | auditsys: | |
578 | movq %r10,%r9 /* 6th arg: 4th syscall arg */ | |
579 | movq %rdx,%r8 /* 5th arg: 3rd syscall arg */ | |
580 | movq %rsi,%rcx /* 4th arg: 2nd syscall arg */ | |
581 | movq %rdi,%rdx /* 3rd arg: 1st syscall arg */ | |
582 | movq %rax,%rsi /* 2nd arg: syscall number */ | |
583 | movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */ | |
584 | call audit_syscall_entry | |
585 | LOAD_ARGS 0 /* reload call-clobbered registers */ | |
586 | jmp system_call_fastpath | |
587 | ||
588 | /* | |
589 | * Return fast path for syscall audit. Call audit_syscall_exit() | |
590 | * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT | |
591 | * masked off. | |
592 | */ | |
593 | sysret_audit: | |
594 | movq %rax,%rsi /* second arg, syscall return value */ | |
595 | cmpq $0,%rax /* is it < 0? */ | |
596 | setl %al /* 1 if so, 0 if not */ | |
597 | movzbl %al,%edi /* zero-extend that into %edi */ | |
598 | inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ | |
599 | call audit_syscall_exit | |
600 | movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi | |
601 | jmp sysret_check | |
602 | #endif /* CONFIG_AUDITSYSCALL */ | |
603 | ||
1da177e4 | 604 | /* Do syscall tracing */ |
0bd7b798 | 605 | tracesys: |
86a1c34a RM |
606 | #ifdef CONFIG_AUDITSYSCALL |
607 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx) | |
608 | jz auditsys | |
609 | #endif | |
1da177e4 | 610 | SAVE_REST |
a31f8dd7 | 611 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ |
1da177e4 LT |
612 | FIXUP_TOP_OF_STACK %rdi |
613 | movq %rsp,%rdi | |
614 | call syscall_trace_enter | |
d4d67150 RM |
615 | /* |
616 | * Reload arg registers from stack in case ptrace changed them. | |
617 | * We don't reload %rax because syscall_trace_enter() returned | |
618 | * the value it wants us to use in the table lookup. | |
619 | */ | |
620 | LOAD_ARGS ARGOFFSET, 1 | |
1da177e4 LT |
621 | RESTORE_REST |
622 | cmpq $__NR_syscall_max,%rax | |
a31f8dd7 | 623 | ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ |
1da177e4 LT |
624 | movq %r10,%rcx /* fixup for C */ |
625 | call *sys_call_table(,%rax,8) | |
a31f8dd7 | 626 | movq %rax,RAX-ARGOFFSET(%rsp) |
7bf36bbc | 627 | /* Use IRET because the user could have changed the frame */ |
0bd7b798 AH |
628 | |
629 | /* | |
1da177e4 LT |
630 | * Syscall return path ending with IRET. |
631 | * Has correct top of stack, but partial stack frame. | |
bcddc015 JB |
632 | */ |
633 | .globl int_ret_from_sys_call | |
5cbf1565 | 634 | .globl int_with_check |
bcddc015 | 635 | int_ret_from_sys_call: |
72fe4858 | 636 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 637 | TRACE_IRQS_OFF |
1da177e4 LT |
638 | testl $3,CS-ARGOFFSET(%rsp) |
639 | je retint_restore_args | |
640 | movl $_TIF_ALLWORK_MASK,%edi | |
641 | /* edi: mask to check */ | |
642 | int_with_check: | |
10cd706d | 643 | LOCKDEP_SYS_EXIT_IRQ |
1da177e4 | 644 | GET_THREAD_INFO(%rcx) |
26ccb8a7 | 645 | movl TI_flags(%rcx),%edx |
1da177e4 LT |
646 | andl %edi,%edx |
647 | jnz int_careful | |
26ccb8a7 | 648 | andl $~TS_COMPAT,TI_status(%rcx) |
1da177e4 LT |
649 | jmp retint_swapgs |
650 | ||
651 | /* Either reschedule or signal or syscall exit tracking needed. */ | |
652 | /* First do a reschedule test. */ | |
653 | /* edx: work, edi: workmask */ | |
654 | int_careful: | |
655 | bt $TIF_NEED_RESCHED,%edx | |
656 | jnc int_very_careful | |
2601e64d | 657 | TRACE_IRQS_ON |
72fe4858 | 658 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 | 659 | pushq %rdi |
7effaa88 | 660 | CFI_ADJUST_CFA_OFFSET 8 |
1da177e4 LT |
661 | call schedule |
662 | popq %rdi | |
7effaa88 | 663 | CFI_ADJUST_CFA_OFFSET -8 |
72fe4858 | 664 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 665 | TRACE_IRQS_OFF |
1da177e4 LT |
666 | jmp int_with_check |
667 | ||
668 | /* handle signals and tracing -- both require a full stack frame */ | |
669 | int_very_careful: | |
2601e64d | 670 | TRACE_IRQS_ON |
72fe4858 | 671 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 | 672 | SAVE_REST |
0bd7b798 | 673 | /* Check for syscall exit trace */ |
d4d67150 | 674 | testl $_TIF_WORK_SYSCALL_EXIT,%edx |
1da177e4 LT |
675 | jz int_signal |
676 | pushq %rdi | |
7effaa88 | 677 | CFI_ADJUST_CFA_OFFSET 8 |
0bd7b798 | 678 | leaq 8(%rsp),%rdi # &ptregs -> arg1 |
1da177e4 LT |
679 | call syscall_trace_leave |
680 | popq %rdi | |
7effaa88 | 681 | CFI_ADJUST_CFA_OFFSET -8 |
d4d67150 | 682 | andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi |
1da177e4 | 683 | jmp int_restore_rest |
0bd7b798 | 684 | |
1da177e4 | 685 | int_signal: |
8f4d37ec | 686 | testl $_TIF_DO_NOTIFY_MASK,%edx |
1da177e4 LT |
687 | jz 1f |
688 | movq %rsp,%rdi # &ptregs -> arg1 | |
689 | xorl %esi,%esi # oldset -> arg2 | |
690 | call do_notify_resume | |
eca91e78 | 691 | 1: movl $_TIF_WORK_MASK,%edi |
1da177e4 LT |
692 | int_restore_rest: |
693 | RESTORE_REST | |
72fe4858 | 694 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 695 | TRACE_IRQS_OFF |
1da177e4 LT |
696 | jmp int_with_check |
697 | CFI_ENDPROC | |
bcddc015 | 698 | END(system_call) |
0bd7b798 AH |
699 | |
700 | /* | |
1da177e4 | 701 | * Certain special system calls that need to save a complete full stack frame. |
0bd7b798 | 702 | */ |
1da177e4 | 703 | .macro PTREGSCALL label,func,arg |
c002a1e6 AH |
704 | ENTRY(\label) |
705 | PARTIAL_FRAME 1 8 /* offset 8: return address */ | |
706 | subq $REST_SKIP, %rsp | |
707 | CFI_ADJUST_CFA_OFFSET REST_SKIP | |
708 | call save_rest | |
709 | DEFAULT_FRAME 0 8 /* offset 8: return address */ | |
710 | leaq 8(%rsp), \arg /* pt_regs pointer */ | |
711 | call \func | |
712 | jmp ptregscall_common | |
713 | CFI_ENDPROC | |
4b787e0b | 714 | END(\label) |
1da177e4 LT |
715 | .endm |
716 | ||
717 | PTREGSCALL stub_clone, sys_clone, %r8 | |
718 | PTREGSCALL stub_fork, sys_fork, %rdi | |
719 | PTREGSCALL stub_vfork, sys_vfork, %rdi | |
1da177e4 LT |
720 | PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx |
721 | PTREGSCALL stub_iopl, sys_iopl, %rsi | |
722 | ||
723 | ENTRY(ptregscall_common) | |
c002a1e6 AH |
724 | DEFAULT_FRAME 1 8 /* offset 8: return address */ |
725 | RESTORE_TOP_OF_STACK %r11, 8 | |
726 | movq_cfi_restore R15+8, r15 | |
727 | movq_cfi_restore R14+8, r14 | |
728 | movq_cfi_restore R13+8, r13 | |
729 | movq_cfi_restore R12+8, r12 | |
730 | movq_cfi_restore RBP+8, rbp | |
731 | movq_cfi_restore RBX+8, rbx | |
732 | ret $REST_SKIP /* pop extended registers */ | |
1da177e4 | 733 | CFI_ENDPROC |
4b787e0b | 734 | END(ptregscall_common) |
0bd7b798 | 735 | |
1da177e4 LT |
736 | ENTRY(stub_execve) |
737 | CFI_STARTPROC | |
738 | popq %r11 | |
7effaa88 JB |
739 | CFI_ADJUST_CFA_OFFSET -8 |
740 | CFI_REGISTER rip, r11 | |
1da177e4 | 741 | SAVE_REST |
1da177e4 | 742 | FIXUP_TOP_OF_STACK %r11 |
5d119b2c | 743 | movq %rsp, %rcx |
1da177e4 | 744 | call sys_execve |
1da177e4 | 745 | RESTORE_TOP_OF_STACK %r11 |
1da177e4 LT |
746 | movq %rax,RAX(%rsp) |
747 | RESTORE_REST | |
748 | jmp int_ret_from_sys_call | |
749 | CFI_ENDPROC | |
4b787e0b | 750 | END(stub_execve) |
0bd7b798 | 751 | |
1da177e4 LT |
752 | /* |
753 | * sigreturn is special because it needs to restore all registers on return. | |
754 | * This cannot be done with SYSRET, so use the IRET return path instead. | |
0bd7b798 | 755 | */ |
1da177e4 LT |
756 | ENTRY(stub_rt_sigreturn) |
757 | CFI_STARTPROC | |
7effaa88 JB |
758 | addq $8, %rsp |
759 | CFI_ADJUST_CFA_OFFSET -8 | |
1da177e4 LT |
760 | SAVE_REST |
761 | movq %rsp,%rdi | |
762 | FIXUP_TOP_OF_STACK %r11 | |
763 | call sys_rt_sigreturn | |
764 | movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer | |
765 | RESTORE_REST | |
766 | jmp int_ret_from_sys_call | |
767 | CFI_ENDPROC | |
4b787e0b | 768 | END(stub_rt_sigreturn) |
1da177e4 | 769 | |
939b7871 PA |
770 | /* |
771 | * Build the entry stubs and pointer table with some assembler magic. | |
772 | * We pack 7 stubs into a single 32-byte chunk, which will fit in a | |
773 | * single cache line on all modern x86 implementations. | |
774 | */ | |
775 | .section .init.rodata,"a" | |
776 | ENTRY(interrupt) | |
777 | .text | |
778 | .p2align 5 | |
779 | .p2align CONFIG_X86_L1_CACHE_SHIFT | |
780 | ENTRY(irq_entries_start) | |
781 | INTR_FRAME | |
782 | vector=FIRST_EXTERNAL_VECTOR | |
783 | .rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7 | |
784 | .balign 32 | |
785 | .rept 7 | |
786 | .if vector < NR_VECTORS | |
8665596e | 787 | .if vector <> FIRST_EXTERNAL_VECTOR |
939b7871 PA |
788 | CFI_ADJUST_CFA_OFFSET -8 |
789 | .endif | |
790 | 1: pushq $(~vector+0x80) /* Note: always in signed byte range */ | |
791 | CFI_ADJUST_CFA_OFFSET 8 | |
8665596e | 792 | .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 |
939b7871 PA |
793 | jmp 2f |
794 | .endif | |
795 | .previous | |
796 | .quad 1b | |
797 | .text | |
798 | vector=vector+1 | |
799 | .endif | |
800 | .endr | |
801 | 2: jmp common_interrupt | |
802 | .endr | |
803 | CFI_ENDPROC | |
804 | END(irq_entries_start) | |
805 | ||
806 | .previous | |
807 | END(interrupt) | |
808 | .previous | |
809 | ||
d99015b1 | 810 | /* |
1da177e4 LT |
811 | * Interrupt entry/exit. |
812 | * | |
813 | * Interrupt entry points save only callee clobbered registers in fast path. | |
d99015b1 AH |
814 | * |
815 | * Entry runs with interrupts off. | |
816 | */ | |
1da177e4 | 817 | |
722024db | 818 | /* 0(%rsp): ~(interrupt number) */ |
1da177e4 | 819 | .macro interrupt func |
d99015b1 AH |
820 | subq $10*8, %rsp |
821 | CFI_ADJUST_CFA_OFFSET 10*8 | |
822 | call save_args | |
dcd072e2 | 823 | PARTIAL_FRAME 0 |
1da177e4 LT |
824 | call \func |
825 | .endm | |
826 | ||
722024db AH |
827 | /* |
828 | * The interrupt stubs push (~vector+0x80) onto the stack and | |
829 | * then jump to common_interrupt. | |
830 | */ | |
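/*
 * Worked example of the encoding (vector 0x30, say): the stub pushes
 * ~0x30 + 0x80 = 0x4f, which still fits in a signed byte so the pushq stays
 * short; common_interrupt then adds -0x80, giving -0x31 = ~0x30, i.e. a
 * value in the [-256,-1] range from which the handler can recover the vector.
 */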
939b7871 PA |
831 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
832 | common_interrupt: | |
7effaa88 | 833 | XCPT_FRAME |
722024db | 834 | addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ |
1da177e4 LT |
835 | interrupt do_IRQ |
836 | /* 0(%rsp): oldrsp-ARGOFFSET */ | |
7effaa88 | 837 | ret_from_intr: |
72fe4858 | 838 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 839 | TRACE_IRQS_OFF |
3829ee6b | 840 | decl %gs:pda_irqcount |
1de9c3f6 | 841 | leaveq |
7effaa88 | 842 | CFI_DEF_CFA_REGISTER rsp |
1de9c3f6 | 843 | CFI_ADJUST_CFA_OFFSET -8 |
7effaa88 | 844 | exit_intr: |
1da177e4 LT |
845 | GET_THREAD_INFO(%rcx) |
846 | testl $3,CS-ARGOFFSET(%rsp) | |
847 | je retint_kernel | |
0bd7b798 | 848 | |
1da177e4 LT |
849 | /* Interrupt came from user space */ |
850 | /* | |
851 | * Has a correct top of stack, but a partial stack frame | |
852 | * %rcx: thread info. Interrupts off. | |
0bd7b798 | 853 | */ |
1da177e4 LT |
854 | retint_with_reschedule: |
855 | movl $_TIF_WORK_MASK,%edi | |
7effaa88 | 856 | retint_check: |
10cd706d | 857 | LOCKDEP_SYS_EXIT_IRQ |
26ccb8a7 | 858 | movl TI_flags(%rcx),%edx |
1da177e4 | 859 | andl %edi,%edx |
7effaa88 | 860 | CFI_REMEMBER_STATE |
1da177e4 | 861 | jnz retint_careful |
10cd706d PZ |
862 | |
863 | retint_swapgs: /* return to user-space */ | |
2601e64d IM |
864 | /* |
865 | * The iretq could re-enable interrupts: | |
866 | */ | |
72fe4858 | 867 | DISABLE_INTERRUPTS(CLBR_ANY) |
2601e64d | 868 | TRACE_IRQS_IRETQ |
72fe4858 | 869 | SWAPGS |
2601e64d IM |
870 | jmp restore_args |
871 | ||
10cd706d | 872 | retint_restore_args: /* return to kernel space */ |
72fe4858 | 873 | DISABLE_INTERRUPTS(CLBR_ANY) |
2601e64d IM |
874 | /* |
875 | * The iretq could re-enable interrupts: | |
876 | */ | |
877 | TRACE_IRQS_IRETQ | |
878 | restore_args: | |
3701d863 IM |
879 | RESTORE_ARGS 0,8,0 |
880 | ||
f7f3d791 | 881 | irq_return: |
72fe4858 | 882 | INTERRUPT_RETURN |
3701d863 IM |
883 | |
884 | .section __ex_table, "a" | |
885 | .quad irq_return, bad_iret | |
886 | .previous | |
887 | ||
888 | #ifdef CONFIG_PARAVIRT | |
72fe4858 | 889 | ENTRY(native_iret) |
1da177e4 LT |
890 | iretq |
891 | ||
892 | .section __ex_table,"a" | |
72fe4858 | 893 | .quad native_iret, bad_iret |
1da177e4 | 894 | .previous |
3701d863 IM |
895 | #endif |
896 | ||
1da177e4 | 897 | .section .fixup,"ax" |
1da177e4 | 898 | bad_iret: |
3aa4b37d RM |
899 | /* |
900 | * The iret traps when the %cs or %ss being restored is bogus. | |
901 | * We've lost the original trap vector and error code. | |
902 | * #GPF is the most likely one to get for an invalid selector. | |
903 | * So pretend we completed the iret and took the #GPF in user mode. | |
904 | * | |
905 | * We are now running with the kernel GS after exception recovery. | |
906 | * But error_entry expects us to have user GS to match the user %cs, | |
907 | * so swap back. | |
908 | */ | |
909 | pushq $0 | |
910 | ||
911 | SWAPGS | |
912 | jmp general_protection | |
913 | ||
72fe4858 GOC |
914 | .previous |
915 | ||
7effaa88 | 916 | /* edi: workmask, edx: work */ |
1da177e4 | 917 | retint_careful: |
7effaa88 | 918 | CFI_RESTORE_STATE |
1da177e4 LT |
919 | bt $TIF_NEED_RESCHED,%edx |
920 | jnc retint_signal | |
2601e64d | 921 | TRACE_IRQS_ON |
72fe4858 | 922 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 | 923 | pushq %rdi |
7effaa88 | 924 | CFI_ADJUST_CFA_OFFSET 8 |
1da177e4 | 925 | call schedule |
0bd7b798 | 926 | popq %rdi |
7effaa88 | 927 | CFI_ADJUST_CFA_OFFSET -8 |
1da177e4 | 928 | GET_THREAD_INFO(%rcx) |
72fe4858 | 929 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 930 | TRACE_IRQS_OFF |
1da177e4 | 931 | jmp retint_check |
0bd7b798 | 932 | |
1da177e4 | 933 | retint_signal: |
8f4d37ec | 934 | testl $_TIF_DO_NOTIFY_MASK,%edx |
10ffdbb8 | 935 | jz retint_swapgs |
2601e64d | 936 | TRACE_IRQS_ON |
72fe4858 | 937 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 | 938 | SAVE_REST |
0bd7b798 | 939 | movq $-1,ORIG_RAX(%rsp) |
3829ee6b | 940 | xorl %esi,%esi # oldset |
1da177e4 LT |
941 | movq %rsp,%rdi # &pt_regs |
942 | call do_notify_resume | |
943 | RESTORE_REST | |
72fe4858 | 944 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 945 | TRACE_IRQS_OFF |
be9e6870 | 946 | GET_THREAD_INFO(%rcx) |
eca91e78 | 947 | jmp retint_with_reschedule |
1da177e4 LT |
948 | |
949 | #ifdef CONFIG_PREEMPT | |
950 | /* Returning to kernel space. Check if we need preemption */ | |
951 | /* rcx: threadinfo. interrupts off. */ | |
b06babac | 952 | ENTRY(retint_kernel) |
26ccb8a7 | 953 | cmpl $0,TI_preempt_count(%rcx) |
1da177e4 | 954 | jnz retint_restore_args |
26ccb8a7 | 955 | bt $TIF_NEED_RESCHED,TI_flags(%rcx) |
1da177e4 LT |
956 | jnc retint_restore_args |
957 | bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ | |
958 | jnc retint_restore_args | |
959 | call preempt_schedule_irq | |
960 | jmp exit_intr | |
0bd7b798 | 961 | #endif |
4b787e0b | 962 | |
1da177e4 | 963 | CFI_ENDPROC |
4b787e0b | 964 | END(common_interrupt) |
0bd7b798 | 965 | |
1da177e4 LT |
966 | /* |
967 | * APIC interrupts. | |
0bd7b798 | 968 | */ |
322648d1 AH |
969 | .macro apicinterrupt num sym do_sym |
970 | ENTRY(\sym) | |
7effaa88 | 971 | INTR_FRAME |
19eadf98 | 972 | pushq $~(\num) |
7effaa88 | 973 | CFI_ADJUST_CFA_OFFSET 8 |
322648d1 | 974 | interrupt \do_sym |
1da177e4 LT |
975 | jmp ret_from_intr |
976 | CFI_ENDPROC | |
322648d1 AH |
977 | END(\sym) |
978 | .endm | |
1da177e4 | 979 | |
322648d1 AH |
980 | #ifdef CONFIG_SMP |
981 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \ | |
982 | irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt | |
983 | #endif | |
1da177e4 | 984 | |
5ae3a139 | 985 | apicinterrupt UV_BAU_MESSAGE \ |
322648d1 AH |
986 | uv_bau_message_intr1 uv_bau_message_interrupt |
987 | apicinterrupt LOCAL_TIMER_VECTOR \ | |
988 | apic_timer_interrupt smp_apic_timer_interrupt | |
89b831ef | 989 | |
0bd7b798 | 990 | #ifdef CONFIG_SMP |
322648d1 AH |
991 | apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \ |
992 | invalidate_interrupt0 smp_invalidate_interrupt | |
993 | apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \ | |
994 | invalidate_interrupt1 smp_invalidate_interrupt | |
995 | apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \ | |
996 | invalidate_interrupt2 smp_invalidate_interrupt | |
997 | apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \ | |
998 | invalidate_interrupt3 smp_invalidate_interrupt | |
999 | apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \ | |
1000 | invalidate_interrupt4 smp_invalidate_interrupt | |
1001 | apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \ | |
1002 | invalidate_interrupt5 smp_invalidate_interrupt | |
1003 | apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \ | |
1004 | invalidate_interrupt6 smp_invalidate_interrupt | |
1005 | apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \ | |
1006 | invalidate_interrupt7 smp_invalidate_interrupt | |
1da177e4 LT |
1007 | #endif |
1008 | ||
322648d1 AH |
1009 | apicinterrupt THRESHOLD_APIC_VECTOR \ |
1010 | threshold_interrupt mce_threshold_interrupt | |
1011 | apicinterrupt THERMAL_APIC_VECTOR \ | |
1012 | thermal_interrupt smp_thermal_interrupt | |
1812924b | 1013 | |
322648d1 AH |
1014 | #ifdef CONFIG_SMP |
1015 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ | |
1016 | call_function_single_interrupt smp_call_function_single_interrupt | |
1017 | apicinterrupt CALL_FUNCTION_VECTOR \ | |
1018 | call_function_interrupt smp_call_function_interrupt | |
1019 | apicinterrupt RESCHEDULE_VECTOR \ | |
1020 | reschedule_interrupt smp_reschedule_interrupt | |
1021 | #endif | |
1da177e4 | 1022 | |
322648d1 AH |
1023 | apicinterrupt ERROR_APIC_VECTOR \ |
1024 | error_interrupt smp_error_interrupt | |
1025 | apicinterrupt SPURIOUS_APIC_VECTOR \ | |
1026 | spurious_interrupt smp_spurious_interrupt | |
0bd7b798 | 1027 | |
1da177e4 LT |
1028 | /* |
1029 | * Exception entry points. | |
0bd7b798 | 1030 | */ |
322648d1 AH |
1031 | .macro zeroentry sym do_sym |
1032 | ENTRY(\sym) | |
7effaa88 | 1033 | INTR_FRAME |
fab58420 | 1034 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
14ae22ba | 1035 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
d99015b1 AH |
1036 | subq $15*8,%rsp |
1037 | CFI_ADJUST_CFA_OFFSET 15*8 | |
1038 | call error_entry | |
dcd072e2 | 1039 | DEFAULT_FRAME 0 |
d99015b1 AH |
1040 | movq %rsp,%rdi /* pt_regs pointer */ |
1041 | xorl %esi,%esi /* no error code */ | |
322648d1 | 1042 | call \do_sym |
d99015b1 | 1043 | jmp error_exit /* %ebx: no swapgs flag */ |
7effaa88 | 1044 | CFI_ENDPROC |
322648d1 AH |
1045 | END(\sym) |
1046 | .endm | |
1da177e4 | 1047 | |
322648d1 | 1048 | .macro paranoidzeroentry sym do_sym |
ddeb8f21 | 1049 | ENTRY(\sym) |
b8b1d08b AH |
1050 | INTR_FRAME |
1051 | PARAVIRT_ADJUST_EXCEPTION_FRAME | |
1052 | pushq $-1 /* ORIG_RAX: no syscall to restart */ | |
1053 | CFI_ADJUST_CFA_OFFSET 8 | |
1054 | subq $15*8, %rsp | |
1055 | call save_paranoid | |
1056 | TRACE_IRQS_OFF | |
1057 | movq %rsp,%rdi /* pt_regs pointer */ | |
1058 | xorl %esi,%esi /* no error code */ | |
322648d1 | 1059 | call \do_sym |
b8b1d08b AH |
1060 | jmp paranoid_exit /* %ebx: no swapgs flag */ |
1061 | CFI_ENDPROC | |
ddeb8f21 | 1062 | END(\sym) |
322648d1 | 1063 | .endm |
b8b1d08b | 1064 | |
322648d1 | 1065 | .macro paranoidzeroentry_ist sym do_sym ist |
ddeb8f21 | 1066 | ENTRY(\sym) |
9f1e87ea | 1067 | INTR_FRAME |
b8b1d08b AH |
1068 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1069 | pushq $-1 /* ORIG_RAX: no syscall to restart */ | |
1070 | CFI_ADJUST_CFA_OFFSET 8 | |
1071 | subq $15*8, %rsp | |
1072 | call save_paranoid | |
1073 | TRACE_IRQS_OFF | |
1074 | movq %rsp,%rdi /* pt_regs pointer */ | |
1075 | xorl %esi,%esi /* no error code */ | |
9939ddaf TH |
1076 | PER_CPU(init_tss, %rbp) |
1077 | subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp) | |
322648d1 | 1078 | call \do_sym |
9939ddaf | 1079 | addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp) |
b8b1d08b AH |
1080 | jmp paranoid_exit /* %ebx: no swapgs flag */ |
1081 | CFI_ENDPROC | |
ddeb8f21 | 1082 | END(\sym) |
322648d1 | 1083 | .endm |
b8b1d08b | 1084 | |
ddeb8f21 | 1085 | .macro errorentry sym do_sym |
322648d1 | 1086 | ENTRY(\sym) |
7effaa88 | 1087 | XCPT_FRAME |
fab58420 | 1088 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
d99015b1 AH |
1089 | subq $15*8,%rsp |
1090 | CFI_ADJUST_CFA_OFFSET 15*8 | |
1091 | call error_entry | |
dcd072e2 | 1092 | DEFAULT_FRAME 0 |
d99015b1 AH |
1093 | movq %rsp,%rdi /* pt_regs pointer */ |
1094 | movq ORIG_RAX(%rsp),%rsi /* get error code */ | |
1095 | movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ | |
322648d1 | 1096 | call \do_sym |
d99015b1 | 1097 | jmp error_exit /* %ebx: no swapgs flag */ |
7effaa88 | 1098 | CFI_ENDPROC |
322648d1 | 1099 | END(\sym) |
322648d1 | 1100 | .endm |
1da177e4 LT |
1101 | |
1102 | /* error code is on the stack already */ | |
ddeb8f21 | 1103 | .macro paranoiderrorentry sym do_sym |
322648d1 | 1104 | ENTRY(\sym) |
b8b1d08b AH |
1105 | XCPT_FRAME |
1106 | PARAVIRT_ADJUST_EXCEPTION_FRAME | |
1107 | subq $15*8,%rsp | |
e2f6bc25 AH |
1108 | CFI_ADJUST_CFA_OFFSET 15*8 |
1109 | call save_paranoid | |
1110 | DEFAULT_FRAME 0 | |
7e61a793 | 1111 | TRACE_IRQS_OFF |
b8b1d08b AH |
1112 | movq %rsp,%rdi /* pt_regs pointer */ |
1113 | movq ORIG_RAX(%rsp),%rsi /* get error code */ | |
1114 | movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ | |
322648d1 | 1115 | call \do_sym |
b8b1d08b AH |
1116 | jmp paranoid_exit /* %ebx: no swapgs flag */ |
1117 | CFI_ENDPROC | |
322648d1 | 1118 | END(\sym) |
322648d1 AH |
1119 | .endm |
1120 | ||
1121 | zeroentry divide_error do_divide_error | |
322648d1 AH |
1122 | zeroentry overflow do_overflow |
1123 | zeroentry bounds do_bounds | |
1124 | zeroentry invalid_op do_invalid_op | |
1125 | zeroentry device_not_available do_device_not_available | |
ddeb8f21 | 1126 | paranoiderrorentry double_fault do_double_fault |
322648d1 AH |
1127 | zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun |
1128 | errorentry invalid_TSS do_invalid_TSS | |
1129 | errorentry segment_not_present do_segment_not_present | |
322648d1 AH |
1130 | zeroentry spurious_interrupt_bug do_spurious_interrupt_bug |
1131 | zeroentry coprocessor_error do_coprocessor_error | |
1132 | errorentry alignment_check do_alignment_check | |
322648d1 | 1133 | zeroentry simd_coprocessor_error do_simd_coprocessor_error |
2601e64d | 1134 | |
9f1e87ea CG |
1135 | /* Reload gs selector with exception handling */ |
1136 | /* edi: new selector */ | |
9f9d489a | 1137 | ENTRY(native_load_gs_index) |
7effaa88 | 1138 | CFI_STARTPROC |
1da177e4 | 1139 | pushf |
7effaa88 | 1140 | CFI_ADJUST_CFA_OFFSET 8 |
72fe4858 | 1141 | DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) |
9f1e87ea | 1142 | SWAPGS |
0bd7b798 | 1143 | gs_change: |
9f1e87ea | 1144 | movl %edi,%gs |
1da177e4 | 1145 | 2: mfence /* workaround */ |
72fe4858 | 1146 | SWAPGS |
9f1e87ea | 1147 | popf |
7effaa88 | 1148 | CFI_ADJUST_CFA_OFFSET -8 |
9f1e87ea | 1149 | ret |
7effaa88 | 1150 | CFI_ENDPROC |
6efdcfaf | 1151 | END(native_load_gs_index) |
0bd7b798 | 1152 | |
9f1e87ea CG |
1153 | .section __ex_table,"a" |
1154 | .align 8 | |
1155 | .quad gs_change,bad_gs | |
1156 | .previous | |
1157 | .section .fixup,"ax" | |
1da177e4 | 1158 | /* running with kernelgs */ |
0bd7b798 | 1159 | bad_gs: |
72fe4858 | 1160 | SWAPGS /* switch back to user gs */ |
1da177e4 | 1161 | xorl %eax,%eax |
9f1e87ea CG |
1162 | movl %eax,%gs |
1163 | jmp 2b | |
1164 | .previous | |
0bd7b798 | 1165 | |
1da177e4 LT |
1166 | /* |
1167 | * Create a kernel thread. | |
1168 | * | |
1169 | * C extern interface: | |
1170 | * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |
1171 | * | |
1172 | * asm input arguments: | |
1173 | * rdi: fn, rsi: arg, rdx: flags | |
1174 | */ | |
1175 | ENTRY(kernel_thread) | |
1176 | CFI_STARTPROC | |
1177 | FAKE_STACK_FRAME $child_rip | |
1178 | SAVE_ALL | |
1179 | ||
1180 | # rdi: flags, rsi: usp, rdx: will be &pt_regs | |
1181 | movq %rdx,%rdi | |
1182 | orq kernel_thread_flags(%rip),%rdi | |
1183 | movq $-1, %rsi | |
1184 | movq %rsp, %rdx | |
1185 | ||
1186 | xorl %r8d,%r8d | |
1187 | xorl %r9d,%r9d | |
0bd7b798 | 1188 | |
1da177e4 LT |
1189 | # clone now |
1190 | call do_fork | |
1191 | movq %rax,RAX(%rsp) | |
1192 | xorl %edi,%edi | |
1193 | ||
1194 | /* | |
1195 | * It isn't worth checking for a reschedule here, |
1196 | * so within the x86_64 port you can rely on kernel_thread() |
1197 | * not rescheduling the child before returning; this avoids the need |
1198 | * for hacks, for example to fork off the per-CPU idle tasks. |
9f1e87ea | 1199 | * [Hopefully no generic code relies on the reschedule -AK] |
1da177e4 LT |
1200 | */ |
1201 | RESTORE_ALL | |
1202 | UNFAKE_STACK_FRAME | |
1203 | ret | |
1204 | CFI_ENDPROC | |
6efdcfaf | 1205 | END(kernel_thread) |
0bd7b798 | 1206 | |
c2c631e3 | 1207 | ENTRY(child_rip) |
c05991ed AK |
1208 | pushq $0 # fake return address |
1209 | CFI_STARTPROC | |
1da177e4 LT |
1210 | /* |
1211 | * Here we are in the child and the registers are set as they were | |
1212 | * at kernel_thread() invocation in the parent. | |
1213 | */ | |
1214 | movq %rdi, %rax | |
1215 | movq %rsi, %rdi | |
1216 | call *%rax | |
1217 | # exit | |
1c5b5cfd | 1218 | mov %eax, %edi |
1da177e4 | 1219 | call do_exit |
5f5db591 | 1220 | ud2 # padding for call trace |
c05991ed | 1221 | CFI_ENDPROC |
6efdcfaf | 1222 | END(child_rip) |
1da177e4 LT |
1223 | |
1224 | /* | |
1225 | * execve(). This function needs to use IRET, not SYSRET, to set up all state properly. | |
1226 | * | |
1227 | * C extern interface: | |
1228 | * extern long execve(char *name, char **argv, char **envp) | |
1229 | * | |
1230 | * asm input arguments: | |
1231 | * rdi: name, rsi: argv, rdx: envp | |
1232 | * | |
1233 | * We want to fall back into: |
5d119b2c | 1234 | * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs) |
1da177e4 LT |
1235 | * |
1236 | * do_sys_execve asm fallback arguments: | |
5d119b2c | 1237 | * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack |
1da177e4 | 1238 | */ |
3db03b4a | 1239 | ENTRY(kernel_execve) |
1da177e4 LT |
1240 | CFI_STARTPROC |
1241 | FAKE_STACK_FRAME $0 | |
0bd7b798 | 1242 | SAVE_ALL |
5d119b2c | 1243 | movq %rsp,%rcx |
1da177e4 | 1244 | call sys_execve |
0bd7b798 | 1245 | movq %rax, RAX(%rsp) |
1da177e4 LT |
1246 | RESTORE_REST |
1247 | testq %rax,%rax | |
1248 | je int_ret_from_sys_call | |
1249 | RESTORE_ARGS | |
1250 | UNFAKE_STACK_FRAME | |
1251 | ret | |
1252 | CFI_ENDPROC | |
6efdcfaf | 1253 | END(kernel_execve) |
1da177e4 | 1254 | |
2699500b | 1255 | /* Call softirq on interrupt stack. Interrupts are off. */ |
ed6b676c | 1256 | ENTRY(call_softirq) |
7effaa88 | 1257 | CFI_STARTPROC |
2699500b AK |
1258 | push %rbp |
1259 | CFI_ADJUST_CFA_OFFSET 8 | |
1260 | CFI_REL_OFFSET rbp,0 | |
1261 | mov %rsp,%rbp | |
1262 | CFI_DEF_CFA_REGISTER rbp | |
ed6b676c | 1263 | incl %gs:pda_irqcount |
26f80bd6 | 1264 | cmove PER_CPU_VAR(irq_stack_ptr),%rsp |
2699500b | 1265 | push %rbp # backlink for old unwinder |
ed6b676c | 1266 | call __do_softirq |
2699500b | 1267 | leaveq |
7effaa88 | 1268 | CFI_DEF_CFA_REGISTER rsp |
2699500b | 1269 | CFI_ADJUST_CFA_OFFSET -8 |
ed6b676c | 1270 | decl %gs:pda_irqcount |
ed6b676c | 1271 | ret |
7effaa88 | 1272 | CFI_ENDPROC |
6efdcfaf | 1273 | END(call_softirq) |
75154f40 | 1274 | |
3d75e1b8 | 1275 | #ifdef CONFIG_XEN |
322648d1 | 1276 | zeroentry xen_hypervisor_callback xen_do_hypervisor_callback |
3d75e1b8 JF |
1277 | |
1278 | /* | |
9f1e87ea CG |
1279 | * A note on the "critical region" in our callback handler. |
1280 | * We want to avoid stacking callback handlers due to events occurring | |
1281 | * during handling of the last event. To do this, we keep events disabled | |
1282 | * until we've done all processing. HOWEVER, we must enable events before | |
1283 | * popping the stack frame (can't be done atomically) and so it would still | |
1284 | * be possible to get enough handler activations to overflow the stack. | |
1285 | * Although unlikely, bugs of that kind are hard to track down, so we'd | |
1286 | * like to avoid the possibility. | |
1287 | * So, on entry to the handler we detect whether we interrupted an | |
1288 | * existing activation in its critical region -- if so, we pop the current | |
1289 | * activation and restart the handler using the previous one. | |
1290 | */ | |
3d75e1b8 JF |
1291 | ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *) |
1292 | CFI_STARTPROC | |
9f1e87ea CG |
1293 | /* |
1294 | * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will |
1295 | * see the correct pointer to the pt_regs. |
1296 | */ | |
3d75e1b8 JF |
1297 | movq %rdi, %rsp # we don't return, adjust the stack frame |
1298 | CFI_ENDPROC | |
dcd072e2 | 1299 | DEFAULT_FRAME |
3d75e1b8 JF |
1300 | 11: incl %gs:pda_irqcount |
1301 | movq %rsp,%rbp | |
1302 | CFI_DEF_CFA_REGISTER rbp | |
26f80bd6 | 1303 | cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp |
3d75e1b8 JF |
1304 | pushq %rbp # backlink for old unwinder |
1305 | call xen_evtchn_do_upcall | |
1306 | popq %rsp | |
1307 | CFI_DEF_CFA_REGISTER rsp | |
1308 | decl %gs:pda_irqcount | |
1309 | jmp error_exit | |
1310 | CFI_ENDPROC | |
1311 | END(xen_do_hypervisor_callback) |
1312 | ||
1313 | /* | |
9f1e87ea CG |
1314 | * Hypervisor uses this for application faults while it executes. |
1315 | * We get here for two reasons: | |
1316 | * 1. Fault while reloading DS, ES, FS or GS | |
1317 | * 2. Fault while executing IRET | |
1318 | * Category 1 we do not need to fix up as Xen has already reloaded all segment | |
1319 | * registers that could be reloaded and zeroed the others. | |
1320 | * Category 2 we fix up by killing the current process. We cannot use the | |
1321 | * normal Linux return path in this case because if we use the IRET hypercall | |
1322 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. | |
1323 | * We distinguish between categories by comparing each saved segment register | |
1324 | * with its current contents: any discrepancy means we are in category 1. |
1325 | */ | |
3d75e1b8 | 1326 | ENTRY(xen_failsafe_callback) |
dcd072e2 AH |
1327 | INTR_FRAME 1 (6*8) |
1328 | /*CFI_REL_OFFSET gs,GS*/ | |
1329 | /*CFI_REL_OFFSET fs,FS*/ | |
1330 | /*CFI_REL_OFFSET es,ES*/ | |
1331 | /*CFI_REL_OFFSET ds,DS*/ | |
1332 | CFI_REL_OFFSET r11,8 | |
1333 | CFI_REL_OFFSET rcx,0 | |
3d75e1b8 JF |
1334 | movw %ds,%cx |
1335 | cmpw %cx,0x10(%rsp) | |
1336 | CFI_REMEMBER_STATE | |
1337 | jne 1f | |
1338 | movw %es,%cx | |
1339 | cmpw %cx,0x18(%rsp) | |
1340 | jne 1f | |
1341 | movw %fs,%cx | |
1342 | cmpw %cx,0x20(%rsp) | |
1343 | jne 1f | |
1344 | movw %gs,%cx | |
1345 | cmpw %cx,0x28(%rsp) | |
1346 | jne 1f | |
1347 | /* All segments match their saved values => Category 2 (Bad IRET). */ | |
1348 | movq (%rsp),%rcx | |
1349 | CFI_RESTORE rcx | |
1350 | movq 8(%rsp),%r11 | |
1351 | CFI_RESTORE r11 | |
1352 | addq $0x30,%rsp | |
1353 | CFI_ADJUST_CFA_OFFSET -0x30 | |
14ae22ba IM |
1354 | pushq_cfi $0 /* RIP */ |
1355 | pushq_cfi %r11 | |
1356 | pushq_cfi %rcx | |
4a5c3e77 | 1357 | jmp general_protection |
3d75e1b8 JF |
1358 | CFI_RESTORE_STATE |
1359 | 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ | |
1360 | movq (%rsp),%rcx | |
1361 | CFI_RESTORE rcx | |
1362 | movq 8(%rsp),%r11 | |
1363 | CFI_RESTORE r11 | |
1364 | addq $0x30,%rsp | |
1365 | CFI_ADJUST_CFA_OFFSET -0x30 | |
14ae22ba | 1366 | pushq_cfi $0 |
3d75e1b8 JF |
1367 | SAVE_ALL |
1368 | jmp error_exit | |
1369 | CFI_ENDPROC | |
3d75e1b8 JF |
1370 | END(xen_failsafe_callback) |
1371 | ||
1372 | #endif /* CONFIG_XEN */ | |
ddeb8f21 AH |
1373 | |
1374 | /* | |
1375 | * Some functions should be protected against kprobes | |
1376 | */ | |
1377 | .pushsection .kprobes.text, "ax" | |
1378 | ||
1379 | paranoidzeroentry_ist debug do_debug DEBUG_STACK | |
1380 | paranoidzeroentry_ist int3 do_int3 DEBUG_STACK | |
1381 | paranoiderrorentry stack_segment do_stack_segment | |
1382 | errorentry general_protection do_general_protection | |
1383 | errorentry page_fault do_page_fault | |
1384 | #ifdef CONFIG_X86_MCE | |
1385 | paranoidzeroentry machine_check do_machine_check | |
1386 | #endif | |
1387 | ||
1388 | /* | |
9f1e87ea CG |
1389 | * "Paranoid" exit path from exception stack. |
1390 | * Paranoid because this is used by NMIs and cannot take | |
ddeb8f21 AH |
1391 | * any kernel state for granted. |
1392 | * We don't do kernel preemption checks here, because only | |
1393 | * NMI should be common and it does not enable IRQs and | |
1394 | * cannot get reschedule ticks. | |
1395 | * | |
1396 | * "trace" is 0 for the NMI handler only, because irq-tracing | |
1397 | * is fundamentally NMI-unsafe. (we cannot change the soft and | |
1398 | * hard flags at once, atomically) | |
1399 | */ | |
1400 | ||
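/*
 * Exit flow sketch for the code below: %ebx, set up by save_paranoid, says
 * whether GS was already the kernel's (1) or had to be swapped (0).  If no
 * work is pending, or we interrupted the kernel, we restore registers, undo
 * the entry SWAPGS when %ebx is 0, and iret.  If we are returning to user
 * space with work pending, sync_regs moves us onto the normal process stack
 * so signals and reschedules can be handled, looping until TI_flags is clear.
 */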
1401 | /* ebx: no swapgs flag */ | |
1402 | ENTRY(paranoid_exit) | |
1403 | INTR_FRAME | |
1404 | DISABLE_INTERRUPTS(CLBR_NONE) | |
1405 | TRACE_IRQS_OFF | |
1406 | testl %ebx,%ebx /* swapgs needed? */ | |
1407 | jnz paranoid_restore | |
1408 | testl $3,CS(%rsp) | |
1409 | jnz paranoid_userspace | |
1410 | paranoid_swapgs: | |
1411 | TRACE_IRQS_IRETQ 0 | |
1412 | SWAPGS_UNSAFE_STACK | |
1413 | paranoid_restore: | |
1414 | RESTORE_ALL 8 | |
1415 | jmp irq_return | |
1416 | paranoid_userspace: | |
1417 | GET_THREAD_INFO(%rcx) | |
1418 | movl TI_flags(%rcx),%ebx | |
1419 | andl $_TIF_WORK_MASK,%ebx | |
1420 | jz paranoid_swapgs | |
1421 | movq %rsp,%rdi /* &pt_regs */ | |
1422 | call sync_regs | |
1423 | movq %rax,%rsp /* switch stack for scheduling */ | |
1424 | testl $_TIF_NEED_RESCHED,%ebx | |
1425 | jnz paranoid_schedule | |
1426 | movl %ebx,%edx /* arg3: thread flags */ | |
1427 | TRACE_IRQS_ON | |
1428 | ENABLE_INTERRUPTS(CLBR_NONE) | |
1429 | xorl %esi,%esi /* arg2: oldset */ | |
1430 | movq %rsp,%rdi /* arg1: &pt_regs */ | |
1431 | call do_notify_resume | |
1432 | DISABLE_INTERRUPTS(CLBR_NONE) | |
1433 | TRACE_IRQS_OFF | |
1434 | jmp paranoid_userspace | |
1435 | paranoid_schedule: | |
1436 | TRACE_IRQS_ON | |
1437 | ENABLE_INTERRUPTS(CLBR_ANY) | |
1438 | call schedule | |
1439 | DISABLE_INTERRUPTS(CLBR_ANY) | |
1440 | TRACE_IRQS_OFF | |
1441 | jmp paranoid_userspace | |
1442 | CFI_ENDPROC | |
1443 | END(paranoid_exit) | |
1444 | ||
1445 | /* | |
1446 | * Exception entry point. This expects an error code/orig_rax on the stack. | |
1447 | * returns in "no swapgs flag" in %ebx. | |
1448 | */ | |
1449 | ENTRY(error_entry) | |
1450 | XCPT_FRAME | |
1451 | CFI_ADJUST_CFA_OFFSET 15*8 | |
1452 | /* oldrax contains error code */ | |
1453 | cld | |
1454 | movq_cfi rdi, RDI+8 | |
1455 | movq_cfi rsi, RSI+8 | |
1456 | movq_cfi rdx, RDX+8 | |
1457 | movq_cfi rcx, RCX+8 | |
1458 | movq_cfi rax, RAX+8 | |
1459 | movq_cfi r8, R8+8 | |
1460 | movq_cfi r9, R9+8 | |
1461 | movq_cfi r10, R10+8 | |
1462 | movq_cfi r11, R11+8 | |
1463 | movq_cfi rbx, RBX+8 | |
1464 | movq_cfi rbp, RBP+8 | |
1465 | movq_cfi r12, R12+8 | |
1466 | movq_cfi r13, R13+8 | |
1467 | movq_cfi r14, R14+8 | |
1468 | movq_cfi r15, R15+8 | |
1469 | xorl %ebx,%ebx | |
1470 | testl $3,CS+8(%rsp) | |
1471 | je error_kernelspace | |
1472 | error_swapgs: | |
1473 | SWAPGS | |
1474 | error_sti: | |
1475 | TRACE_IRQS_OFF | |
1476 | ret | |
1477 | CFI_ENDPROC | |
1478 | ||
1479 | /* | |
1480 | * There are two places in the kernel that can potentially fault with | |
1481 | * usergs. Handle them here. The exception handlers after iret run with | |
1482 | * kernel gs again, so don't set the user space flag. B stepping K8s | |
1483 | * sometimes report a truncated RIP for IRET exceptions returning to |
1484 | * compat mode. Check for these here too. | |
1485 | */ | |
1486 | error_kernelspace: | |
1487 | incl %ebx | |
1488 | leaq irq_return(%rip),%rcx | |
1489 | cmpq %rcx,RIP+8(%rsp) | |
1490 | je error_swapgs | |
1491 | movl %ecx,%ecx /* zero extend */ | |
1492 | cmpq %rcx,RIP+8(%rsp) | |
1493 | je error_swapgs | |
1494 | cmpq $gs_change,RIP+8(%rsp) | |
9f1e87ea | 1495 | je error_swapgs |
ddeb8f21 AH |
1496 | jmp error_sti |
1497 | END(error_entry) | |
1498 | ||
1499 | ||
1500 | /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ | |
1501 | ENTRY(error_exit) | |
1502 | DEFAULT_FRAME | |
1503 | movl %ebx,%eax | |
1504 | RESTORE_REST | |
1505 | DISABLE_INTERRUPTS(CLBR_NONE) | |
1506 | TRACE_IRQS_OFF | |
1507 | GET_THREAD_INFO(%rcx) | |
1508 | testl %eax,%eax | |
1509 | jne retint_kernel | |
1510 | LOCKDEP_SYS_EXIT_IRQ | |
1511 | movl TI_flags(%rcx),%edx | |
1512 | movl $_TIF_WORK_MASK,%edi | |
1513 | andl %edi,%edx | |
1514 | jnz retint_careful | |
1515 | jmp retint_swapgs | |
1516 | CFI_ENDPROC | |
1517 | END(error_exit) | |
1518 | ||
1519 | ||
1520 | /* runs on exception stack */ | |
1521 | ENTRY(nmi) | |
1522 | INTR_FRAME | |
1523 | PARAVIRT_ADJUST_EXCEPTION_FRAME | |
1524 | pushq_cfi $-1 | |
1525 | subq $15*8, %rsp | |
1526 | CFI_ADJUST_CFA_OFFSET 15*8 | |
1527 | call save_paranoid | |
1528 | DEFAULT_FRAME 0 | |
1529 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ | |
1530 | movq %rsp,%rdi | |
1531 | movq $-1,%rsi | |
1532 | call do_nmi | |
1533 | #ifdef CONFIG_TRACE_IRQFLAGS | |
1534 | /* paranoidexit; without TRACE_IRQS_OFF */ | |
1535 | /* ebx: no swapgs flag */ | |
1536 | DISABLE_INTERRUPTS(CLBR_NONE) | |
1537 | testl %ebx,%ebx /* swapgs needed? */ | |
1538 | jnz nmi_restore | |
1539 | testl $3,CS(%rsp) | |
1540 | jnz nmi_userspace | |
1541 | nmi_swapgs: | |
1542 | SWAPGS_UNSAFE_STACK | |
1543 | nmi_restore: | |
1544 | RESTORE_ALL 8 | |
1545 | jmp irq_return | |
1546 | nmi_userspace: | |
1547 | GET_THREAD_INFO(%rcx) | |
1548 | movl TI_flags(%rcx),%ebx | |
1549 | andl $_TIF_WORK_MASK,%ebx | |
1550 | jz nmi_swapgs | |
1551 | movq %rsp,%rdi /* &pt_regs */ | |
1552 | call sync_regs | |
1553 | movq %rax,%rsp /* switch stack for scheduling */ | |
1554 | testl $_TIF_NEED_RESCHED,%ebx | |
1555 | jnz nmi_schedule | |
1556 | movl %ebx,%edx /* arg3: thread flags */ | |
1557 | ENABLE_INTERRUPTS(CLBR_NONE) | |
1558 | xorl %esi,%esi /* arg2: oldset */ | |
1559 | movq %rsp,%rdi /* arg1: &pt_regs */ | |
1560 | call do_notify_resume | |
1561 | DISABLE_INTERRUPTS(CLBR_NONE) | |
1562 | jmp nmi_userspace | |
1563 | nmi_schedule: | |
1564 | ENABLE_INTERRUPTS(CLBR_ANY) | |
1565 | call schedule | |
1566 | DISABLE_INTERRUPTS(CLBR_ANY) | |
1567 | jmp nmi_userspace | |
1568 | CFI_ENDPROC | |
1569 | #else | |
1570 | jmp paranoid_exit | |
9f1e87ea | 1571 | CFI_ENDPROC |
ddeb8f21 AH |
1572 | #endif |
1573 | END(nmi) | |
1574 | ||
1575 | ENTRY(ignore_sysret) | |
1576 | CFI_STARTPROC | |
1577 | mov $-ENOSYS,%eax | |
1578 | sysret | |
1579 | CFI_ENDPROC | |
1580 | END(ignore_sysret) | |
1581 | ||
1582 | /* | |
1583 | * End of kprobes section | |
1584 | */ | |
1585 | .popsection |