/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/objtool.h>
#include <../entry/calling.h>

.pushsection .noinstr.text, "ax"

/*
 * PV hypercall interface to the hypervisor.
 *
 * Called via inline asm(), so better preserve %rcx and %r11.
 *
 * Input:
 *	%eax: hypercall number
 *	%rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
 * Output: %rax
 */
SYM_FUNC_START(xen_hypercall_pv)
	ANNOTATE_NOENDBR
	push %rcx	/* SYSCALL clobbers %rcx and %r11, */
	push %r11	/* so preserve them by hand. */
	UNWIND_HINT_SAVE
	syscall		/* 64-bit PV guests enter Xen via SYSCALL. */
	UNWIND_HINT_RESTORE
	pop %r11
	pop %rcx
	RET
SYM_FUNC_END(xen_hypercall_pv)
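
/*
 * Illustrative sketch only, not upstream code: a C caller honouring
 * the contract above loads the hypercall number into %eax and the
 * arguments into %rdi/%rsi/..., e.g. with inline asm roughly like
 *
 *	long ret;
 *	asm volatile("call xen_hypercall_pv"
 *		     : "=a" (ret)
 *		     : "a" (__HYPERVISOR_sched_op), "D" (cmd), "S" (arg)
 *		     : "memory");
 *
 * (simplified: the real wrappers in <asm/xen/hypercall.h> also
 * declare the remaining register clobbers).
 */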

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
	RET
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
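
/*
 * Every caller-saved register is saved and restored by hand below:
 * the C callee xen_force_evtchn_callback follows the normal ABI and
 * may clobber them, while the fast paths that call check_events
 * (e.g. xen_irq_enable_direct) expect their registers back intact.
 */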
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	RET
SYM_FUNC_END(check_events)

/*
 * Enable events.  This clears the event mask and then tests the
 * pending event status in one operation.  If there are pending
 * events, enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)

	/*
	 * Preemption here doesn't matter, because being preempted will
	 * just deal with any pending interrupts.  The pending check may
	 * end up being run on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_pending)
	jz 1f

	call check_events
1:
	FRAME_END
	RET
SYM_FUNC_END(xen_irq_enable_direct)
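
/*
 * The testb above exists because an event that arrived while the
 * mask byte was still set only marked evtchn_upcall_pending and was
 * never delivered; after unmasking, Xen will not re-deliver it by
 * itself, so check_events has to force the upcall via a hypercall.
 */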

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
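
/*
 * Worked example of the trick below: a zero mask byte (events
 * enabled) makes testb set ZF, setz then yields %ah = 1, and
 * addb %ah, %ah doubles that to 2, i.e. bit 9 of %eax, which is
 * exactly X86_EFLAGS_IF (0x200).  A non-zero mask leaves %ah = 0,
 * so IF reads as clear.
 */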
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
	setz %ah
	addb %ah, %ah
	RET
SYM_FUNC_END(xen_save_fl_direct)

SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_arch_cr2), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2_direct);

.popsection

.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	UNWIND_HINT_ENTRY
	ENDBR
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm
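
/*
 * Each invocation below stamps out one stub; e.g. "xen_pv_trap
 * asm_exc_int3" expands to roughly:
 *
 *	xen_asm_exc_int3:
 *		pop %rcx	# drop the %rcx/%r11 Xen pushed
 *		pop %r11	# on top of the exception frame
 *		jmp asm_exc_int3
 */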

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_xenpv_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_CET
xen_pv_trap asm_exc_control_protection
#endif /* CONFIG_X86_CET */
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap asm_int80_emulation
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	UNWIND_HINT_UNDEFINED
	ENDBR
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT
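
/*
 * The .fill above pads each stub with int3 (0xcc) bytes out to
 * XEN_EARLY_IDT_HANDLER_SIZE, so stub i starts at
 * xen_early_idt_handler_array + i * XEN_EARLY_IDT_HANDLER_SIZE and
 * its address can be computed instead of looked up in a table.
 */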

/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags		<-- xen_iret must push from here on
 *
 *	rcx
 *	r11
 * rsp->r10
 */
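
/*
 * A 64-bit PV kernel runs deprivileged, so it cannot return to user
 * mode with a native IRET; instead it pushes the extra words shown
 * above and issues the __HYPERVISOR_iret hypercall, which lets Xen
 * restore the user context and the event mask atomically.
 */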

.macro xen_hypercall_iret
	pushq $0	/* Flags */
	push %rcx
	push %r11
	push %r10

	mov $__HYPERVISOR_iret, %eax
	syscall		/* Do the IRET. */
#ifdef CONFIG_MITIGATION_SLS
	int3
#endif
.endm

SYM_CODE_START(xen_iret)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	xen_hypercall_iret
SYM_CODE_END(xen_iret)

/*
 * XEN pv doesn't use the trampoline stack: PER_CPU_VAR(cpu_tss_rw + TSS_sp0)
 * is also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
 * under XEN pv would move %rsp up to the top of the kernel stack and leave the
 * IRET frame below %rsp, where an #NMI arriving at the wrong moment could
 * corrupt it.  Besides, swapgs_restore_regs_and_return_to_usermode() would
 * only push the IRET frame back at the same address, which is pointless.
 */
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
	UNWIND_HINT_REGS
	POP_REGS

	/* stackleak_erase() can work safely on the kernel stack. */
	STACKLEAK_ERASE_NOCLOBBER

	addq	$8, %rsp	/* skip regs->orig_ax */
	jmp	xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
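
/*
 * The common exit path in entry_64.S branches here instead of
 * swapgs_restore_regs_and_return_to_usermode() through an
 * X86_FEATURE_XENPV alternative, so this code only ever runs on
 * Xen PV.
 */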

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */
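
/*
 * After popping %rcx and %r11, that layout leaves rip at 0(%rsp),
 * cs at 1*8(%rsp), rflags at 2*8(%rsp), rsp at 3*8(%rsp) and ss at
 * 4*8(%rsp), which is why the stubs below patch CS with
 * "movq ..., 1*8(%rsp)" and SS with "movq ..., 4*8(%rsp)".
 */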

/* Normal 64-bit system call target */
SYM_CODE_START(xen_entry_SYSCALL_64)
	UNWIND_HINT_ENTRY
	ENDBR
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_64)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_entry_SYSCALL_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_compat)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSENTER_compat)

#else /* !CONFIG_IA32_EMULATION */

SYM_CODE_START(xen_entry_SYSCALL_compat)
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax	/* no 32-bit support: fail with -ENOSYS */
	xen_hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)

#endif	/* CONFIG_IA32_EMULATION */