// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2011
 */
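/*
 * Helper functions for the VMX (Altivec) accelerated copy routines
 * such as __copy_tofrom_user_power7: they bracket the vector copy
 * loops, handling preemption, page faults and Altivec register state
 * so the copies can safely use VMX from kernel context.
 */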
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>
int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}
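/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller such as __copy_tofrom_user_power7 is expected to do, in effect,
 *
 *	if (!enter_vmx_usercopy())
 *		... fall back to the normal non-vmx copy ...
 *	... copy using VMX registers ...
 *	return exit_vmx_usercopy();	(always 0, i.e. success)
 *
 * The real caller is assembly, so this only shows the C-level shape of
 * the contract.
 */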
/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable_no_resched();
	/*
	 * Must never explicitly call schedule (including preempt_enable())
	 * while in a kuap-unlocked user copy, because the AMR register will
	 * not be saved and restored across context switch. However preempt
	 * kernels need to be preempted as soon as possible if need_resched is
	 * set and we are preemptible. The hack here is to schedule a
	 * decrementer to fire here and reschedule for us if necessary.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT) && need_resched())
		set_dec(1);
	return 0;
}
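/*
 * Note (added for clarity): set_dec(1) makes the decrementer expire
 * almost immediately, so the resulting interrupt, rather than an
 * explicit schedule() from this context, performs the reschedule
 * requested by need_resched().
 */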
int enter_vmx_ops(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}
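/*
 * Note (added for clarity): unlike enter_vmx_usercopy(), this variant
 * does not disable page faults; the mem* routines it serves operate on
 * kernel addresses, so the user-copy fault/fallback handling above is
 * presumably not needed here.
 */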
/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_ops(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
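/*
 * Usage sketch (illustrative only, not part of the original file): a
 * VMX memcpy implementation can finish with
 *
 *	... copy using VMX registers ...
 *	return exit_vmx_ops(dest);
 *
 * so a single tail call both tears down the VMX context and hands back
 * the destination pointer that memcpy() is required to return.
 */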