/*
 * set_fs() - switch the current thread's user/kernel address limit.
 * Stores the new limit in thread_info, then adjusts the kernel domain
 * access rights to match (non-zero limit => DOMAIN_CLIENT, otherwise
 * DOMAIN_MANAGER).
 */
static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
++
++ /*
++ * Prevent a mispredicted conditional call to set_fs from forwarding
++ * the wrong address limit to access_ok under speculation.
++ */
++ dsb(nsh);
++ isb();
++
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
/*
 * __inttype(x): yields an unsigned integer type wide enough to hold x -
 * unsigned long normally, unsigned long long when sizeof(x) exceeds a
 * long (e.g. 64-bit values on a 32-bit kernel).
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
++ /*
++ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
++ * is above the current addr_limit.
++ */
++ #define uaccess_mask_range_ptr(ptr, size) \
++ ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
++ static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
++ size_t size)
++ {
++ void __user *safe_ptr = (void __user *)ptr;
++ unsigned long tmp;
++
++ /*
++ * tmp = (addr_limit - 1) - ptr: carry (HS) clear means ptr is
++ * already past the limit.  addhs restores tmp = addr_limit - ptr
++ * (room left); the conditional flag-setting sub then compares the
++ * room against size.  If either check fails (LO), the pointer is
++ * forced to NULL so a speculative dereference is harmless.
++ * NOTE(review): "subhss" is the pre-UAL (divided) spelling of a
++ * conditional flag-setting SUB; confirm it matches the assembler
++ * syntax mode this file is built with (UAL would be "subshs").
++ */
++ asm volatile(
++ " sub %1, %3, #1\n"
++ " subs %1, %1, %0\n"
++ " addhs %1, %1, #1\n"
++ " subhss %1, %1, %2\n"
++ " movlo %0, #0\n"
++ : "+r" (safe_ptr), "=&r" (tmp)
++ : "r" (size), "r" (current_thread_info()->addr_limit)
++ : "cc");
++
++ /* Speculation barrier: ensure the masking above takes effect. */
++ csdb();
++ return safe_ptr;
++ }
++
++
/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
#define __get_user_check(x, p) \
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
- register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ register typeof(*(p)) __user *__p asm("r0") = (p); \
register __inttype(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
__pu_err; \
})
++ #ifdef CONFIG_CPU_SPECTRE
++ /*
++ * When mitigating Spectre variant 1.1, all accessors need to include
++ * verification of the address space.
++ */
++ #define __put_user(x, ptr) put_user(x, ptr)
++
++ #else
#define __put_user(x, ptr) \
({ \
long __pu_err = 0; \
__pu_err; \
})
-- #define __put_user_error(x, ptr, err) \
-- ({ \
-- __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
-- (void) 0; \
-- })
--
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
do { \
unsigned long __pu_addr = (unsigned long)__pu_ptr; \
: "r" (x), "i" (-EFAULT) \
: "cc")
++ #endif /* !CONFIG_CPU_SPECTRE */
#ifdef CONFIG_MMU
extern unsigned long __must_check
kframe->magic = IWMMXT_MAGIC;
kframe->size = IWMMXT_STORAGE_SIZE;
iwmmxt_task_copy(current_thread_info(), &kframe->storage);
--
-- err = __copy_to_user(frame, kframe, sizeof(*frame));
} else {
/*
* For bug-compatibility with older kernels, some space
* Set the magic and size appropriately so that properly
* written userspace can skip it reliably:
*/
-- __put_user_error(DUMMY_MAGIC, &frame->magic, err);
-- __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
++ *kframe = (struct iwmmxt_sigframe) {
++ .magic = DUMMY_MAGIC,
++ .size = IWMMXT_STORAGE_SIZE,
++ };
}
++ err = __copy_to_user(frame, kframe, sizeof(*kframe));
++
return err;
}
/*
 * Save the current VFP state into the user signal frame.  The frame is
 * assembled in a zero-initialised kernel-side copy and pushed out with a
 * single __copy_to_user() (which is hardened against Spectre) instead of
 * a sequence of individual __put_user_error() stores.
 */
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
-- const unsigned long magic = VFP_MAGIC;
-- const unsigned long size = VFP_STORAGE_SIZE;
++ struct vfp_sigframe kframe;
int err = 0;
-- __put_user_error(magic, &frame->magic, err);
-- __put_user_error(size, &frame->size, err);
++ /* Zero first so frame padding never leaks kernel stack contents. */
++ memset(&kframe, 0, sizeof(kframe));
++ kframe.magic = VFP_MAGIC;
++ kframe.size = VFP_STORAGE_SIZE;
++ err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
if (err)
-- return -EFAULT;
++ return err;
-- return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
++ return __copy_to_user(frame, &kframe, sizeof(kframe));
}
static int restore_vfp_context(char __user **auxp)
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
struct aux_sigframe __user *aux;
++ struct sigcontext context;
int err = 0;
-- __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
-- __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
-- __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
-- __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
-- __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
-- __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
-- __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
-- __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
-- __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
-- __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
-- __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
-- __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
-- __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
-- __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
-- __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
-- __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
-- __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
--
-- __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
-- __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
-- __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
-- __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
++ context = (struct sigcontext) {
++ .arm_r0 = regs->ARM_r0,
++ .arm_r1 = regs->ARM_r1,
++ .arm_r2 = regs->ARM_r2,
++ .arm_r3 = regs->ARM_r3,
++ .arm_r4 = regs->ARM_r4,
++ .arm_r5 = regs->ARM_r5,
++ .arm_r6 = regs->ARM_r6,
++ .arm_r7 = regs->ARM_r7,
++ .arm_r8 = regs->ARM_r8,
++ .arm_r9 = regs->ARM_r9,
++ .arm_r10 = regs->ARM_r10,
++ .arm_fp = regs->ARM_fp,
++ .arm_ip = regs->ARM_ip,
++ .arm_sp = regs->ARM_sp,
++ .arm_lr = regs->ARM_lr,
++ .arm_pc = regs->ARM_pc,
++ .arm_cpsr = regs->ARM_cpsr,
++
++ .trap_no = current->thread.trap_no,
++ .error_code = current->thread.error_code,
++ .fault_address = current->thread.address,
++ .oldmask = set->sig[0],
++ };
++
++ err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
if (err == 0)
err |= preserve_vfp_context(&aux->vfp);
#endif
-- __put_user_error(0, &aux->end_magic, err);
++ err |= __put_user(0, &aux->end_magic);
return err;
}
/*
* Set uc.uc_flags to a value which sc.trap_no would never have.
*/
-- __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
++ err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
err |= setup_sigframe(frame, regs, set);
if (err == 0)
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
-- __put_user_error(0, &frame->sig.uc.uc_flags, err);
-- __put_user_error(NULL, &frame->sig.uc.uc_link, err);
++ err |= __put_user(0, &frame->sig.uc.uc_flags);
++ err |= __put_user(NULL, &frame->sig.uc.uc_link);
err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
err |= setup_sigframe(&frame->sig, regs, set);
sigset_t *oldset = sigmask_to_save();
int ret;
+ /*
+ * Increment event counter and perform fixup for the pre-signal
+ * frame.
+ */
+ rseq_signal_deliver(ksig, regs);
+
/*
* Set up the stack frame
*/
} else {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
}
local_irq_disable();
{
addr_limit_user_check();
}
+
+#ifdef CONFIG_DEBUG_RSEQ
+/*
+ * Syscall-entry debug hook for restartable sequences: rseq_syscall()
+ * terminates the task if a system call was issued from inside an rseq
+ * critical section.  Compiled in only with CONFIG_DEBUG_RSEQ.
+ */
+asmlinkage void do_rseq_syscall(struct pt_regs *regs)
+{
+ rseq_syscall(regs);
+}
+#endif
{
siginfo_t info;
- memset(&info, 0, sizeof(info));
-
+ clear_siginfo(&info);
info.si_signo = SIGFPE;
info.si_code = sicode;
info.si_addr = (void __user *)(instruction_pointer(regs) - 4);
if (exceptions == VFP_EXCEPTION_ERROR) {
vfp_panic("unhandled bounce", inst);
- vfp_raise_sigfpe(FPE_FIXME, regs);
+ vfp_raise_sigfpe(FPE_FLTINV, regs);
return;
}
* Save the current VFP state into the provided structures and prepare
* for entry into a new function (signal handler).
*/
-- int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
-- struct user_vfp_exc *ufp_exc)
++ /*
++ * Snapshot the current thread's VFP hardware state into kernel-side
++ * user_vfp/user_vfp_exc buffers (the caller copies them to userspace),
++ * then disable VFP for signal-handler entry.  Now operates on kernel
++ * pointers, so the __put_user_error()/__copy_to_user() calls and the
++ * error accumulator are gone.
++ */
++ int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
++ struct user_vfp_exc *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
-- int err = 0;
/* Ensure that the saved hwstate is up-to-date. */
vfp_sync_hwstate(thread);
* Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details.
*/
-- err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
-- sizeof(hwstate->fpregs));
++ memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
++
/*
 * Copy the status and control register.
 */
-- __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
++ ufp->fpscr = hwstate->fpscr;
/*
 * Copy the exception registers.
 */
-- __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
-- __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
-- __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
--
-- if (err)
-- return -EFAULT;
++ ufp_exc->fpexc = hwstate->fpexc;
++ ufp_exc->fpinst = hwstate->fpinst;
++ /*
++ * BUG FIX: this was a self-assignment (ufp_exc->fpinst2 =
++ * ufp_exc->fpinst2), which left the saved FPINST2 holding whatever
++ * stale data was in the caller's buffer instead of the hardware
++ * state.  Copy from hwstate, matching fpexc/fpinst above.
++ */
++ ufp_exc->fpinst2 = hwstate->fpinst2;
/* Ensure that VFP is disabled. */
vfp_flush_hwstate(thread);