// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/highmem.h>
#include <linux/livepatch.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 */
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	arch_check_user_regs(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

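/*
 * Illustrative sketch (not part of this file): an architecture wires up the
 * arch_check_user_regs() hook used above to sanity check the saved register
 * frame before any other entry work runs. The check below is a placeholder:
 *
 *	static __always_inline void arch_check_user_regs(struct pt_regs *regs)
 *	{
 *		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
 *			WARN_ON_ONCE(!user_mode(regs));
 *	}
 */
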
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long ti_work)
{
	long ret = 0;

	/* Handle ptrace */
	if (ti_work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
		ret = arch_syscall_enter_tracehook(regs);
		if (ret || (ti_work & _TIF_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (ti_work & _TIF_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Either of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(ti_work & _TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}

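/*
 * Note on the return convention: "ret ? : syscall" hands the caller either
 * -1L (ptrace or seccomp rejected/emulated the syscall, so the dispatch must
 * be skipped) or the possibly rewritten syscall number. A hedged sketch of
 * the consuming side, with the label as a placeholder:
 *
 *	nr = syscall_enter_from_user_mode(regs, nr);
 *	if (nr < 0)
 *		goto exit_work;	// the filters already set the return value
 */
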
static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
	unsigned long ti_work;

	ti_work = READ_ONCE(current_thread_info()->flags);
	if (ti_work & SYSCALL_ENTER_WORK)
		syscall = syscall_trace_enter(regs, syscall, ti_work);

	return syscall;
}

long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	return __syscall_enter_from_user_work(regs, syscall);
}

noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = __syscall_enter_from_user_work(regs, syscall);
	instrumentation_end();

	return ret;
}

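/*
 * Usage sketch (illustrative; arch_sys_call_table, NR_syscalls and the
 * pt_regs member names are placeholders for the architecture's own):
 *
 *	noinstr void arch_do_syscall(struct pt_regs *regs)
 *	{
 *		long nr = syscall_enter_from_user_mode(regs, regs->syscall_nr);
 *
 *		if (nr >= 0 && nr < NR_syscalls)
 *			regs->retval = arch_sys_call_table[nr](regs);
 *		syscall_exit_to_user_mode(regs);
 *	}
 */
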
noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}

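/*
 * Usage sketch for the split variant (illustrative): an architecture which
 * must do its own work between establishing state and running the entry
 * work, e.g. fetching an extra argument from the user stack, uses:
 *
 *	syscall_enter_from_user_mode_prepare(regs);
 *	arch_fetch_extra_arg(regs);	// hypothetical arch helper
 *	nr = syscall_enter_from_user_mode_work(regs, nr);
 */
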
/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel state is
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.
 * 4) Tell lockdep that interrupts are enabled
 */
static __always_inline void exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

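/*
 * Illustrative sketch: arch_exit_to_user_mode() is the hook for last minute
 * hardening. On x86, for instance, it is used for CPU buffer clearing, along
 * the lines of:
 *
 *	static __always_inline void arch_exit_to_user_mode(void)
 *	{
 *		mds_user_clear_cpu_buffers();
 *	}
 */
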
/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal(struct pt_regs *regs) { }

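/*
 * Sketch of an architecture override for the weak stub above (illustrative
 * only; arch_handle_signal() is a placeholder, and a real implementation
 * also deals with syscall restart):
 *
 *	void arch_do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig))
 *			arch_handle_signal(&ksig, regs);
 *	}
 */
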
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & _TIF_SIGPENDING)
			arch_do_signal(regs);

		if (ti_work & _TIF_NOTIFY_RESUME) {
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();
		ti_work = READ_ONCE(current_thread_info()->flags);
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}

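/*
 * Worked example: if a signal arrives while schedule() runs with interrupts
 * enabled above, _TIF_SIGPENDING is set again. The flags are re-read with
 * interrupts disabled, so the loop takes another pass and delivers the
 * signal instead of returning to user space with work still pending.
 */
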
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	lockdep_assert_irqs_disabled();

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

#ifndef _TIF_SINGLESTEP
static inline bool report_single_step(unsigned long ti_work)
{
	return false;
}
#else
/*
 * If TIF_SYSCALL_EMU is set, then the only reason to report is when
 * TIF_SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
#define SYSEMU_STEP	(_TIF_SINGLESTEP | _TIF_SYSCALL_EMU)

static inline bool report_single_step(unsigned long ti_work)
{
	return (ti_work & SYSEMU_STEP) == _TIF_SINGLESTEP;
}
#endif

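/*
 * Resulting behaviour of report_single_step() when _TIF_SINGLESTEP exists:
 *
 *	_TIF_SINGLESTEP	_TIF_SYSCALL_EMU	report
 *	      0			0		false
 *	      0			1		false
 *	      1			0		true
 *	      1			1		false (reported at syscall entry)
 */
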
static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
{
	bool step;

	audit_syscall_exit(regs);

	if (ti_work & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(ti_work);
	if (step || ti_work & _TIF_SYSCALL_TRACE)
		arch_syscall_exit_tracehook(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	u32 cached_flags = READ_ONCE(current_thread_info()->flags);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK))
		syscall_exit_work(regs, cached_flags);
}

__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking rcu_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	/* Use the combo lockdep/tracing function */
	trace_hardirqs_off();
	instrumentation_end();

	return ret;
}

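/*
 * Usage sketch (illustrative; the handler invocation is a placeholder): a
 * non-syscall entry point brackets its body with irqentry_enter() and
 * irqentry_exit(), passing the state cookie through so the exit path knows
 * whether RCU needs to be told about the exit:
 *
 *	noinstr void arch_handle_irq(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		arch_irq_dispatch(regs);	// placeholder
 *		instrumentation_end();
 *
 *		irqentry_exit(regs, state);
 *	}
 */
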
void irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();
		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			rcu_irq_exit();
	}
}