1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_CONTEXT_TRACKING_H
3 #define _LINUX_CONTEXT_TRACKING_H
5 #include <linux/sched.h>
6 #include <linux/vtime.h>
7 #include <linux/context_tracking_state.h>
8 #include <linux/instrumentation.h>
10 #include <asm/ptrace.h>
#ifdef CONFIG_CONTEXT_TRACKING_USER
/* Per-CPU enable hook — presumably forces user context tracking on @cpu; TODO confirm against kernel/context_tracking.c. */
extern void ct_cpu_track_user(int cpu);

/* Called with interrupts disabled. */
extern void __ct_user_enter(enum ctx_state state);
extern void __ct_user_exit(enum ctx_state state);

/* Wrappers usable without the interrupts-disabled requirement of the __ct_* variants. */
extern void ct_user_enter(enum ctx_state state);
extern void ct_user_exit(enum ctx_state state);

/* Entry points suitable for being invoked as callbacks (e.g. from entry code). */
extern void user_enter_callable(void);
extern void user_exit_callable(void);
26 static inline void user_enter(void)
28 if (context_tracking_enabled())
29 ct_user_enter(CT_STATE_USER);
32 static inline void user_exit(void)
34 if (context_tracking_enabled())
35 ct_user_exit(CT_STATE_USER);
38 /* Called with interrupts disabled. */
39 static __always_inline void user_enter_irqoff(void)
41 if (context_tracking_enabled())
42 __ct_user_enter(CT_STATE_USER);
45 static __always_inline void user_exit_irqoff(void)
47 if (context_tracking_enabled())
48 __ct_user_exit(CT_STATE_USER);
51 static inline enum ctx_state exception_enter(void)
53 enum ctx_state prev_ctx;
55 if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
56 !context_tracking_enabled())
59 prev_ctx = __ct_state();
60 if (prev_ctx != CT_STATE_KERNEL)
61 ct_user_exit(prev_ctx);
66 static inline void exception_exit(enum ctx_state prev_ctx)
68 if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
69 context_tracking_enabled()) {
70 if (prev_ctx != CT_STATE_KERNEL)
71 ct_user_enter(prev_ctx);
75 static __always_inline bool context_tracking_guest_enter(void)
77 if (context_tracking_enabled())
78 __ct_user_enter(CT_STATE_GUEST);
80 return context_tracking_enabled_this_cpu();
83 static __always_inline void context_tracking_guest_exit(void)
85 if (context_tracking_enabled())
86 __ct_user_exit(CT_STATE_GUEST);
/* Warn on @cond, but only when context tracking is enabled. */
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
92 static inline void user_enter(void) { }
93 static inline void user_exit(void) { }
94 static inline void user_enter_irqoff(void) { }
95 static inline void user_exit_irqoff(void) { }
96 static inline int exception_enter(void) { return 0; }
97 static inline void exception_exit(enum ctx_state prev_ctx) { }
98 static inline int ct_state(void) { return -1; }
99 static inline int __ct_state(void) { return -1; }
100 static __always_inline bool context_tracking_guest_enter(void) { return false; }
101 static __always_inline void context_tracking_guest_exit(void) { }
102 #define CT_WARN_ON(cond) do { } while (0)
103 #endif /* !CONFIG_CONTEXT_TRACKING_USER */
#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
/* Idle entry/exit hooks — NOTE(review): presumably called from the idle loop; confirm callers. */
extern void ct_idle_enter(void);
extern void ct_idle_exit(void);
116 * Is RCU watching the current CPU (IOW, it is not in an extended quiescent state)?
118 * Note that this returns the actual boolean data (watching / not watching),
119 * whereas ct_rcu_watching() returns the RCU_WATCHING subvariable of
120 * context_tracking.state.
122 * No ordering, as we are sampling CPU-local information.
124 static __always_inline bool rcu_is_watching_curr_cpu(void)
126 return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING;
130 * Increment the current CPU's context_tracking structure's ->state field
131 * with ordering. Return the new value.
133 static __always_inline unsigned long ct_state_inc(int incby)
135 return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
138 static __always_inline bool warn_rcu_enter(void)
143 * Horrible hack to shut up recursive RCU isn't watching fail since
144 * lots of the actual reporting also relies on RCU.
146 preempt_disable_notrace();
147 if (!rcu_is_watching_curr_cpu()) {
149 ct_state_inc(CT_RCU_WATCHING);
155 static __always_inline void warn_rcu_exit(bool rcu)
158 ct_state_inc(CT_RCU_WATCHING);
159 preempt_enable_notrace();
163 static inline void ct_idle_enter(void) { }
164 static inline void ct_idle_exit(void) { }
166 static __always_inline bool warn_rcu_enter(void) { return false; }
167 static __always_inline void warn_rcu_exit(bool rcu) { }
168 #endif /* !CONFIG_CONTEXT_TRACKING_IDLE */