/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__
/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)
#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})
/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__aligned(4) __section("_ftrace_branch")	\
			______f = { .func = __func__,			\
				    .file = __FILE__, .line = __LINE__, }; \
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
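/*
 * Example usage (illustrative sketch; the buffer check and process_item()
 * below are hypothetical): annotate the branch the caller expects almost
 * never to be taken, so the compiler keeps the hot path straight-line:
 *
 *	if (unlikely(!buf))
 *		return -ENOMEM;
 *	if (likely(len > 0))
 *		process_item(buf, len);
 */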
/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif
#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif
/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("ANNOTATE_REACHABLE counter=%c0"			\
		     : : "i" (__COUNTER__));				\
})
#define annotate_unreachable() ({					\
	asm volatile("ANNOTATE_UNREACHABLE counter=%c0"			\
		     : : "i" (__COUNTER__));				\
})
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif
#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
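/*
 * Example usage (illustrative; the enum and handlers are hypothetical):
 * tell the compiler and objtool that control cannot fall out of a fully
 * handled switch, silencing "control reaches end of non-void function":
 *
 *	switch (state) {
 *	case STATE_IDLE:    return handle_idle();
 *	case STATE_RUNNING: return handle_running();
 *	}
 *	unreachable();
 */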
/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
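/*
 * Example usage (illustrative; my_vector_entry is hypothetical): keep a
 * handler alive even though nothing references it by symbol, e.g. because
 * it is only reached through a generated table:
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry);
 */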
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
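/*
 * Example usage (illustrative sketch; names are hypothetical): form a
 * pointer 'off' bytes past 'base' without letting the compiler assume the
 * result still points into the same object, as per-CPU accessors do:
 *
 *	static void *shift_ptr(void *base, unsigned long off)
 *	{
 *		return RELOC_HIDE((char *)base, off);
 *	}
 */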
#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif
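/*
 * Example usage (illustrative sketch, in the style of a constant-time
 * memory comparison): make the compiler forget what it knows about 'neq'
 * on each iteration so it cannot short-circuit the loop:
 *
 *	while (size--) {
 *		neq |= *a++ ^ *b++;
 *		OPTIMIZER_HIDE_VAR(neq);
 *	}
 *	return neq;
 */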
/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})
static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure:
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif
static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}
/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits), READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy of the variable '__u'
 * allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)
/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}
#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
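/*
 * Example usage (illustrative; 'wakeup_pending' is a hypothetical flag
 * shared between process context and an interrupt handler on the same
 * CPU): force one real store and one real load per loop iteration
 * instead of whatever the optimizer would otherwise emit:
 *
 *	// interrupt handler
 *	WRITE_ONCE(wakeup_pending, 1);
 *
 *	// process context
 *	while (!READ_ONCE(wakeup_pending))
 *		cpu_relax();
 */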
#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(".discard.addressable") __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
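/*
 * Example usage (illustrative; my_trampoline is hypothetical): keep a
 * function that is only reached from inline assembly from being dropped
 * as unreferenced:
 *
 *	void my_trampoline(void);
 *	__ADDRESSABLE(my_trampoline)
 */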
/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
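/*
 * Worked example (illustrative, with made-up numbers): if the 32-bit slot
 * at address 0x1000 holds the value 0x20, offset_to_ptr() returns
 * 0x1000 + 0x20 = 0x1020. Typical use is decoding a table of relative
 * pointers:
 *
 *	void *target = offset_to_ptr(&rel_table[i]);
 */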
#else /* __ASSEMBLY__ */

#ifdef __KERNEL__
#ifndef LINKER_SCRIPT

#ifdef CONFIG_STACK_VALIDATION
.macro ANNOTATE_UNREACHABLE counter:req
\counter:
	.pushsection .discard.unreachable
	.long \counter\()b -.
	.popsection
.endm

.macro ANNOTATE_REACHABLE counter:req
\counter:
	.pushsection .discard.reachable
	.long \counter\()b -.
	.popsection
.endm

.macro ASM_UNREACHABLE
999:
	.pushsection .discard.unreachable
	.long 999b - .
	.popsection
.endm
#else /* CONFIG_STACK_VALIDATION */
.macro ANNOTATE_UNREACHABLE counter:req
.endm

.macro ANNOTATE_REACHABLE counter:req
.endm

.macro ASM_UNREACHABLE
.endm
#endif /* CONFIG_STACK_VALIDATION */

#endif /* LINKER_SCRIPT */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif
#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)
/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
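/*
 * Example usage (illustrative; struct my_hdr is hypothetical): fail the
 * build, rather than the running system, when a layout assumption breaks:
 *
 *	compiletime_assert(sizeof(struct my_hdr) == 16,
 *			   "struct my_hdr must stay 16 bytes");
 */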
#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
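/*
 * Example (illustrative): this check is what lets ARRAY_SIZE() in
 * <linux/kernel.h> reject plain pointers at compile time:
 *
 *	#define ARRAY_SIZE(arr) \
 *		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 *
 *	int tbl[8];
 *	size_t n = ARRAY_SIZE(tbl);	// OK, 8
 *	int *p = tbl;
 *	size_t m = ARRAY_SIZE(p);	// build error
 */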
#endif /* __LINUX_COMPILER_H */