// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * Mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
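
/*
 * Illustrative only (not part of the original file): users of text_mutex
 * are expected to hold it across the whole patching operation, roughly:
 *
 *	mutex_lock(&text_mutex);
 *	... rewrite instructions ...
 *	mutex_unlock(&text_mutex);
 *
 * On x86, for example, the text-poking machinery asserts that this lock
 * is held while it modifies kernel text.
 */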

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build-time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed &&
	    &__stop___ex_table > &__start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}
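
/*
 * Sketch for orientation (not from this file; the real layout is
 * per-architecture): each entry conceptually pairs a potentially
 * faulting instruction with its recovery code, e.g.
 *
 *	struct exception_table_entry {
 *		int insn;	// offset of the insn that may fault
 *		int fixup;	// offset of the fixup code
 *	};
 *
 * Keeping the table sorted by instruction address is what lets
 * search_extable() binary-search it instead of scanning linearly.
 */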

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}
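
/*
 * Note (added for clarity): the pointer difference above is in units of
 * struct exception_table_entry, so search_extable() receives the number
 * of entries, not a byte count; its arguments are
 * (table, num_entries, address).
 */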

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	if (!e)
		e = search_bpf_extables(addr);
	return e;
}
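
/*
 * Usage sketch (illustrative, simplified; fixup_addr() stands in for a
 * per-architecture helper): a typical page-fault path handles a faulting
 * kernel access like
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_exception_tables(instruction_pointer(regs));
 *	if (fixup) {
 *		regs->ip = fixup_addr(fixup);	// resume at recovery code
 *		return;				// fault handled
 *	}
 *	// otherwise this is a genuine kernel fault (oops)
 */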

int notrace core_kernel_text(unsigned long addr)
{
	if (is_kernel_text(addr))
		return 1;

	if (system_state < SYSTEM_FREEING_INITMEM &&
	    is_kernel_inittext(addr))
		return 1;
	return 0;
}
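
/*
 * For reference (a sketch, not the authoritative definitions): the
 * is_kernel_text()/is_kernel_inittext() helpers reduce to range checks
 * against linker-provided section bounds, roughly
 *
 *	addr >= (unsigned long)_stext && addr < (unsigned long)_etext
 *
 * with init text checked against _sinittext/_einittext, which is why it
 * only counts before init memory is freed.
 */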

int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (is_kernel_inittext(addr))
		return 1;
	return 0;
}

int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be told to start watching again. This can
	 * happen either through tracing that triggers a stack trace,
	 * or a WARN() that fires while coming back from idle, or while
	 * a CPU is coming online or going offline.
	 *
	 * is_module_text_address() as well as the kprobe slots,
	 * is_bpf_text_address() and is_bpf_image_address() require
	 * RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		rcu_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		rcu_nmi_exit();

	return ret;
}
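
/*
 * Example consumer (a sketch, not from this file): a stack unwinder
 * validating a candidate return address before symbolizing it:
 *
 *	if (__kernel_text_address(addr))
 *		print_ip_sym(KERN_DEFAULT, addr);
 */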

/*
 * On some architectures (PPC64, IA64) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;

	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
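
/*
 * Usage sketch (illustrative; ops->handler is a hypothetical field):
 * validating a function pointer before trusting it:
 *
 *	if (!func_ptr_is_kernel_text(ops->handler))
 *		return -EINVAL;
 */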