// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.

*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * mutex protecting text section modification (dynamic code patching).
 * some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
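
/*
 * Illustrative sketch, not part of the original file: a dynamic code
 * patching path would typically bracket its modification with text_mutex.
 * text_poke() below is x86's patching primitive (from <asm/text-patching.h>);
 * other architectures use their own helpers, and the wrapper name here is
 * purely hypothetical.
 */
#if 0
static void example_patch_text(void *addr, const void *insn, size_t len)
{
	mutex_lock(&text_mutex);	/* sleeping while holding it is fine */
	text_poke(addr, insn, len);	/* arch-specific text modification */
	mutex_unlock(&text_mutex);
}
#endif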

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}
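
/*
 * Illustrative sketch, not part of the original file: search_extable()
 * (implemented in lib/extable.c) relies on the table being sorted so that
 * it can binary-search it.  The simplified version below assumes entries
 * store absolute instruction addresses in ->insn; many architectures
 * actually store relative offsets, so treat this purely as a model of the
 * lookup, not as the real implementation.
 */
#if 0
static const struct exception_table_entry *
example_search_extable(const struct exception_table_entry *base, size_t num,
		       unsigned long addr)
{
	size_t lo = 0, hi = num;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (base[mid].insn == addr)
			return &base[mid];
		if (base[mid].insn < addr)
			lo = mid + 1;
		else
			hi = mid;
	}
	return NULL;
}
#endif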

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	return e;
}
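
/*
 * Illustrative sketch, not part of the original file: this is roughly how
 * an architecture's fault handler consults the exception tables to recover
 * from a fault in a whitelisted instruction (e.g. a get_user() access).
 * The function name is hypothetical and the fixup handling is simplified;
 * each architecture decodes its own fixup encoding, and extable_fixup()
 * is only how some architectures expose the recovery address.
 */
#if 0
static bool example_fixup_exception(struct pt_regs *regs, unsigned long ip)
{
	const struct exception_table_entry *e;

	e = search_exception_tables(ip);
	if (!e)
		return false;	/* no fixup registered: a genuine oops */

	/* Redirect execution to the recovery code recorded at build time. */
	instruction_pointer_set(regs, extable_fixup(e));
	return true;
}
#endif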

int init_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_sinittext &&
	    addr < (unsigned long)_einittext)
		return 1;
	return 0;
}

int notrace core_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_stext &&
	    addr < (unsigned long)_etext)
		return 1;

	/* Init text is discarded after boot, so only count it while booting. */
	if (system_state < SYSTEM_RUNNING &&
	    init_kernel_text(addr))
		return 1;
	return 0;
}

/**
 * core_kernel_data - tell if addr points to kernel data
 * @addr: address to test
 *
 * Returns true if @addr passed in is from the core kernel data
 * section.
 *
 * Note: On some archs it may return true for core RODATA, and false
 * for others. But will always be true for core RW data.
 */
int core_kernel_data(unsigned long addr)
{
	if (addr >= (unsigned long)_sdata &&
	    addr < (unsigned long)_edata)
		return 1;
	return 0;
}

int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (init_kernel_text(addr))
		return 1;
	return 0;
}
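
/*
 * Illustrative sketch, not part of the original file: stack dumpers use
 * __kernel_text_address() to decide whether a word found on the stack is
 * plausibly a return address worth printing.  The helper below is a
 * hypothetical simplification of that pattern.
 */
#if 0
static void example_scan_stack(const unsigned long *stack, unsigned int words)
{
	unsigned int i;

	for (i = 0; i < words; i++) {
		if (__kernel_text_address(stack[i]))
			pr_info(" [<%px>] %pS\n",
				(void *)stack[i], (void *)stack[i]);
	}
}
#endif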

int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be notified that it requires to start
	 * watching again. This can happen either by tracing that
	 * triggers a stack trace, or a WARN() that happens during
	 * coming back from idle, or cpu on or offlining.
	 *
	 * is_module_text_address() as well as the kprobe slots
	 * and is_bpf_text_address() require RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		rcu_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		rcu_nmi_exit();

	return ret;
}

/*
 * On some architectures (PPC64, IA64) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;
	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
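
/*
 * Illustrative sketch, not part of the original file: a sanity check that a
 * callback pointer handed in from elsewhere actually lands in kernel or
 * module text before it is invoked.  The helper name and the WARN_ON()
 * policy are hypothetical; the point is that raw function pointers should
 * go through func_ptr_is_kernel_text() so descriptor-based architectures
 * (PPC64, IA64) are handled correctly.
 */
#if 0
static int example_check_callback(void (*cb)(void *), void *data)
{
	if (WARN_ON(!func_ptr_is_kernel_text((void *)cb)))
		return -EINVAL;

	cb(data);
	return 0;
}
#endif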