Commit | Line | Data |
---|---|---|
1a59d1b8 | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
f71d20e9 | 2 | /* |
24b9f0d2 SS |
3 | * Copyright (C) 2002 Richard Henderson |
4 | * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. | |
46752820 | 5 | * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org> |
24b9f0d2 | 6 | */ |
51161bfc LR |
7 | |
8 | #define INCLUDE_VERMAGIC | |
9 | ||
9984de1a | 10 | #include <linux/export.h> |
8a293be0 | 11 | #include <linux/extable.h> |
1da177e4 | 12 | #include <linux/moduleloader.h> |
c8424e77 | 13 | #include <linux/module_signature.h> |
af658dca | 14 | #include <linux/trace_events.h> |
1da177e4 | 15 | #include <linux/init.h> |
ae84e324 | 16 | #include <linux/kallsyms.h> |
9294523e | 17 | #include <linux/buildid.h> |
3b5d5c6b | 18 | #include <linux/fs.h> |
9f158333 | 19 | #include <linux/kernel.h> |
b89999d0 | 20 | #include <linux/kernel_read_file.h> |
fbed4fea | 21 | #include <linux/kstrtox.h> |
1da177e4 LT |
22 | #include <linux/slab.h> |
23 | #include <linux/vmalloc.h> | |
24 | #include <linux/elf.h> | |
25 | #include <linux/seq_file.h> | |
26 | #include <linux/syscalls.h> | |
27 | #include <linux/fcntl.h> | |
28 | #include <linux/rcupdate.h> | |
c59ede7b | 29 | #include <linux/capability.h> |
1da177e4 LT |
30 | #include <linux/cpu.h> |
31 | #include <linux/moduleparam.h> | |
32 | #include <linux/errno.h> | |
33 | #include <linux/err.h> | |
34 | #include <linux/vermagic.h> | |
35 | #include <linux/notifier.h> | |
f6a57033 | 36 | #include <linux/sched.h> |
1da177e4 | 37 | #include <linux/device.h> |
c988d2b2 | 38 | #include <linux/string.h> |
97d1f15b | 39 | #include <linux/mutex.h> |
d72b3751 | 40 | #include <linux/rculist.h> |
7c0f6ba6 | 41 | #include <linux/uaccess.h> |
1da177e4 | 42 | #include <asm/cacheflush.h> |
563ec5cb | 43 | #include <linux/set_memory.h> |
eb8cdec4 | 44 | #include <asm/mmu_context.h> |
b817f6fe | 45 | #include <linux/license.h> |
6d762394 | 46 | #include <asm/sections.h> |
97e1c18e | 47 | #include <linux/tracepoint.h> |
90d595fe | 48 | #include <linux/ftrace.h> |
7e545d6e | 49 | #include <linux/livepatch.h> |
22a9d645 | 50 | #include <linux/async.h> |
fbf59bc9 | 51 | #include <linux/percpu.h> |
4f2294b6 | 52 | #include <linux/kmemleak.h> |
bf5438fc | 53 | #include <linux/jump_label.h> |
84e1c6bb | 54 | #include <linux/pfn.h> |
403ed278 | 55 | #include <linux/bsearch.h> |
9d5059c9 | 56 | #include <linux/dynamic_debug.h> |
ca86cad7 | 57 | #include <linux/audit.h> |
89245600 | 58 | #include <linux/cfi.h> |
df3e764d | 59 | #include <linux/debugfs.h> |
2f3238ae | 60 | #include <uapi/linux/module.h> |
cfc1d277 | 61 | #include "internal.h" |
1da177e4 | 62 | |
7ead8b83 LZ |
63 | #define CREATE_TRACE_POINTS |
64 | #include <trace/events/module.h> | |
65 | ||
75676500 RR |
66 | /* |
67 | * Mutex protects: | |
68 | * 1) List of modules (also safely readable with preempt_disable), | |
69 | * 2) module_use links, | |
55ce556d | 70 | * 3) mod_tree.addr_min/mod_tree.addr_max. |
24b9f0d2 SS |
71 | * (delete and add uses RCU list operations). |
72 | */ | |
8ab4ed08 AT |
73 | DEFINE_MUTEX(module_mutex); |
74 | LIST_HEAD(modules); | |
67fc4e0c | 75 | |
1a7b7d92 | 76 | /* Work queue for freeing init sections in success case */ |
fdf09ab8 DJ |
77 | static void do_free_init(struct work_struct *w); |
78 | static DECLARE_WORK(init_free_wq, do_free_init); | |
79 | static LLIST_HEAD(init_free_list); | |
1a7b7d92 | 80 | |
58d208de | 81 | struct mod_tree_root mod_tree __cacheline_aligned = { |
4f666546 | 82 | .addr_min = -1UL, |
106a4ee2 | 83 | }; |
106a4ee2 | 84 | |
47889798 AT |
85 | struct symsearch { |
86 | const struct kernel_symbol *start, *stop; | |
87 | const s32 *crcs; | |
88 | enum mod_license license; | |
89 | }; | |
90 | ||
4f666546 | 91 | /* |
ac3b4328 | 92 | * Bounds of module memory, for speeding up __module_address. |
4f666546 PZ |
93 | * Protected by module_mutex. |
94 | */ | |
ac3b4328 SL |
95 | static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base, |
96 | unsigned int size, struct mod_tree_root *tree) | |
4f666546 PZ |
97 | { |
98 | unsigned long min = (unsigned long)base; | |
99 | unsigned long max = min + size; | |
100 | ||
ac3b4328 SL |
101 | #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC |
102 | if (mod_mem_type_is_core_data(type)) { | |
103 | if (min < tree->data_addr_min) | |
104 | tree->data_addr_min = min; | |
105 | if (max > tree->data_addr_max) | |
106 | tree->data_addr_max = max; | |
107 | return; | |
108 | } | |
109 | #endif | |
446d5566 CL |
110 | if (min < tree->addr_min) |
111 | tree->addr_min = min; | |
112 | if (max > tree->addr_max) | |
113 | tree->addr_max = max; | |
4f666546 PZ |
114 | } |
115 | ||
116 | static void mod_update_bounds(struct module *mod) | |
117 | { | |
ac3b4328 SL |
118 | for_each_mod_mem_type(type) { |
119 | struct module_memory *mod_mem = &mod->mem[type]; | |
120 | ||
121 | if (mod_mem->size) | |
122 | __mod_update_bounds(type, mod_mem->base, mod_mem->size, &mod_tree); | |
123 | } | |
4f666546 PZ |
124 | } |
125 | ||
19e4529e | 126 | /* Block module loading/unloading? */ |
ecc726f1 | 127 | int modules_disabled; |
02608bef | 128 | core_param(nomodule, modules_disabled, bint, 0); |
19e4529e | 129 | |
c9a3ba55 RR |
130 | /* Waiting for a module to finish initializing? */ |
131 | static DECLARE_WAIT_QUEUE_HEAD(module_wq); | |
132 | ||
e041c683 | 133 | static BLOCKING_NOTIFIER_HEAD(module_notify_list); |
1da177e4 | 134 | |
6da0b565 | 135 | int register_module_notifier(struct notifier_block *nb) |
1da177e4 | 136 | { |
e041c683 | 137 | return blocking_notifier_chain_register(&module_notify_list, nb); |
1da177e4 LT |
138 | } |
139 | EXPORT_SYMBOL(register_module_notifier); | |
140 | ||
6da0b565 | 141 | int unregister_module_notifier(struct notifier_block *nb) |
1da177e4 | 142 | { |
e041c683 | 143 | return blocking_notifier_chain_unregister(&module_notify_list, nb); |
1da177e4 LT |
144 | } |
145 | EXPORT_SYMBOL(unregister_module_notifier); | |
146 | ||
71d9f507 MB |
147 | /* |
148 | * We require a truly strong try_module_get(): 0 means success. | |
149 | * Otherwise an error is returned due to ongoing or failed | |
150 | * initialization etc. | |
151 | */ | |
1da177e4 LT |
152 | static inline int strong_try_module_get(struct module *mod) |
153 | { | |
0d21b0e3 | 154 | BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); |
1da177e4 | 155 | if (mod && mod->state == MODULE_STATE_COMING) |
c9a3ba55 RR |
156 | return -EBUSY; |
157 | if (try_module_get(mod)) | |
1da177e4 | 158 | return 0; |
c9a3ba55 RR |
159 | else |
160 | return -ENOENT; | |
1da177e4 LT |
161 | } |
162 | ||
373d4d09 RR |
163 | static inline void add_taint_module(struct module *mod, unsigned flag, |
164 | enum lockdep_ok lockdep_ok) | |
fa3ba2e8 | 165 | { |
373d4d09 | 166 | add_taint(flag, lockdep_ok); |
7fd8329b | 167 | set_bit(flag, &mod->taints); |
fa3ba2e8 FM |
168 | } |
169 | ||
02a3e59a RD |
170 | /* |
171 | * A thread that wants to hold a reference to a module only while it | |
f49169c9 | 172 | * is running can call this to safely exit. |
1da177e4 | 173 | */ |
ca3574bd | 174 | void __noreturn __module_put_and_kthread_exit(struct module *mod, long code) |
1da177e4 LT |
175 | { |
176 | module_put(mod); | |
ca3574bd | 177 | kthread_exit(code); |
1da177e4 | 178 | } |
ca3574bd | 179 | EXPORT_SYMBOL(__module_put_and_kthread_exit); |
22a8bdeb | 180 | |
1da177e4 | 181 | /* Find a module section: 0 means not found. */ |
49668688 | 182 | static unsigned int find_sec(const struct load_info *info, const char *name) |
1da177e4 LT |
183 | { |
184 | unsigned int i; | |
185 | ||
49668688 RR |
186 | for (i = 1; i < info->hdr->e_shnum; i++) { |
187 | Elf_Shdr *shdr = &info->sechdrs[i]; | |
1da177e4 | 188 | /* Alloc bit cleared means "ignore it." */ |
49668688 RR |
189 | if ((shdr->sh_flags & SHF_ALLOC) |
190 | && strcmp(info->secstrings + shdr->sh_name, name) == 0) | |
1da177e4 | 191 | return i; |
49668688 | 192 | } |
1da177e4 LT |
193 | return 0; |
194 | } | |
195 | ||
5e458cc0 | 196 | /* Find a module section, or NULL. */ |
49668688 | 197 | static void *section_addr(const struct load_info *info, const char *name) |
5e458cc0 RR |
198 | { |
199 | /* Section 0 has sh_addr 0. */ | |
49668688 | 200 | return (void *)info->sechdrs[find_sec(info, name)].sh_addr; |
5e458cc0 RR |
201 | } |
202 | ||
203 | /* Find a module section, or NULL. Fill in number of "objects" in section. */ | |
49668688 | 204 | static void *section_objs(const struct load_info *info, |
5e458cc0 RR |
205 | const char *name, |
206 | size_t object_size, | |
207 | unsigned int *num) | |
208 | { | |
49668688 | 209 | unsigned int sec = find_sec(info, name); |
5e458cc0 RR |
210 | |
211 | /* Section 0 has sh_addr 0 and sh_size 0. */ | |
49668688 RR |
212 | *num = info->sechdrs[sec].sh_size / object_size; |
213 | return (void *)info->sechdrs[sec].sh_addr; | |
5e458cc0 RR |
214 | } |
215 | ||
36e68442 AN |
216 | /* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */ |
217 | static unsigned int find_any_sec(const struct load_info *info, const char *name) | |
218 | { | |
219 | unsigned int i; | |
220 | ||
221 | for (i = 1; i < info->hdr->e_shnum; i++) { | |
222 | Elf_Shdr *shdr = &info->sechdrs[i]; | |
223 | if (strcmp(info->secstrings + shdr->sh_name, name) == 0) | |
224 | return i; | |
225 | } | |
226 | return 0; | |
227 | } | |
228 | ||
229 | /* | |
230 | * Find a module section, or NULL. Fill in number of "objects" in section. | |
231 | * Ignores SHF_ALLOC flag. | |
232 | */ | |
233 | static __maybe_unused void *any_section_objs(const struct load_info *info, | |
234 | const char *name, | |
235 | size_t object_size, | |
236 | unsigned int *num) | |
237 | { | |
238 | unsigned int sec = find_any_sec(info, name); | |
239 | ||
240 | /* Section 0 has sh_addr 0 and sh_size 0. */ | |
241 | *num = info->sechdrs[sec].sh_size / object_size; | |
242 | return (void *)info->sechdrs[sec].sh_addr; | |
243 | } | |
244 | ||
1da177e4 LT |
245 | #ifndef CONFIG_MODVERSIONS |
246 | #define symversion(base, idx) NULL | |
247 | #else | |
f83ca9fe | 248 | #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) |
1da177e4 LT |
249 | #endif |
250 | ||
7290d580 AB |
251 | static const char *kernel_symbol_name(const struct kernel_symbol *sym) |
252 | { | |
253 | #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS | |
254 | return offset_to_ptr(&sym->name_offset); | |
255 | #else | |
256 | return sym->name; | |
257 | #endif | |
258 | } | |
259 | ||
8651ec01 MM |
260 | static const char *kernel_symbol_namespace(const struct kernel_symbol *sym) |
261 | { | |
262 | #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS | |
069e1c07 WD |
263 | if (!sym->namespace_offset) |
264 | return NULL; | |
8651ec01 MM |
265 | return offset_to_ptr(&sym->namespace_offset); |
266 | #else | |
267 | return sym->namespace; | |
268 | #endif | |
269 | } | |
270 | ||
91fb02f3 | 271 | int cmp_name(const void *name, const void *sym) |
403ed278 | 272 | { |
b605be65 | 273 | return strcmp(name, kernel_symbol_name(sym)); |
403ed278 AIB |
274 | } |
275 | ||
2d25bc55 JY |
276 | static bool find_exported_symbol_in_section(const struct symsearch *syms, |
277 | struct module *owner, | |
c6eee9df | 278 | struct find_symbol_arg *fsa) |
de4d8d53 | 279 | { |
403ed278 AIB |
280 | struct kernel_symbol *sym; |
281 | ||
cdd66eb5 MY |
282 | if (!fsa->gplok && syms->license == GPL_ONLY) |
283 | return false; | |
284 | ||
403ed278 AIB |
285 | sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, |
286 | sizeof(struct kernel_symbol), cmp_name); | |
7390b94a MY |
287 | if (!sym) |
288 | return false; | |
403ed278 | 289 | |
7390b94a MY |
290 | fsa->owner = owner; |
291 | fsa->crc = symversion(syms->crcs, sym - syms->start); | |
292 | fsa->sym = sym; | |
293 | fsa->license = syms->license; | |
de4d8d53 | 294 | |
7390b94a | 295 | return true; |
de4d8d53 RR |
296 | } |
297 | ||
24b9f0d2 SS |
298 | /* |
299 | * Find an exported symbol and return it, along with, (optional) crc and | |
300 | * (optional) module which owns it. Needs preempt disabled or module_mutex. | |
301 | */ | |
47889798 | 302 | bool find_symbol(struct find_symbol_arg *fsa) |
dafd0940 | 303 | { |
71e4b309 CH |
304 | static const struct symsearch arr[] = { |
305 | { __start___ksymtab, __stop___ksymtab, __start___kcrctab, | |
36794822 | 306 | NOT_GPL_ONLY }, |
71e4b309 CH |
307 | { __start___ksymtab_gpl, __stop___ksymtab_gpl, |
308 | __start___kcrctab_gpl, | |
36794822 | 309 | GPL_ONLY }, |
71e4b309 | 310 | }; |
71e4b309 CH |
311 | struct module *mod; |
312 | unsigned int i; | |
dafd0940 | 313 | |
71e4b309 | 314 | module_assert_mutex_or_preempt(); |
dafd0940 | 315 | |
71e4b309 | 316 | for (i = 0; i < ARRAY_SIZE(arr); i++) |
0b96615c CH |
317 | if (find_exported_symbol_in_section(&arr[i], NULL, fsa)) |
318 | return true; | |
71e4b309 CH |
319 | |
320 | list_for_each_entry_rcu(mod, &modules, list, | |
321 | lockdep_is_held(&module_mutex)) { | |
322 | struct symsearch arr[] = { | |
323 | { mod->syms, mod->syms + mod->num_syms, mod->crcs, | |
36794822 | 324 | NOT_GPL_ONLY }, |
71e4b309 CH |
325 | { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, |
326 | mod->gpl_crcs, | |
36794822 | 327 | GPL_ONLY }, |
71e4b309 CH |
328 | }; |
329 | ||
330 | if (mod->state == MODULE_STATE_UNFORMED) | |
331 | continue; | |
332 | ||
333 | for (i = 0; i < ARRAY_SIZE(arr); i++) | |
0b96615c CH |
334 | if (find_exported_symbol_in_section(&arr[i], mod, fsa)) |
335 | return true; | |
dafd0940 RR |
336 | } |
337 | ||
0b96615c CH |
338 | pr_debug("Failed to find symbol %s\n", fsa->name); |
339 | return false; | |
1da177e4 LT |
340 | } |
341 | ||
fe0d34d2 RR |
342 | /* |
343 | * Search for module by name: must hold module_mutex (or preempt disabled | |
344 | * for read-only access). | |
345 | */ | |
91fb02f3 AT |
346 | struct module *find_module_all(const char *name, size_t len, |
347 | bool even_unformed) | |
1da177e4 LT |
348 | { |
349 | struct module *mod; | |
350 | ||
fe0d34d2 | 351 | module_assert_mutex_or_preempt(); |
0be964be | 352 | |
bf08949c MH |
353 | list_for_each_entry_rcu(mod, &modules, list, |
354 | lockdep_is_held(&module_mutex)) { | |
0d21b0e3 RR |
355 | if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) |
356 | continue; | |
4f6de4d5 | 357 | if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) |
1da177e4 LT |
358 | return mod; |
359 | } | |
360 | return NULL; | |
361 | } | |
0d21b0e3 RR |
362 | |
363 | struct module *find_module(const char *name) | |
364 | { | |
4f6de4d5 | 365 | return find_module_all(name, strlen(name), false); |
0d21b0e3 | 366 | } |
1da177e4 LT |
367 | |
368 | #ifdef CONFIG_SMP | |
fbf59bc9 | 369 | |
259354de | 370 | static inline void __percpu *mod_percpu(struct module *mod) |
fbf59bc9 | 371 | { |
259354de TH |
372 | return mod->percpu; |
373 | } | |
fbf59bc9 | 374 | |
9eb76d77 | 375 | static int percpu_modalloc(struct module *mod, struct load_info *info) |
259354de | 376 | { |
9eb76d77 RR |
377 | Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu]; |
378 | unsigned long align = pcpusec->sh_addralign; | |
379 | ||
380 | if (!pcpusec->sh_size) | |
381 | return 0; | |
382 | ||
fbf59bc9 | 383 | if (align > PAGE_SIZE) { |
bddb12b3 AM |
384 | pr_warn("%s: per-cpu alignment %li > %li\n", |
385 | mod->name, align, PAGE_SIZE); | |
fbf59bc9 TH |
386 | align = PAGE_SIZE; |
387 | } | |
388 | ||
9eb76d77 | 389 | mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align); |
259354de | 390 | if (!mod->percpu) { |
bddb12b3 AM |
391 | pr_warn("%s: Could not allocate %lu bytes percpu data\n", |
392 | mod->name, (unsigned long)pcpusec->sh_size); | |
259354de TH |
393 | return -ENOMEM; |
394 | } | |
9eb76d77 | 395 | mod->percpu_size = pcpusec->sh_size; |
259354de | 396 | return 0; |
fbf59bc9 TH |
397 | } |
398 | ||
259354de | 399 | static void percpu_modfree(struct module *mod) |
fbf59bc9 | 400 | { |
259354de | 401 | free_percpu(mod->percpu); |
fbf59bc9 TH |
402 | } |
403 | ||
49668688 | 404 | static unsigned int find_pcpusec(struct load_info *info) |
6b588c18 | 405 | { |
49668688 | 406 | return find_sec(info, ".data..percpu"); |
6b588c18 TH |
407 | } |
408 | ||
259354de TH |
409 | static void percpu_modcopy(struct module *mod, |
410 | const void *from, unsigned long size) | |
6b588c18 TH |
411 | { |
412 | int cpu; | |
413 | ||
414 | for_each_possible_cpu(cpu) | |
259354de | 415 | memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); |
6b588c18 TH |
416 | } |
417 | ||
383776fa | 418 | bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) |
10fad5e4 TH |
419 | { |
420 | struct module *mod; | |
421 | unsigned int cpu; | |
422 | ||
423 | preempt_disable(); | |
424 | ||
425 | list_for_each_entry_rcu(mod, &modules, list) { | |
0d21b0e3 RR |
426 | if (mod->state == MODULE_STATE_UNFORMED) |
427 | continue; | |
10fad5e4 TH |
428 | if (!mod->percpu_size) |
429 | continue; | |
430 | for_each_possible_cpu(cpu) { | |
431 | void *start = per_cpu_ptr(mod->percpu, cpu); | |
383776fa | 432 | void *va = (void *)addr; |
10fad5e4 | 433 | |
383776fa | 434 | if (va >= start && va < start + mod->percpu_size) { |
8ce371f9 | 435 | if (can_addr) { |
383776fa | 436 | *can_addr = (unsigned long) (va - start); |
8ce371f9 PZ |
437 | *can_addr += (unsigned long) |
438 | per_cpu_ptr(mod->percpu, | |
439 | get_boot_cpu_id()); | |
440 | } | |
10fad5e4 TH |
441 | preempt_enable(); |
442 | return true; | |
443 | } | |
444 | } | |
445 | } | |
446 | ||
447 | preempt_enable(); | |
448 | return false; | |
6b588c18 TH |
449 | } |
450 | ||
383776fa | 451 | /** |
24389b61 | 452 | * is_module_percpu_address() - test whether address is from module static percpu |
383776fa TG |
453 | * @addr: address to test |
454 | * | |
455 | * Test whether @addr belongs to module static percpu area. | |
456 | * | |
24389b61 | 457 | * Return: %true if @addr is from module static percpu area |
383776fa TG |
458 | */ |
459 | bool is_module_percpu_address(unsigned long addr) | |
460 | { | |
461 | return __is_module_percpu_address(addr, NULL); | |
462 | } | |
463 | ||
1da177e4 | 464 | #else /* ... !CONFIG_SMP */ |
6b588c18 | 465 | |
259354de | 466 | static inline void __percpu *mod_percpu(struct module *mod) |
1da177e4 LT |
467 | { |
468 | return NULL; | |
469 | } | |
9eb76d77 | 470 | static int percpu_modalloc(struct module *mod, struct load_info *info) |
259354de | 471 | { |
9eb76d77 RR |
472 | /* UP modules shouldn't have this section: ENOMEM isn't quite right */ |
473 | if (info->sechdrs[info->index.pcpu].sh_size != 0) | |
474 | return -ENOMEM; | |
475 | return 0; | |
259354de TH |
476 | } |
477 | static inline void percpu_modfree(struct module *mod) | |
1da177e4 | 478 | { |
1da177e4 | 479 | } |
49668688 | 480 | static unsigned int find_pcpusec(struct load_info *info) |
1da177e4 LT |
481 | { |
482 | return 0; | |
483 | } | |
259354de TH |
484 | static inline void percpu_modcopy(struct module *mod, |
485 | const void *from, unsigned long size) | |
1da177e4 LT |
486 | { |
487 | /* pcpusec should be 0, and size of that section should be 0. */ | |
488 | BUG_ON(size != 0); | |
489 | } | |
10fad5e4 TH |
490 | bool is_module_percpu_address(unsigned long addr) |
491 | { | |
492 | return false; | |
493 | } | |
6b588c18 | 494 | |
383776fa TG |
495 | bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) |
496 | { | |
497 | return false; | |
498 | } | |
499 | ||
1da177e4 LT |
500 | #endif /* CONFIG_SMP */ |
501 | ||
c988d2b2 MD |
502 | #define MODINFO_ATTR(field) \ |
503 | static void setup_modinfo_##field(struct module *mod, const char *s) \ | |
504 | { \ | |
505 | mod->field = kstrdup(s, GFP_KERNEL); \ | |
506 | } \ | |
507 | static ssize_t show_modinfo_##field(struct module_attribute *mattr, \ | |
4befb026 | 508 | struct module_kobject *mk, char *buffer) \ |
c988d2b2 | 509 | { \ |
cc56ded3 | 510 | return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \ |
c988d2b2 MD |
511 | } \ |
512 | static int modinfo_##field##_exists(struct module *mod) \ | |
513 | { \ | |
514 | return mod->field != NULL; \ | |
515 | } \ | |
516 | static void free_modinfo_##field(struct module *mod) \ | |
517 | { \ | |
22a8bdeb DW |
518 | kfree(mod->field); \ |
519 | mod->field = NULL; \ | |
c988d2b2 MD |
520 | } \ |
521 | static struct module_attribute modinfo_##field = { \ | |
7b595756 | 522 | .attr = { .name = __stringify(field), .mode = 0444 }, \ |
c988d2b2 MD |
523 | .show = show_modinfo_##field, \ |
524 | .setup = setup_modinfo_##field, \ | |
525 | .test = modinfo_##field##_exists, \ | |
526 | .free = free_modinfo_##field, \ | |
527 | }; | |
528 | ||
529 | MODINFO_ATTR(version); | |
530 | MODINFO_ATTR(srcversion); | |
531 | ||
6f1dae1d AT |
532 | static struct { |
533 | char name[MODULE_NAME_LEN + 1]; | |
534 | char taints[MODULE_FLAGS_BUF_SIZE]; | |
535 | } last_unloaded_module; | |
e14af7ee | 536 | |
03e88ae1 | 537 | #ifdef CONFIG_MODULE_UNLOAD |
eb0c5377 SR |
538 | |
539 | EXPORT_TRACEPOINT_SYMBOL(module_get); | |
540 | ||
e513cc1c MH |
541 | /* MODULE_REF_BASE is the base reference count by kmodule loader. */ |
542 | #define MODULE_REF_BASE 1 | |
543 | ||
1da177e4 | 544 | /* Init the unload section of the module. */ |
9f85a4bb | 545 | static int module_unload_init(struct module *mod) |
1da177e4 | 546 | { |
e513cc1c MH |
547 | /* |
548 | * Initialize reference counter to MODULE_REF_BASE. | |
549 | * refcnt == 0 means module is going. | |
550 | */ | |
551 | atomic_set(&mod->refcnt, MODULE_REF_BASE); | |
9f85a4bb | 552 | |
2c02dfe7 LT |
553 | INIT_LIST_HEAD(&mod->source_list); |
554 | INIT_LIST_HEAD(&mod->target_list); | |
e1783a24 | 555 | |
1da177e4 | 556 | /* Hold reference count during initialization. */ |
e513cc1c | 557 | atomic_inc(&mod->refcnt); |
9f85a4bb RR |
558 | |
559 | return 0; | |
1da177e4 LT |
560 | } |
561 | ||
1da177e4 LT |
562 | /* Does a already use b? */ |
563 | static int already_uses(struct module *a, struct module *b) | |
564 | { | |
565 | struct module_use *use; | |
566 | ||
2c02dfe7 | 567 | list_for_each_entry(use, &b->source_list, source_list) { |
33c951f6 | 568 | if (use->source == a) |
1da177e4 | 569 | return 1; |
1da177e4 | 570 | } |
5e124169 | 571 | pr_debug("%s does not use %s!\n", a->name, b->name); |
1da177e4 LT |
572 | return 0; |
573 | } | |
574 | ||
2c02dfe7 LT |
575 | /* |
576 | * Module a uses b | |
577 | * - we add 'a' as a "source", 'b' as a "target" of module use | |
578 | * - the module_use is added to the list of 'b' sources (so | |
579 | * 'b' can walk the list to see who sourced them), and of 'a' | |
580 | * targets (so 'a' can see what modules it targets). | |
581 | */ | |
582 | static int add_module_usage(struct module *a, struct module *b) | |
583 | { | |
2c02dfe7 LT |
584 | struct module_use *use; |
585 | ||
5e124169 | 586 | pr_debug("Allocating new usage for %s.\n", a->name); |
2c02dfe7 | 587 | use = kmalloc(sizeof(*use), GFP_ATOMIC); |
9ad04574 | 588 | if (!use) |
2c02dfe7 | 589 | return -ENOMEM; |
2c02dfe7 LT |
590 | |
591 | use->source = a; | |
592 | use->target = b; | |
593 | list_add(&use->source_list, &b->source_list); | |
594 | list_add(&use->target_list, &a->target_list); | |
2c02dfe7 LT |
595 | return 0; |
596 | } | |
597 | ||
75676500 | 598 | /* Module a uses b: caller needs module_mutex() */ |
7ef5264d | 599 | static int ref_module(struct module *a, struct module *b) |
1da177e4 | 600 | { |
c8e21ced | 601 | int err; |
270a6c4c | 602 | |
9bea7f23 | 603 | if (b == NULL || already_uses(a, b)) |
218ce735 | 604 | return 0; |
218ce735 | 605 | |
9bea7f23 RR |
606 | /* If module isn't available, we fail. */ |
607 | err = strong_try_module_get(b); | |
c9a3ba55 | 608 | if (err) |
9bea7f23 | 609 | return err; |
1da177e4 | 610 | |
2c02dfe7 LT |
611 | err = add_module_usage(a, b); |
612 | if (err) { | |
1da177e4 | 613 | module_put(b); |
9bea7f23 | 614 | return err; |
1da177e4 | 615 | } |
9bea7f23 | 616 | return 0; |
1da177e4 LT |
617 | } |
618 | ||
619 | /* Clear the unload stuff of the module. */ | |
620 | static void module_unload_free(struct module *mod) | |
621 | { | |
2c02dfe7 | 622 | struct module_use *use, *tmp; |
1da177e4 | 623 | |
75676500 | 624 | mutex_lock(&module_mutex); |
2c02dfe7 LT |
625 | list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) { |
626 | struct module *i = use->target; | |
5e124169 | 627 | pr_debug("%s unusing %s\n", mod->name, i->name); |
2c02dfe7 LT |
628 | module_put(i); |
629 | list_del(&use->source_list); | |
630 | list_del(&use->target_list); | |
631 | kfree(use); | |
1da177e4 | 632 | } |
75676500 | 633 | mutex_unlock(&module_mutex); |
1da177e4 LT |
634 | } |
635 | ||
636 | #ifdef CONFIG_MODULE_FORCE_UNLOAD | |
fb169793 | 637 | static inline int try_force_unload(unsigned int flags) |
1da177e4 LT |
638 | { |
639 | int ret = (flags & O_TRUNC); | |
640 | if (ret) | |
373d4d09 | 641 | add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE); |
1da177e4 LT |
642 | return ret; |
643 | } | |
644 | #else | |
fb169793 | 645 | static inline int try_force_unload(unsigned int flags) |
1da177e4 LT |
646 | { |
647 | return 0; | |
648 | } | |
649 | #endif /* CONFIG_MODULE_FORCE_UNLOAD */ | |
650 | ||
e513cc1c MH |
651 | /* Try to release refcount of module, 0 means success. */ |
652 | static int try_release_module_ref(struct module *mod) | |
1da177e4 | 653 | { |
e513cc1c | 654 | int ret; |
1da177e4 | 655 | |
e513cc1c MH |
656 | /* Try to decrement refcnt which we set at loading */ |
657 | ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt); | |
658 | BUG_ON(ret < 0); | |
659 | if (ret) | |
660 | /* Someone can put this right now, recover with checking */ | |
661 | ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0); | |
1da177e4 | 662 | |
e513cc1c MH |
663 | return ret; |
664 | } | |
1da177e4 | 665 | |
e513cc1c MH |
666 | static int try_stop_module(struct module *mod, int flags, int *forced) |
667 | { | |
da39ba5e | 668 | /* If it's not unused, quit unless we're forcing. */ |
e513cc1c MH |
669 | if (try_release_module_ref(mod) != 0) { |
670 | *forced = try_force_unload(flags); | |
671 | if (!(*forced)) | |
1da177e4 LT |
672 | return -EWOULDBLOCK; |
673 | } | |
674 | ||
675 | /* Mark it as dying. */ | |
e513cc1c | 676 | mod->state = MODULE_STATE_GOING; |
1da177e4 | 677 | |
e513cc1c | 678 | return 0; |
1da177e4 LT |
679 | } |
680 | ||
d5db139a | 681 | /** |
24389b61 | 682 | * module_refcount() - return the refcount or -1 if unloading |
d5db139a RR |
683 | * @mod: the module we're checking |
684 | * | |
24389b61 | 685 | * Return: |
d5db139a RR |
686 | * -1 if the module is in the process of unloading |
687 | * otherwise the number of references in the kernel to the module | |
688 | */ | |
689 | int module_refcount(struct module *mod) | |
1da177e4 | 690 | { |
d5db139a | 691 | return atomic_read(&mod->refcnt) - MODULE_REF_BASE; |
1da177e4 LT |
692 | } |
693 | EXPORT_SYMBOL(module_refcount); | |
694 | ||
695 | /* This exists whether we can unload or not */ | |
696 | static void free_module(struct module *mod); | |
697 | ||
17da2bd9 HC |
698 | SYSCALL_DEFINE2(delete_module, const char __user *, name_user, |
699 | unsigned int, flags) | |
1da177e4 LT |
700 | { |
701 | struct module *mod; | |
dfff0a06 | 702 | char name[MODULE_NAME_LEN]; |
6f1dae1d | 703 | char buf[MODULE_FLAGS_BUF_SIZE]; |
1da177e4 LT |
704 | int ret, forced = 0; |
705 | ||
3d43321b | 706 | if (!capable(CAP_SYS_MODULE) || modules_disabled) |
dfff0a06 GKH |
707 | return -EPERM; |
708 | ||
709 | if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0) | |
710 | return -EFAULT; | |
711 | name[MODULE_NAME_LEN-1] = '\0'; | |
712 | ||
f6276ac9 RGB |
713 | audit_log_kern_module(name); |
714 | ||
3fc1f1e2 TH |
715 | if (mutex_lock_interruptible(&module_mutex) != 0) |
716 | return -EINTR; | |
1da177e4 LT |
717 | |
718 | mod = find_module(name); | |
719 | if (!mod) { | |
720 | ret = -ENOENT; | |
721 | goto out; | |
722 | } | |
723 | ||
2c02dfe7 | 724 | if (!list_empty(&mod->source_list)) { |
1da177e4 LT |
725 | /* Other modules depend on us: get rid of them first. */ |
726 | ret = -EWOULDBLOCK; | |
727 | goto out; | |
728 | } | |
729 | ||
730 | /* Doing init or already dying? */ | |
731 | if (mod->state != MODULE_STATE_LIVE) { | |
3f2b9c9c | 732 | /* FIXME: if (force), slam module count damn the torpedoes */ |
5e124169 | 733 | pr_debug("%s already dying\n", mod->name); |
1da177e4 LT |
734 | ret = -EBUSY; |
735 | goto out; | |
736 | } | |
737 | ||
738 | /* If it has an init func, it must have an exit func to unload */ | |
af49d924 | 739 | if (mod->init && !mod->exit) { |
fb169793 | 740 | forced = try_force_unload(flags); |
1da177e4 LT |
741 | if (!forced) { |
742 | /* This module can't be removed */ | |
743 | ret = -EBUSY; | |
744 | goto out; | |
745 | } | |
746 | } | |
747 | ||
1da177e4 LT |
748 | ret = try_stop_module(mod, flags, &forced); |
749 | if (ret != 0) | |
750 | goto out; | |
751 | ||
df4b565e | 752 | mutex_unlock(&module_mutex); |
25985edc | 753 | /* Final destruction now no one is using it. */ |
df4b565e | 754 | if (mod->exit != NULL) |
1da177e4 | 755 | mod->exit(); |
df4b565e PO |
756 | blocking_notifier_call_chain(&module_notify_list, |
757 | MODULE_STATE_GOING, mod); | |
7e545d6e | 758 | klp_module_going(mod); |
7dcd182b JY |
759 | ftrace_release_mod(mod); |
760 | ||
22a9d645 | 761 | async_synchronize_full(); |
75676500 | 762 | |
6f1dae1d AT |
763 | /* Store the name and taints of the last unloaded module for diagnostic purposes */ |
764 | strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name)); | |
765 | strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints)); | |
1da177e4 | 766 | |
75676500 | 767 | free_module(mod); |
5d603311 KK |
768 | /* someone could wait for the module in add_unformed_module() */ |
769 | wake_up_all(&module_wq); | |
75676500 RR |
770 | return 0; |
771 | out: | |
6389a385 | 772 | mutex_unlock(&module_mutex); |
1da177e4 LT |
773 | return ret; |
774 | } | |
775 | ||
1da177e4 LT |
776 | void __symbol_put(const char *symbol) |
777 | { | |
0b96615c CH |
778 | struct find_symbol_arg fsa = { |
779 | .name = symbol, | |
780 | .gplok = true, | |
781 | }; | |
1da177e4 | 782 | |
24da1cbf | 783 | preempt_disable(); |
02b2fb45 | 784 | BUG_ON(!find_symbol(&fsa)); |
0b96615c | 785 | module_put(fsa.owner); |
24da1cbf | 786 | preempt_enable(); |
1da177e4 LT |
787 | } |
788 | EXPORT_SYMBOL(__symbol_put); | |
789 | ||
7d1d16e4 | 790 | /* Note this assumes addr is a function, which it currently always is. */ |
1da177e4 LT |
791 | void symbol_put_addr(void *addr) |
792 | { | |
5e376613 | 793 | struct module *modaddr; |
7d1d16e4 | 794 | unsigned long a = (unsigned long)dereference_function_descriptor(addr); |
1da177e4 | 795 | |
7d1d16e4 | 796 | if (core_kernel_text(a)) |
5e376613 | 797 | return; |
1da177e4 | 798 | |
275d7d44 PZ |
799 | /* |
800 | * Even though we hold a reference on the module; we still need to | |
801 | * disable preemption in order to safely traverse the data structure. | |
802 | */ | |
803 | preempt_disable(); | |
7d1d16e4 | 804 | modaddr = __module_text_address(a); |
a6e6abd5 | 805 | BUG_ON(!modaddr); |
5e376613 | 806 | module_put(modaddr); |
275d7d44 | 807 | preempt_enable(); |
1da177e4 LT |
808 | } |
809 | EXPORT_SYMBOL_GPL(symbol_put_addr); | |
810 | ||
811 | static ssize_t show_refcnt(struct module_attribute *mattr, | |
4befb026 | 812 | struct module_kobject *mk, char *buffer) |
1da177e4 | 813 | { |
d5db139a | 814 | return sprintf(buffer, "%i\n", module_refcount(mk->mod)); |
1da177e4 LT |
815 | } |
816 | ||
cca3e707 KS |
817 | static struct module_attribute modinfo_refcnt = |
818 | __ATTR(refcnt, 0444, show_refcnt, NULL); | |
1da177e4 | 819 | |
d53799be SR |
820 | void __module_get(struct module *module) |
821 | { | |
822 | if (module) { | |
2f35c41f | 823 | atomic_inc(&module->refcnt); |
d53799be | 824 | trace_module_get(module, _RET_IP_); |
d53799be SR |
825 | } |
826 | } | |
827 | EXPORT_SYMBOL(__module_get); | |
828 | ||
829 | bool try_module_get(struct module *module) | |
830 | { | |
831 | bool ret = true; | |
832 | ||
833 | if (module) { | |
e513cc1c MH |
834 | /* Note: here, we can fail to get a reference */ |
835 | if (likely(module_is_live(module) && | |
836 | atomic_inc_not_zero(&module->refcnt) != 0)) | |
d53799be | 837 | trace_module_get(module, _RET_IP_); |
e513cc1c | 838 | else |
d53799be | 839 | ret = false; |
d53799be SR |
840 | } |
841 | return ret; | |
842 | } | |
843 | EXPORT_SYMBOL(try_module_get); | |
844 | ||
f6a57033 AV |
845 | void module_put(struct module *module) |
846 | { | |
e513cc1c MH |
847 | int ret; |
848 | ||
f6a57033 | 849 | if (module) { |
e513cc1c MH |
850 | ret = atomic_dec_if_positive(&module->refcnt); |
851 | WARN_ON(ret < 0); /* Failed to put refcount */ | |
ae832d1e | 852 | trace_module_put(module, _RET_IP_); |
f6a57033 AV |
853 | } |
854 | } | |
855 | EXPORT_SYMBOL(module_put); | |
856 | ||
1da177e4 | 857 | #else /* !CONFIG_MODULE_UNLOAD */ |
1da177e4 LT |
858 | static inline void module_unload_free(struct module *mod) |
859 | { | |
860 | } | |
861 | ||
7ef5264d | 862 | static int ref_module(struct module *a, struct module *b) |
1da177e4 | 863 | { |
9bea7f23 | 864 | return strong_try_module_get(b); |
1da177e4 LT |
865 | } |
866 | ||
9f85a4bb | 867 | static inline int module_unload_init(struct module *mod) |
1da177e4 | 868 | { |
9f85a4bb | 869 | return 0; |
1da177e4 LT |
870 | } |
871 | #endif /* CONFIG_MODULE_UNLOAD */ | |
872 | ||
c14e522b | 873 | size_t module_flags_taint(unsigned long taints, char *buf) |
53999bf3 KW |
874 | { |
875 | size_t l = 0; | |
7fd8329b PM |
876 | int i; |
877 | ||
878 | for (i = 0; i < TAINT_FLAGS_COUNT; i++) { | |
c14e522b | 879 | if (taint_flags[i].module && test_bit(i, &taints)) |
5eb7c0d0 | 880 | buf[l++] = taint_flags[i].c_true; |
7fd8329b | 881 | } |
53999bf3 | 882 | |
53999bf3 KW |
883 | return l; |
884 | } | |
885 | ||
1f71740a | 886 | static ssize_t show_initstate(struct module_attribute *mattr, |
4befb026 | 887 | struct module_kobject *mk, char *buffer) |
1f71740a KS |
888 | { |
889 | const char *state = "unknown"; | |
890 | ||
4befb026 | 891 | switch (mk->mod->state) { |
1f71740a KS |
892 | case MODULE_STATE_LIVE: |
893 | state = "live"; | |
894 | break; | |
895 | case MODULE_STATE_COMING: | |
896 | state = "coming"; | |
897 | break; | |
898 | case MODULE_STATE_GOING: | |
899 | state = "going"; | |
900 | break; | |
0d21b0e3 RR |
901 | default: |
902 | BUG(); | |
1f71740a KS |
903 | } |
904 | return sprintf(buffer, "%s\n", state); | |
905 | } | |
906 | ||
cca3e707 KS |
907 | static struct module_attribute modinfo_initstate = |
908 | __ATTR(initstate, 0444, show_initstate, NULL); | |
1f71740a | 909 | |
88bfa324 KS |
910 | static ssize_t store_uevent(struct module_attribute *mattr, |
911 | struct module_kobject *mk, | |
912 | const char *buffer, size_t count) | |
913 | { | |
df44b479 PR |
914 | int rc; |
915 | ||
916 | rc = kobject_synth_uevent(&mk->kobj, buffer, count); | |
917 | return rc ? rc : count; | |
88bfa324 KS |
918 | } |
919 | ||
cca3e707 KS |
920 | struct module_attribute module_uevent = |
921 | __ATTR(uevent, 0200, NULL, store_uevent); | |
922 | ||
923 | static ssize_t show_coresize(struct module_attribute *mattr, | |
924 | struct module_kobject *mk, char *buffer) | |
925 | { | |
ac3b4328 SL |
926 | unsigned int size = mk->mod->mem[MOD_TEXT].size; |
927 | ||
928 | if (!IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC)) { | |
929 | for_class_mod_mem_type(type, core_data) | |
930 | size += mk->mod->mem[type].size; | |
931 | } | |
932 | return sprintf(buffer, "%u\n", size); | |
cca3e707 KS |
933 | } |
934 | ||
935 | static struct module_attribute modinfo_coresize = | |
936 | __ATTR(coresize, 0444, show_coresize, NULL); | |
937 | ||
01dc0386 CL |
938 | #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC |
939 | static ssize_t show_datasize(struct module_attribute *mattr, | |
940 | struct module_kobject *mk, char *buffer) | |
941 | { | |
ac3b4328 SL |
942 | unsigned int size = 0; |
943 | ||
944 | for_class_mod_mem_type(type, core_data) | |
945 | size += mk->mod->mem[type].size; | |
946 | return sprintf(buffer, "%u\n", size); | |
01dc0386 CL |
947 | } |
948 | ||
949 | static struct module_attribute modinfo_datasize = | |
950 | __ATTR(datasize, 0444, show_datasize, NULL); | |
951 | #endif | |
952 | ||
cca3e707 KS |
953 | static ssize_t show_initsize(struct module_attribute *mattr, |
954 | struct module_kobject *mk, char *buffer) | |
955 | { | |
ac3b4328 SL |
956 | unsigned int size = 0; |
957 | ||
958 | for_class_mod_mem_type(type, init) | |
959 | size += mk->mod->mem[type].size; | |
960 | return sprintf(buffer, "%u\n", size); | |
cca3e707 KS |
961 | } |
962 | ||
963 | static struct module_attribute modinfo_initsize = | |
964 | __ATTR(initsize, 0444, show_initsize, NULL); | |
965 | ||
966 | static ssize_t show_taint(struct module_attribute *mattr, | |
967 | struct module_kobject *mk, char *buffer) | |
968 | { | |
969 | size_t l; | |
970 | ||
c14e522b | 971 | l = module_flags_taint(mk->mod->taints, buffer); |
cca3e707 KS |
972 | buffer[l++] = '\n'; |
973 | return l; | |
974 | } | |
975 | ||
976 | static struct module_attribute modinfo_taint = | |
977 | __ATTR(taint, 0444, show_taint, NULL); | |
88bfa324 | 978 | |
44c09535 | 979 | struct module_attribute *modinfo_attrs[] = { |
cca3e707 | 980 | &module_uevent, |
03e88ae1 GKH |
981 | &modinfo_version, |
982 | &modinfo_srcversion, | |
cca3e707 KS |
983 | &modinfo_initstate, |
984 | &modinfo_coresize, | |
01dc0386 CL |
985 | #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC |
986 | &modinfo_datasize, | |
987 | #endif | |
cca3e707 KS |
988 | &modinfo_initsize, |
989 | &modinfo_taint, | |
03e88ae1 | 990 | #ifdef CONFIG_MODULE_UNLOAD |
cca3e707 | 991 | &modinfo_refcnt, |
03e88ae1 GKH |
992 | #endif |
993 | NULL, | |
994 | }; | |
995 | ||
44c09535 AT |
996 | size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs); |
997 | ||
1da177e4 LT |
998 | static const char vermagic[] = VERMAGIC_STRING; |
999 | ||
47889798 | 1000 | int try_to_force_load(struct module *mod, const char *reason) |
826e4506 LT |
1001 | { |
1002 | #ifdef CONFIG_MODULE_FORCE_LOAD | |
25ddbb18 | 1003 | if (!test_taint(TAINT_FORCED_MODULE)) |
bddb12b3 | 1004 | pr_warn("%s: %s: kernel tainted.\n", mod->name, reason); |
373d4d09 | 1005 | add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE); |
826e4506 LT |
1006 | return 0; |
1007 | #else | |
1008 | return -ENOEXEC; | |
1009 | #endif | |
1010 | } | |
1011 | ||
b66973b8 | 1012 | /* Parse tag=value strings from .modinfo section */ |
feb5b784 | 1013 | char *module_next_tag_pair(char *string, unsigned long *secsize) |
b66973b8 LC |
1014 | { |
1015 | /* Skip non-zero chars */ | |
1016 | while (string[0]) { | |
1017 | string++; | |
1018 | if ((*secsize)-- <= 1) | |
1019 | return NULL; | |
1020 | } | |
1021 | ||
1022 | /* Skip any zero padding. */ | |
1023 | while (!string[0]) { | |
1024 | string++; | |
1025 | if ((*secsize)-- <= 1) | |
1026 | return NULL; | |
1027 | } | |
1028 | return string; | |
1029 | } | |
1030 | ||
8651ec01 | 1031 | static char *get_next_modinfo(const struct load_info *info, const char *tag, |
b66973b8 LC |
1032 | char *prev) |
1033 | { | |
1034 | char *p; | |
1035 | unsigned int taglen = strlen(tag); | |
1036 | Elf_Shdr *infosec = &info->sechdrs[info->index.info]; | |
1037 | unsigned long size = infosec->sh_size; | |
1038 | ||
1039 | /* | |
1040 | * get_modinfo() calls made before rewrite_section_headers() | |
1041 | * must use sh_offset, as sh_addr isn't set! | |
1042 | */ | |
1043 | char *modinfo = (char *)info->hdr + infosec->sh_offset; | |
1044 | ||
1045 | if (prev) { | |
1046 | size -= prev - modinfo; | |
feb5b784 | 1047 | modinfo = module_next_tag_pair(prev, &size); |
b66973b8 LC |
1048 | } |
1049 | ||
feb5b784 | 1050 | for (p = modinfo; p; p = module_next_tag_pair(p, &size)) { |
b66973b8 LC |
1051 | if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') |
1052 | return p + taglen + 1; | |
1053 | } | |
1054 | return NULL; | |
1055 | } | |
1056 | ||
1057 | static char *get_modinfo(const struct load_info *info, const char *tag) | |
1058 | { | |
1059 | return get_next_modinfo(info, tag, NULL); | |
1060 | } | |
8651ec01 MM |
1061 | |
1062 | static int verify_namespace_is_imported(const struct load_info *info, | |
1063 | const struct kernel_symbol *sym, | |
1064 | struct module *mod) | |
1065 | { | |
1066 | const char *namespace; | |
1067 | char *imported_namespace; | |
1068 | ||
1069 | namespace = kernel_symbol_namespace(sym); | |
c3a6cf19 | 1070 | if (namespace && namespace[0]) { |
1e684172 | 1071 | for_each_modinfo_entry(imported_namespace, info, "import_ns") { |
8651ec01 MM |
1072 | if (strcmp(namespace, imported_namespace) == 0) |
1073 | return 0; | |
8651ec01 | 1074 | } |
3d52ec5e MM |
1075 | #ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS |
1076 | pr_warn( | |
1077 | #else | |
1078 | pr_err( | |
1079 | #endif | |
1080 | "%s: module uses symbol (%s) from namespace %s, but does not import it.\n", | |
1081 | mod->name, kernel_symbol_name(sym), namespace); | |
1082 | #ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS | |
8651ec01 | 1083 | return -EINVAL; |
3d52ec5e | 1084 | #endif |
8651ec01 MM |
1085 | } |
1086 | return 0; | |
1087 | } | |
1088 | ||
8eac910a | 1089 | static bool inherit_taint(struct module *mod, struct module *owner, const char *name) |
262e6ae7 CH |
1090 | { |
1091 | if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints)) | |
1092 | return true; | |
1093 | ||
1094 | if (mod->using_gplonly_symbols) { | |
8eac910a LC |
1095 | pr_err("%s: module using GPL-only symbols uses symbols %s from proprietary module %s.\n", |
1096 | mod->name, name, owner->name); | |
262e6ae7 CH |
1097 | return false; |
1098 | } | |
1099 | ||
1100 | if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) { | |
8eac910a LC |
1101 | pr_warn("%s: module uses symbols %s from proprietary module %s, inheriting taint.\n", |
1102 | mod->name, name, owner->name); | |
262e6ae7 CH |
1103 | set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints); |
1104 | } | |
1105 | return true; | |
1106 | } | |
8651ec01 | 1107 | |
75676500 | 1108 | /* Resolve a symbol for this module. I.e. if we find one, record usage. */ |
49668688 RR |
1109 | static const struct kernel_symbol *resolve_symbol(struct module *mod, |
1110 | const struct load_info *info, | |
414fd31b | 1111 | const char *name, |
9bea7f23 | 1112 | char ownername[]) |
1da177e4 | 1113 | { |
0b96615c CH |
1114 | struct find_symbol_arg fsa = { |
1115 | .name = name, | |
1116 | .gplok = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), | |
1117 | .warn = true, | |
1118 | }; | |
9bea7f23 | 1119 | int err; |
1da177e4 | 1120 | |
d64810f5 PZ |
1121 | /* |
1122 | * The module_mutex should not be a heavily contended lock; | |
1123 | * if we get the occasional sleep here, we'll go an extra iteration | |
1124 | * in the wait_event_interruptible(), which is harmless. | |
1125 | */ | |
1126 | sched_annotate_sleep(); | |
75676500 | 1127 | mutex_lock(&module_mutex); |
0b96615c | 1128 | if (!find_symbol(&fsa)) |
9bea7f23 RR |
1129 | goto unlock; |
1130 | ||
0b96615c | 1131 | if (fsa.license == GPL_ONLY) |
262e6ae7 CH |
1132 | mod->using_gplonly_symbols = true; |
1133 | ||
8eac910a | 1134 | if (!inherit_taint(mod, fsa.owner, name)) { |
0b96615c | 1135 | fsa.sym = NULL; |
262e6ae7 CH |
1136 | goto getname; |
1137 | } | |
1138 | ||
0b96615c CH |
1139 | if (!check_version(info, name, mod, fsa.crc)) { |
1140 | fsa.sym = ERR_PTR(-EINVAL); | |
9bea7f23 | 1141 | goto getname; |
1da177e4 | 1142 | } |
9bea7f23 | 1143 | |
0b96615c | 1144 | err = verify_namespace_is_imported(info, fsa.sym, mod); |
8651ec01 | 1145 | if (err) { |
0b96615c | 1146 | fsa.sym = ERR_PTR(err); |
8651ec01 MM |
1147 | goto getname; |
1148 | } | |
1149 | ||
0b96615c | 1150 | err = ref_module(mod, fsa.owner); |
9bea7f23 | 1151 | if (err) { |
0b96615c | 1152 | fsa.sym = ERR_PTR(err); |
9bea7f23 RR |
1153 | goto getname; |
1154 | } | |
1155 | ||
1156 | getname: | |
1157 | /* We must make copy under the lock if we failed to get ref. */ | |
0b96615c | 1158 | strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN); |
9bea7f23 | 1159 | unlock: |
75676500 | 1160 | mutex_unlock(&module_mutex); |
0b96615c | 1161 | return fsa.sym; |
1da177e4 LT |
1162 | } |
1163 | ||
49668688 RR |
1164 | static const struct kernel_symbol * |
1165 | resolve_symbol_wait(struct module *mod, | |
1166 | const struct load_info *info, | |
1167 | const char *name) | |
9bea7f23 RR |
1168 | { |
1169 | const struct kernel_symbol *ksym; | |
49668688 | 1170 | char owner[MODULE_NAME_LEN]; |
9bea7f23 RR |
1171 | |
1172 | if (wait_event_interruptible_timeout(module_wq, | |
49668688 RR |
1173 | !IS_ERR(ksym = resolve_symbol(mod, info, name, owner)) |
1174 | || PTR_ERR(ksym) != -EBUSY, | |
9bea7f23 | 1175 | 30 * HZ) <= 0) { |
bddb12b3 AM |
1176 | pr_warn("%s: gave up waiting for init of module %s.\n", |
1177 | mod->name, owner); | |
9bea7f23 RR |
1178 | } |
1179 | return ksym; | |
1180 | } | |
1181 | ||
be1f221c | 1182 | void __weak module_memfree(void *module_region) |
74e08fcf | 1183 | { |
1a7b7d92 RE |
1184 | /* |
1185 | * This memory may be RO, and freeing RO memory in an interrupt is not | |
1186 | * supported by vmalloc. | |
1187 | */ | |
1188 | WARN_ON(in_interrupt()); | |
74e08fcf JB |
1189 | vfree(module_region); |
1190 | } | |
1191 | ||
1192 | void __weak module_arch_cleanup(struct module *mod) | |
1193 | { | |
1194 | } | |
1195 | ||
d453cded RR |
1196 | void __weak module_arch_freeing_init(struct module *mod) |
1197 | { | |
1198 | } | |
1199 | ||
ac3b4328 SL |
1200 | static bool mod_mem_use_vmalloc(enum mod_mem_type type) |
1201 | { | |
1202 | return IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC) && | |
1203 | mod_mem_type_is_core_data(type); | |
1204 | } | |
1205 | ||
1206 | static void *module_memory_alloc(unsigned int size, enum mod_mem_type type) | |
1207 | { | |
1208 | if (mod_mem_use_vmalloc(type)) | |
1209 | return vzalloc(size); | |
1210 | return module_alloc(size); | |
1211 | } | |
1212 | ||
1213 | static void module_memory_free(void *ptr, enum mod_mem_type type) | |
1214 | { | |
1215 | if (mod_mem_use_vmalloc(type)) | |
1216 | vfree(ptr); | |
1217 | else | |
1218 | module_memfree(ptr); | |
1219 | } | |
1220 | ||
1221 | static void free_mod_mem(struct module *mod) | |
1222 | { | |
1223 | for_each_mod_mem_type(type) { | |
1224 | struct module_memory *mod_mem = &mod->mem[type]; | |
1225 | ||
1226 | if (type == MOD_DATA) | |
1227 | continue; | |
1228 | ||
1229 | /* Free lock-classes; relies on the preceding sync_rcu(). */ | |
1230 | lockdep_free_key_range(mod_mem->base, mod_mem->size); | |
1231 | if (mod_mem->size) | |
1232 | module_memory_free(mod_mem->base, type); | |
1233 | } | |
1234 | ||
1235 | /* MOD_DATA hosts mod, so free it at last */ | |
1236 | lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size); | |
1237 | module_memory_free(mod->mem[MOD_DATA].base, MOD_DATA); | |
1238 | } | |
1239 | ||
75676500 | 1240 | /* Free a module, remove from lists, etc. */ |
1da177e4 LT |
1241 | static void free_module(struct module *mod) |
1242 | { | |
7ead8b83 LZ |
1243 | trace_module_free(mod); |
1244 | ||
36b0360d | 1245 | mod_sysfs_teardown(mod); |
1da177e4 | 1246 | |
24b9f0d2 SS |
1247 | /* |
1248 | * We leave it in list to prevent duplicate loads, but make sure | |
1249 | * that noone uses it while it's being deconstructed. | |
1250 | */ | |
d3051b48 | 1251 | mutex_lock(&module_mutex); |
944a1fa0 | 1252 | mod->state = MODULE_STATE_UNFORMED; |
d3051b48 | 1253 | mutex_unlock(&module_mutex); |
944a1fa0 | 1254 | |
1da177e4 LT |
1255 | /* Arch-specific cleanup. */ |
1256 | module_arch_cleanup(mod); | |
1257 | ||
1258 | /* Module unload stuff */ | |
1259 | module_unload_free(mod); | |
1260 | ||
e180a6b7 RR |
1261 | /* Free any allocated parameters. */ |
1262 | destroy_params(mod->kp, mod->num_kp); | |
1263 | ||
1ce15ef4 JY |
1264 | if (is_livepatch_module(mod)) |
1265 | free_module_elf(mod); | |
1266 | ||
944a1fa0 RR |
1267 | /* Now we can delete it from the lists */ |
1268 | mutex_lock(&module_mutex); | |
461e34ae MH |
1269 | /* Unlink carefully: kallsyms could be walking list. */ |
1270 | list_del_rcu(&mod->list); | |
93c2e105 | 1271 | mod_tree_remove(mod); |
0286b5ea | 1272 | /* Remove this module from bug list, this uses list_del_rcu */ |
461e34ae | 1273 | module_bug_cleanup(mod); |
0be964be | 1274 | /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ |
cb2f5536 | 1275 | synchronize_rcu(); |
99bd9956 AT |
1276 | if (try_add_tainted_module(mod)) |
1277 | pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n", | |
1278 | mod->name); | |
944a1fa0 RR |
1279 | mutex_unlock(&module_mutex); |
1280 | ||
85c898db | 1281 | /* This may be empty, but that's OK */ |
d453cded | 1282 | module_arch_freeing_init(mod); |
1da177e4 | 1283 | kfree(mod->args); |
259354de | 1284 | percpu_modfree(mod); |
9f85a4bb | 1285 | |
ac3b4328 | 1286 | free_mod_mem(mod); |
1da177e4 LT |
1287 | } |
1288 | ||
1289 | void *__symbol_get(const char *symbol) | |
1290 | { | |
0b96615c CH |
1291 | struct find_symbol_arg fsa = { |
1292 | .name = symbol, | |
1293 | .gplok = true, | |
1294 | .warn = true, | |
1295 | }; | |
1da177e4 | 1296 | |
24da1cbf | 1297 | preempt_disable(); |
9011e49d CH |
1298 | if (!find_symbol(&fsa)) |
1299 | goto fail; | |
1300 | if (fsa.license != GPL_ONLY) { | |
1301 | pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n", | |
1302 | symbol); | |
1303 | goto fail; | |
0b96615c | 1304 | } |
9011e49d CH |
1305 | if (strong_try_module_get(fsa.owner)) |
1306 | goto fail; | |
24da1cbf | 1307 | preempt_enable(); |
0b96615c | 1308 | return (void *)kernel_symbol_value(fsa.sym); |
9011e49d CH |
1309 | fail: |
1310 | preempt_enable(); | |
1311 | return NULL; | |
1da177e4 LT |
1312 | } |
1313 | EXPORT_SYMBOL_GPL(__symbol_get); | |
1314 | ||
eea8b54d AN |
1315 | /* |
1316 | * Ensure that an exported symbol [global namespace] does not already exist | |
02a3e59a | 1317 | * in the kernel or in some other module's exported symbol table. |
be593f4c RR |
1318 | * |
1319 | * You must hold the module_mutex. | |
eea8b54d | 1320 | */ |
2d25bc55 | 1321 | static int verify_exported_symbols(struct module *mod) |
eea8b54d | 1322 | { |
b211104d | 1323 | unsigned int i; |
b211104d RR |
1324 | const struct kernel_symbol *s; |
1325 | struct { | |
1326 | const struct kernel_symbol *sym; | |
1327 | unsigned int num; | |
1328 | } arr[] = { | |
1329 | { mod->syms, mod->num_syms }, | |
1330 | { mod->gpl_syms, mod->num_gpl_syms }, | |
b211104d | 1331 | }; |
eea8b54d | 1332 | |
b211104d RR |
1333 | for (i = 0; i < ARRAY_SIZE(arr); i++) { |
1334 | for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { | |
0b96615c CH |
1335 | struct find_symbol_arg fsa = { |
1336 | .name = kernel_symbol_name(s), | |
1337 | .gplok = true, | |
1338 | }; | |
1339 | if (find_symbol(&fsa)) { | |
bddb12b3 | 1340 | pr_err("%s: exports duplicate symbol %s" |
b211104d | 1341 | " (owned by %s)\n", |
7290d580 | 1342 | mod->name, kernel_symbol_name(s), |
0b96615c | 1343 | module_name(fsa.owner)); |
b211104d RR |
1344 | return -ENOEXEC; |
1345 | } | |
eea8b54d | 1346 | } |
b211104d RR |
1347 | } |
1348 | return 0; | |
eea8b54d AN |
1349 | } |
1350 | ||
ebfac7b7 FS |
1351 | static bool ignore_undef_symbol(Elf_Half emachine, const char *name) |
1352 | { | |
1353 | /* | |
1354 | * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as | |
1355 | * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. | |
1356 | * i386 has a similar problem but may not deserve a fix. | |
1357 | * | |
1358 | * If we ever have to ignore many symbols, consider refactoring the code to | |
1359 | * only warn if referenced by a relocation. | |
1360 | */ | |
1361 | if (emachine == EM_386 || emachine == EM_X86_64) | |
1362 | return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); | |
1363 | return false; | |
1364 | } | |
1365 | ||
9a4b9708 | 1366 | /* Change all symbols so that st_value encodes the pointer directly. */ |
49668688 RR |
1367 | static int simplify_symbols(struct module *mod, const struct load_info *info) |
1368 | { | |
1369 | Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; | |
1370 | Elf_Sym *sym = (void *)symsec->sh_addr; | |
1da177e4 | 1371 | unsigned long secbase; |
49668688 | 1372 | unsigned int i; |
1da177e4 | 1373 | int ret = 0; |
414fd31b | 1374 | const struct kernel_symbol *ksym; |
1da177e4 | 1375 | |
49668688 RR |
1376 | for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { |
1377 | const char *name = info->strtab + sym[i].st_name; | |
1378 | ||
1da177e4 LT |
1379 | switch (sym[i].st_shndx) { |
1380 | case SHN_COMMON: | |
80375980 JM |
1381 | /* Ignore common symbols */ |
1382 | if (!strncmp(name, "__gnu_lto", 9)) | |
1383 | break; | |
1384 | ||
24b9f0d2 SS |
1385 | /* |
1386 | * We compiled with -fno-common. These are not | |
1387 | * supposed to happen. | |
1388 | */ | |
5e124169 | 1389 | pr_debug("Common symbol: %s\n", name); |
6da0b565 | 1390 | pr_warn("%s: please compile with -fno-common\n", |
1da177e4 LT |
1391 | mod->name); |
1392 | ret = -ENOEXEC; | |
1393 | break; | |
1394 | ||
1395 | case SHN_ABS: | |
1396 | /* Don't need to do anything */ | |
b10addf3 JC |
1397 | pr_debug("Absolute symbol: 0x%08lx %s\n", |
1398 | (long)sym[i].st_value, name); | |
1da177e4 LT |
1399 | break; |
1400 | ||
1ce15ef4 JY |
1401 | case SHN_LIVEPATCH: |
1402 | /* Livepatch symbols are resolved by livepatch */ | |
1403 | break; | |
1404 | ||
1da177e4 | 1405 | case SHN_UNDEF: |
49668688 | 1406 | ksym = resolve_symbol_wait(mod, info, name); |
1da177e4 | 1407 | /* Ok if resolved. */ |
9bea7f23 | 1408 | if (ksym && !IS_ERR(ksym)) { |
7290d580 | 1409 | sym[i].st_value = kernel_symbol_value(ksym); |
1da177e4 | 1410 | break; |
414fd31b TA |
1411 | } |
1412 | ||
ebfac7b7 FS |
1413 | /* Ok if weak or ignored. */ |
1414 | if (!ksym && | |
1415 | (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || | |
1416 | ignore_undef_symbol(info->hdr->e_machine, name))) | |
1da177e4 LT |
1417 | break; |
1418 | ||
9bea7f23 | 1419 | ret = PTR_ERR(ksym) ?: -ENOENT; |
62267e0e JD |
1420 | pr_warn("%s: Unknown symbol %s (err %d)\n", |
1421 | mod->name, name, ret); | |
1da177e4 LT |
1422 | break; |
1423 | ||
1424 | default: | |
1425 | /* Divert to percpu allocation if a percpu var. */ | |
49668688 | 1426 | if (sym[i].st_shndx == info->index.pcpu) |
259354de | 1427 | secbase = (unsigned long)mod_percpu(mod); |
1da177e4 | 1428 | else |
49668688 | 1429 | secbase = info->sechdrs[sym[i].st_shndx].sh_addr; |
1da177e4 LT |
1430 | sym[i].st_value += secbase; |
1431 | break; | |
1432 | } | |
1433 | } | |
1434 | ||
1435 | return ret; | |
1436 | } | |
1437 | ||
49668688 | 1438 | static int apply_relocations(struct module *mod, const struct load_info *info) |
22e268eb RR |
1439 | { |
1440 | unsigned int i; | |
1441 | int err = 0; | |
1442 | ||
1443 | /* Now do relocations. */ | |
49668688 RR |
1444 | for (i = 1; i < info->hdr->e_shnum; i++) { |
1445 | unsigned int infosec = info->sechdrs[i].sh_info; | |
22e268eb RR |
1446 | |
1447 | /* Not a valid relocation section? */ | |
49668688 | 1448 | if (infosec >= info->hdr->e_shnum) |
22e268eb RR |
1449 | continue; |
1450 | ||
1451 | /* Don't bother with non-allocated sections */ | |
49668688 | 1452 | if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) |
22e268eb RR |
1453 | continue; |
1454 | ||
1ce15ef4 | 1455 | if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) |
7c8e2bdd JP |
1456 | err = klp_apply_section_relocs(mod, info->sechdrs, |
1457 | info->secstrings, | |
1458 | info->strtab, | |
1459 | info->index.sym, i, | |
1460 | NULL); | |
1461 | else if (info->sechdrs[i].sh_type == SHT_REL) | |
49668688 RR |
1462 | err = apply_relocate(info->sechdrs, info->strtab, |
1463 | info->index.sym, i, mod); | |
1464 | else if (info->sechdrs[i].sh_type == SHT_RELA) | |
1465 | err = apply_relocate_add(info->sechdrs, info->strtab, | |
1466 | info->index.sym, i, mod); | |
22e268eb RR |
1467 | if (err < 0) |
1468 | break; | |
1469 | } | |
1470 | return err; | |
1471 | } | |
1472 | ||
088af9a6 HD |
1473 | /* Additional bytes needed by arch in front of individual sections */ |
1474 | unsigned int __weak arch_mod_section_prepend(struct module *mod, | |
1475 | unsigned int section) | |
1476 | { | |
1477 | /* default implementation just returns zero */ | |
1478 | return 0; | |
1479 | } | |
1480 | ||
ac3b4328 SL |
1481 | long module_get_offset_and_type(struct module *mod, enum mod_mem_type type, |
1482 | Elf_Shdr *sechdr, unsigned int section) | |
1da177e4 | 1483 | { |
ac3b4328 SL |
1484 | long offset; |
1485 | long mask = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT; | |
1da177e4 | 1486 | |
ac3b4328 SL |
1487 | mod->mem[type].size += arch_mod_section_prepend(mod, section); |
1488 | offset = ALIGN(mod->mem[type].size, sechdr->sh_addralign ?: 1); | |
1489 | mod->mem[type].size = offset + sechdr->sh_size; | |
1490 | ||
1491 | WARN_ON_ONCE(offset & mask); | |
1492 | return offset | mask; | |
1da177e4 LT |
1493 | } |
1494 | ||
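/*
 * Illustration of the encoding returned above, in terms of the SH_ENTSIZE_*
 * macros the code already uses: the value later stored in sh_entsize packs
 * the module memory type into its high bits and the byte offset within that
 * region into its low bits, roughly
 *
 *	sh_entsize = ((unsigned long)type << SH_ENTSIZE_TYPE_SHIFT) | offset;
 *
 * and move_module() below undoes it with
 *
 *	type   = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT;
 *	offset = shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK;
 *	dest   = mod->mem[type].base + offset;
 */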
2abcc4b5 | 1495 | bool module_init_layout_section(const char *sname) |
055f23b7 JY |
1496 | { |
1497 | #ifndef CONFIG_MODULE_UNLOAD | |
1498 | if (module_exit_section(sname)) | |
1499 | return true; | |
1500 | #endif | |
1501 | return module_init_section(sname); | |
1502 | } | |
1503 | ||
ac3b4328 | 1504 | static void __layout_sections(struct module *mod, struct load_info *info, bool is_init) |
1da177e4 | 1505 | { |
ac3b4328 SL |
1506 | unsigned int m, i; |
1507 | ||
1508 | static const unsigned long masks[][2] = { | |
24b9f0d2 SS |
1509 | /* |
1510 | * NOTE: all executable code must be the first section | |
1da177e4 | 1511 | * in this array; otherwise modify the text_size |
24b9f0d2 SS |
1512 | * finder in the two loops below |
1513 | */ | |
1da177e4 LT |
1514 | { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL }, |
1515 | { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL }, | |
444d13ff | 1516 | { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL }, |
1da177e4 LT |
1517 | { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL }, |
1518 | { ARCH_SHF_SMALL | SHF_ALLOC, 0 } | |
1519 | }; | |
ac3b4328 SL |
1520 | static const int core_m_to_mem_type[] = { |
1521 | MOD_TEXT, | |
1522 | MOD_RODATA, | |
1523 | MOD_RO_AFTER_INIT, | |
1524 | MOD_DATA, | |
db3e33dd | 1525 | MOD_DATA, |
ac3b4328 SL |
1526 | }; |
1527 | static const int init_m_to_mem_type[] = { | |
1528 | MOD_INIT_TEXT, | |
1529 | MOD_INIT_RODATA, | |
1530 | MOD_INVALID, | |
1531 | MOD_INIT_DATA, | |
db3e33dd | 1532 | MOD_INIT_DATA, |
ac3b4328 | 1533 | }; |
1da177e4 | 1534 | |
1da177e4 | 1535 | for (m = 0; m < ARRAY_SIZE(masks); ++m) { |
ac3b4328 SL |
1536 | enum mod_mem_type type = is_init ? init_m_to_mem_type[m] : core_m_to_mem_type[m]; |
1537 | ||
49668688 RR |
1538 | for (i = 0; i < info->hdr->e_shnum; ++i) { |
1539 | Elf_Shdr *s = &info->sechdrs[i]; | |
1540 | const char *sname = info->secstrings + s->sh_name; | |
1da177e4 LT |
1541 | |
1542 | if ((s->sh_flags & masks[m][0]) != masks[m][0] | |
1543 | || (s->sh_flags & masks[m][1]) | |
1544 | || s->sh_entsize != ~0UL | |
ac3b4328 | 1545 | || is_init != module_init_layout_section(sname)) |
1da177e4 | 1546 | continue; |
1da177e4 | 1547 | |
ac3b4328 | 1548 | if (WARN_ON_ONCE(type == MOD_INVALID)) |
1da177e4 | 1549 | continue; |
ac3b4328 SL |
1550 | |
1551 | s->sh_entsize = module_get_offset_and_type(mod, type, s, i); | |
5e124169 | 1552 | pr_debug("\t%s\n", sname); |
1da177e4 | 1553 | } |
1da177e4 LT |
1554 | } |
1555 | } | |
1556 | ||
ac3b4328 SL |
1557 | /* |
1558 | * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld | |
1559 | * might -- code, read-only data, read-write data, small data. Tally | |
1560 | * sizes, and place the offsets into sh_entsize fields: the high bits
1561 | * encode the module memory type, which also says whether it belongs in init.
1562 | */ | |
1563 | static void layout_sections(struct module *mod, struct load_info *info) | |
1564 | { | |
1565 | unsigned int i; | |
1566 | ||
1567 | for (i = 0; i < info->hdr->e_shnum; i++) | |
1568 | info->sechdrs[i].sh_entsize = ~0UL; | |
1569 | ||
6ed81802 | 1570 | pr_debug("Core section allocation order for %s:\n", mod->name); |
ac3b4328 SL |
1571 | __layout_sections(mod, info, false); |
1572 | ||
6ed81802 | 1573 | pr_debug("Init section allocation order for %s:\n", mod->name); |
ac3b4328 SL |
1574 | __layout_sections(mod, info, true); |
1575 | } | |
1576 | ||
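/*
 * Spelled out, the masks[] rows in __layout_sections() map onto the memory
 * types as follows (per core_m_to_mem_type[] and init_m_to_mem_type[]):
 * executable sections go to MOD_TEXT / MOD_INIT_TEXT, read-only ones to
 * MOD_RODATA / MOD_INIT_RODATA, ro-after-init ones to MOD_RO_AFTER_INIT
 * (core image only), and writable as well as "small" sections to
 * MOD_DATA / MOD_INIT_DATA.
 */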
ad8d3a36 | 1577 | static void module_license_taint_check(struct module *mod, const char *license) |
1da177e4 LT |
1578 | { |
1579 | if (!license) | |
1580 | license = "unspecified"; | |
1581 | ||
fa3ba2e8 | 1582 | if (!license_is_gpl_compatible(license)) { |
25ddbb18 | 1583 | if (!test_taint(TAINT_PROPRIETARY_MODULE)) |
bddb12b3 AM |
1584 | pr_warn("%s: module license '%s' taints kernel.\n", |
1585 | mod->name, license); | |
373d4d09 RR |
1586 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE, |
1587 | LOCKDEP_NOW_UNRELIABLE); | |
1da177e4 LT |
1588 | } |
1589 | } | |
1590 | ||
49668688 | 1591 | static void setup_modinfo(struct module *mod, struct load_info *info) |
c988d2b2 MD |
1592 | { |
1593 | struct module_attribute *attr; | |
1594 | int i; | |
1595 | ||
1596 | for (i = 0; (attr = modinfo_attrs[i]); i++) { | |
1597 | if (attr->setup) | |
49668688 | 1598 | attr->setup(mod, get_modinfo(info, attr->attr.name)); |
c988d2b2 MD |
1599 | } |
1600 | } | |
c988d2b2 | 1601 | |
a263f776 RR |
1602 | static void free_modinfo(struct module *mod) |
1603 | { | |
1604 | struct module_attribute *attr; | |
1605 | int i; | |
1606 | ||
1607 | for (i = 0; (attr = modinfo_attrs[i]); i++) { | |
1608 | if (attr->free) | |
1609 | attr->free(mod); | |
1610 | } | |
1611 | } | |
1612 | ||
74e08fcf JB |
1613 | void * __weak module_alloc(unsigned long size) |
1614 | { | |
7a0e27b2 CH |
1615 | return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, |
1616 | GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, | |
a3a66c38 | 1617 | NUMA_NO_NODE, __builtin_return_address(0)); |
74e08fcf JB |
1618 | } |
1619 | ||
23189766 VW |
1620 | bool __weak module_init_section(const char *name) |
1621 | { | |
1622 | return strstarts(name, ".init"); | |
1623 | } | |
1624 | ||
38b37d63 MS |
1625 | bool __weak module_exit_section(const char *name) |
1626 | { | |
1627 | return strstarts(name, ".exit"); | |
1628 | } | |
1629 | ||
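/*
 * Example of how these defaults classify sections: ".init.text" and
 * ".init.data" match module_init_section(), while ".exit.text" matches
 * module_exit_section(). As module_init_layout_section() above shows, when
 * CONFIG_MODULE_UNLOAD is disabled the exit sections are laid out as init
 * memory too, so they can be discarded once the module has loaded.
 */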
ec2a2959 | 1630 | static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) |
40dd2560 | 1631 | { |
d83d42d0 SK |
1632 | #if defined(CONFIG_64BIT) |
1633 | unsigned long long secend; | |
1634 | #else | |
ec2a2959 | 1635 | unsigned long secend; |
d83d42d0 | 1636 | #endif |
ec2a2959 FL |
1637 | |
1638 | /* | |
1639 | * Check for both overflow and offset/size being | |
1640 | * too large. | |
1641 | */ | |
1642 | secend = shdr->sh_offset + shdr->sh_size; | |
1643 | if (secend < shdr->sh_offset || secend > info->len) | |
1644 | return -ENOEXEC; | |
1645 | ||
1646 | return 0; | |
1647 | } | |
1648 | ||
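/*
 * Example of the wraparound guarded against above (on a 64-bit build): with
 * sh_offset = ULONG_MAX - 4 and sh_size = 16, "secend" wraps to a small
 * value, so the "secend < shdr->sh_offset" test rejects the section even
 * though "secend > info->len" alone would not have caught it.
 */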
1649 | /* | |
3d40bb90 LC |
1650 | * Check the userspace-passed ELF module against our expectations, and cache
1651 | * useful variables for further processing as we go. | |
ec2a2959 | 1652 | * |
3d40bb90 | 1653 | * This does basic validity checks against section offsets and sizes, the |
ec2a2959 | 1654 | * section name string table, and the indices used for it (sh_name). |
3d40bb90 LC |
1655 | * |
1656 | * Since we are already walking the ELF sections anyway, we also cache
1657 | * useful variables for later convenience:
1658 | *
1659 | * o pointers to the section headers
1660 | * o the .modinfo section (and the module name from it)
1661 | * o the symbol and symbol string table sections
1662 | * o the .gnu.linkonce.this_module section
1663 | * | |
1664 | * As a last step we set info->mod to the temporary copy of the module in | |
1665 | * info->hdr. The final one will be allocated in move_module(). Any | |
1666 | * modifications we make to our copy of the module will be carried over | |
1667 | * to the final minted module. | |
ec2a2959 | 1668 | */ |
3d40bb90 | 1669 | static int elf_validity_cache_copy(struct load_info *info, int flags) |
ec2a2959 FL |
1670 | { |
1671 | unsigned int i; | |
1672 | Elf_Shdr *shdr, *strhdr; | |
1673 | int err; | |
46752820 | 1674 | unsigned int num_mod_secs = 0, mod_idx; |
1bb49db9 LC |
1675 | unsigned int num_info_secs = 0, info_idx; |
1676 | unsigned int num_sym_secs = 0, sym_idx; | |
ec2a2959 | 1677 | |
7fd982f3 SK |
1678 | if (info->len < sizeof(*(info->hdr))) { |
1679 | pr_err("Invalid ELF header len %lu\n", info->len); | |
1680 | goto no_exec; | |
1681 | } | |
34e1169d | 1682 | |
7fd982f3 SK |
1683 | if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) { |
1684 | pr_err("Invalid ELF header magic: != %s\n", ELFMAG); | |
1685 | goto no_exec; | |
1686 | } | |
1687 | if (info->hdr->e_type != ET_REL) { | |
1688 | pr_err("Invalid ELF header type: %u != %u\n", | |
1689 | info->hdr->e_type, ET_REL); | |
1690 | goto no_exec; | |
1691 | } | |
1692 | if (!elf_check_arch(info->hdr)) { | |
1693 | pr_err("Invalid architecture in ELF header: %u\n", | |
1694 | info->hdr->e_machine); | |
1695 | goto no_exec; | |
1696 | } | |
f9231a99 NP |
1697 | if (!module_elf_check_arch(info->hdr)) { |
1698 | pr_err("Invalid module architecture in ELF header: %u\n", | |
1699 | info->hdr->e_machine); | |
1700 | goto no_exec; | |
1701 | } | |
7fd982f3 SK |
1702 | if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) { |
1703 | pr_err("Invalid ELF section header size\n"); | |
1704 | goto no_exec; | |
1705 | } | |
34e1169d | 1706 | |
ec2a2959 FL |
1707 | /* |
1708 | * e_shnum is 16 bits, and sizeof(Elf_Shdr) is | |
1709 | * known and small. So e_shnum * sizeof(Elf_Shdr) | |
1710 | * will not overflow unsigned long on any platform. | |
1711 | */ | |
34e1169d KC |
1712 | if (info->hdr->e_shoff >= info->len |
1713 | || (info->hdr->e_shnum * sizeof(Elf_Shdr) > | |
7fd982f3 SK |
1714 | info->len - info->hdr->e_shoff)) { |
1715 | pr_err("Invalid ELF section header overflow\n"); | |
1716 | goto no_exec; | |
1717 | } | |
40dd2560 | 1718 | |
ec2a2959 FL |
1719 | info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; |
1720 | ||
1721 | /* | |
1722 | * Verify if the section name table index is valid. | |
1723 | */ | |
1724 | if (info->hdr->e_shstrndx == SHN_UNDEF | |
7fd982f3 SK |
1725 | || info->hdr->e_shstrndx >= info->hdr->e_shnum) { |
1726 | pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n", | |
1727 | info->hdr->e_shstrndx, info->hdr->e_shstrndx, | |
1728 | info->hdr->e_shnum); | |
1729 | goto no_exec; | |
1730 | } | |
ec2a2959 FL |
1731 | |
1732 | strhdr = &info->sechdrs[info->hdr->e_shstrndx]; | |
1733 | err = validate_section_offset(info, strhdr); | |
7fd982f3 SK |
1734 | if (err < 0) { |
1735 | pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type); | |
ec2a2959 | 1736 | return err; |
7fd982f3 | 1737 | } |
ec2a2959 FL |
1738 | |
1739 | /* | |
1740 | * The section name table must be NUL-terminated, as required | |
1741 | * by the spec. This makes strcmp and pr_* calls that access | |
1742 | * strings in the section safe. | |
1743 | */ | |
1744 | info->secstrings = (void *)info->hdr + strhdr->sh_offset; | |
391e982b AD |
1745 | if (strhdr->sh_size == 0) { |
1746 | pr_err("empty section name table\n"); | |
1747 | goto no_exec; | |
1748 | } | |
7fd982f3 SK |
1749 | if (info->secstrings[strhdr->sh_size - 1] != '\0') { |
1750 | pr_err("ELF Spec violation: section name table isn't null terminated\n"); | |
1751 | goto no_exec; | |
1752 | } | |
ec2a2959 FL |
1753 | |
1754 | /* | |
1755 | * The code assumes that section 0 has a length of zero and | |
1756 | * an addr of zero, so check for it. | |
1757 | */ | |
1758 | if (info->sechdrs[0].sh_type != SHT_NULL | |
1759 | || info->sechdrs[0].sh_size != 0 | |
7fd982f3 SK |
1760 | || info->sechdrs[0].sh_addr != 0) { |
1761 | pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n", | |
1762 | info->sechdrs[0].sh_type); | |
1763 | goto no_exec; | |
1764 | } | |
ec2a2959 FL |
1765 | |
1766 | for (i = 1; i < info->hdr->e_shnum; i++) { | |
1767 | shdr = &info->sechdrs[i]; | |
1768 | switch (shdr->sh_type) { | |
1769 | case SHT_NULL: | |
1770 | case SHT_NOBITS: | |
1771 | continue; | |
1772 | case SHT_SYMTAB: | |
1773 | if (shdr->sh_link == SHN_UNDEF | |
7fd982f3 SK |
1774 | || shdr->sh_link >= info->hdr->e_shnum) { |
1775 | pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n", | |
1776 | shdr->sh_link, shdr->sh_link, | |
1777 | info->hdr->e_shnum); | |
1778 | goto no_exec; | |
1779 | } | |
1bb49db9 LC |
1780 | num_sym_secs++; |
1781 | sym_idx = i; | |
ec2a2959 FL |
1782 | fallthrough; |
1783 | default: | |
1784 | err = validate_section_offset(info, shdr); | |
1785 | if (err < 0) { | |
1786 | pr_err("Invalid ELF section in module (section %u type %u)\n", | |
1787 | i, shdr->sh_type); | |
1788 | return err; | |
1789 | } | |
46752820 LC |
1790 | if (strcmp(info->secstrings + shdr->sh_name, |
1791 | ".gnu.linkonce.this_module") == 0) { | |
1792 | num_mod_secs++; | |
1793 | mod_idx = i; | |
1bb49db9 LC |
1794 | } else if (strcmp(info->secstrings + shdr->sh_name, |
1795 | ".modinfo") == 0) { | |
1796 | num_info_secs++; | |
1797 | info_idx = i; | |
46752820 | 1798 | } |
ec2a2959 FL |
1799 | |
1800 | if (shdr->sh_flags & SHF_ALLOC) { | |
1801 | if (shdr->sh_name >= strhdr->sh_size) { | |
1802 | pr_err("Invalid ELF section name in module (section %u type %u)\n", | |
1803 | i, shdr->sh_type); | |
1804 | return -ENOEXEC; | |
1805 | } | |
1806 | } | |
1807 | break; | |
1808 | } | |
1809 | } | |
1810 | ||
1bb49db9 LC |
1811 | if (num_info_secs > 1) { |
1812 | pr_err("Only one .modinfo section must exist.\n"); | |
1813 | goto no_exec; | |
1814 | } else if (num_info_secs == 1) { | |
1815 | /* Try to find a name early so we can log errors with a module name */ | |
1816 | info->index.info = info_idx; | |
1817 | info->name = get_modinfo(info, "name"); | |
1818 | } | |
1819 | ||
1820 | if (num_sym_secs != 1) { | |
1821 | pr_warn("%s: module has no symbols (stripped?)\n", | |
1822 | info->name ?: "(missing .modinfo section or name field)"); | |
1823 | goto no_exec; | |
1824 | } | |
1825 | ||
1826 | /* Sets internal symbols and strings. */ | |
1827 | info->index.sym = sym_idx; | |
1828 | shdr = &info->sechdrs[sym_idx]; | |
1829 | info->index.str = shdr->sh_link; | |
1830 | info->strtab = (char *)info->hdr + info->sechdrs[info->index.str].sh_offset; | |
1831 | ||
46752820 LC |
1832 | /* |
1833 | * The ".gnu.linkonce.this_module" ELF section is special. It is | |
1834 | * what modpost uses to refer to __this_module and lets us rely
1835 | * on THIS_MODULE to point to &__this_module properly. The kernel's
1836 | * modpost declares it in each module's *.mod.c file. If the struct
1837 | * module of the kernel changes, a full kernel rebuild is required.
1838 | * | |
1839 | * We have a few expectations of this special section; the following
1840 | * code validates all of them for us:
1841 | * | |
1842 | * o Only one section must exist | |
1843 | * o We expect the kernel to always have to allocate it: SHF_ALLOC | |
1844 | * o The section size must match the kernel's struct module size at
1845 | * run time
1846 | */ | |
1847 | if (num_mod_secs != 1) { | |
1bb49db9 LC |
1848 | pr_err("module %s: Only one .gnu.linkonce.this_module section must exist.\n", |
1849 | info->name ?: "(missing .modinfo section or name field)"); | |
46752820 LC |
1850 | goto no_exec; |
1851 | } | |
1852 | ||
1853 | shdr = &info->sechdrs[mod_idx]; | |
1854 | ||
1855 | /* | |
1856 | * This is already implied by the switch above; however, let's be
1857 | * pedantic about it. | |
1858 | */ | |
1859 | if (shdr->sh_type == SHT_NOBITS) { | |
1bb49db9 LC |
1860 | pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n", |
1861 | info->name ?: "(missing .modinfo section or name field)"); | |
46752820 LC |
1862 | goto no_exec; |
1863 | } | |
1864 | ||
1865 | if (!(shdr->sh_flags & SHF_ALLOC)) { | |
1bb49db9 LC |
1866 | pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n", |
1867 | info->name ?: "(missing .modinfo section or name field)"); | |
46752820 LC |
1868 | goto no_exec; |
1869 | } | |
1870 | ||
1871 | if (shdr->sh_size != sizeof(struct module)) { | |
1bb49db9 LC |
1872 | pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n", |
1873 | info->name ?: "(missing .modinfo section or name field)"); | |
46752820 LC |
1874 | goto no_exec; |
1875 | } | |
1876 | ||
1877 | info->index.mod = mod_idx; | |
1878 | ||
1879 | /* This is temporary: point mod into copy of data. */ | |
1880 | info->mod = (void *)info->hdr + shdr->sh_offset; | |
1881 | ||
1bb49db9 LC |
1882 | /* |
1883 | * If we didn't load the .modinfo 'name' field earlier, fall back to | |
1884 | * the on-disk struct module 'name' field.
1885 | */ | |
1886 | if (!info->name) | |
1887 | info->name = info->mod->name; | |
1888 | ||
3d40bb90 LC |
1889 | if (flags & MODULE_INIT_IGNORE_MODVERSIONS) |
1890 | info->index.vers = 0; /* Pretend no __versions section! */ | |
1891 | else | |
1892 | info->index.vers = find_sec(info, "__versions"); | |
1893 | ||
1894 | info->index.pcpu = find_pcpusec(info); | |
1895 | ||
34e1169d | 1896 | return 0; |
7fd982f3 SK |
1897 | |
1898 | no_exec: | |
1899 | return -ENOEXEC; | |
34e1169d KC |
1900 | } |
1901 | ||
3afe9f84 LT |
1902 | #define COPY_CHUNK_SIZE (16*PAGE_SIZE) |
1903 | ||
1904 | static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) | |
1905 | { | |
1906 | do { | |
1907 | unsigned long n = min(len, COPY_CHUNK_SIZE); | |
1908 | ||
1909 | if (copy_from_user(dst, usrc, n) != 0) | |
1910 | return -EFAULT; | |
1911 | cond_resched(); | |
1912 | dst += n; | |
1913 | usrc += n; | |
1914 | len -= n; | |
1915 | } while (len); | |
1916 | return 0; | |
1917 | } | |
1918 | ||
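/*
 * The chunking above exists purely to keep the copy preemptible: a module
 * image can be many megabytes, so rather than one copy_from_user() of the
 * whole file, the loop yields via cond_resched() after every
 * COPY_CHUNK_SIZE (16 pages) copied.
 */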
2992ef29 | 1919 | static int check_modinfo_livepatch(struct module *mod, struct load_info *info) |
1ce15ef4 | 1920 | { |
1be9473e AT |
1921 | if (!get_modinfo(info, "livepatch")) |
1922 | /* Nothing more to do */ | |
1923 | return 0; | |
1924 | ||
ed52cabe | 1925 | if (set_livepatch_module(mod)) |
1be9473e | 1926 | return 0; |
1ce15ef4 | 1927 | |
1be9473e AT |
1928 | pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", |
1929 | mod->name); | |
1930 | return -ENOEXEC; | |
1ce15ef4 | 1931 | } |
1ce15ef4 | 1932 | |
caf7501a AK |
1933 | static void check_modinfo_retpoline(struct module *mod, struct load_info *info) |
1934 | { | |
1935 | if (retpoline_module_ok(get_modinfo(info, "retpoline"))) | |
1936 | return; | |
1937 | ||
1938 | pr_warn("%s: loading module not compiled with retpoline compiler.\n", | |
1939 | mod->name); | |
1940 | } | |
1941 | ||
34e1169d KC |
1942 | /* Sets info->hdr and info->len. */ |
1943 | static int copy_module_from_user(const void __user *umod, unsigned long len, | |
1944 | struct load_info *info) | |
40dd2560 RR |
1945 | { |
1946 | int err; | |
40dd2560 | 1947 | |
34e1169d KC |
1948 | info->len = len; |
1949 | if (info->len < sizeof(*(info->hdr))) | |
40dd2560 RR |
1950 | return -ENOEXEC; |
1951 | ||
38f90173 | 1952 | err = security_kernel_load_data(LOADING_MODULE, true); |
2e72d51b KC |
1953 | if (err) |
1954 | return err; | |
1955 | ||
40dd2560 | 1956 | /* Suck in entire file: we'll want most of it. */ |
88dca4ca | 1957 | info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN); |
34e1169d | 1958 | if (!info->hdr) |
40dd2560 RR |
1959 | return -ENOMEM; |
1960 | ||
3afe9f84 | 1961 | if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { |
38f90173 KC |
1962 | err = -EFAULT; |
1963 | goto out; | |
40dd2560 RR |
1964 | } |
1965 | ||
38f90173 KC |
1966 | err = security_kernel_post_load_data((char *)info->hdr, info->len, |
1967 | LOADING_MODULE, "init_module"); | |
1968 | out: | |
1969 | if (err) | |
1970 | vfree(info->hdr); | |
1971 | ||
1972 | return err; | |
34e1169d KC |
1973 | } |
1974 | ||
b1ae6dc4 | 1975 | static void free_copy(struct load_info *info, int flags) |
d913188c | 1976 | { |
b1ae6dc4 DT |
1977 | if (flags & MODULE_INIT_COMPRESSED_FILE) |
1978 | module_decompress_cleanup(info); | |
1979 | else | |
1980 | vfree(info->hdr); | |
d913188c RR |
1981 | } |
1982 | ||
2f3238ae | 1983 | static int rewrite_section_headers(struct load_info *info, int flags) |
8b5f61a7 RR |
1984 | { |
1985 | unsigned int i; | |
1986 | ||
1987 | /* This should always be true, but let's be sure. */ | |
1988 | info->sechdrs[0].sh_addr = 0; | |
1989 | ||
1990 | for (i = 1; i < info->hdr->e_shnum; i++) { | |
1991 | Elf_Shdr *shdr = &info->sechdrs[i]; | |
8b5f61a7 | 1992 | |
24b9f0d2 SS |
1993 | /* |
1994 | * Mark each section's sh_addr with its address in the
1995 | * temporary image.
1996 | */ | |
8b5f61a7 RR |
1997 | shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; |
1998 | ||
8b5f61a7 | 1999 | } |
d6df72a0 RR |
2000 | |
2001 | /* Track but don't keep modinfo and version sections. */ | |
3e2e857f | 2002 | info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; |
d6df72a0 | 2003 | info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; |
3e2e857f | 2004 | |
8b5f61a7 RR |
2005 | return 0; |
2006 | } | |
2007 | ||
437c1f9c LC |
2008 | /* |
2009 | * These calls taint the kernel depending on certain module circumstances. */
2010 | static void module_augment_kernel_taints(struct module *mod, struct load_info *info) | |
40dd2560 | 2011 | { |
72f08b3c LC |
2012 | int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE); |
2013 | ||
3205c36c LP |
2014 | if (!get_modinfo(info, "intree")) { |
2015 | if (!test_taint(TAINT_OOT_MODULE)) | |
2016 | pr_warn("%s: loading out-of-tree module taints kernel.\n", | |
2017 | mod->name); | |
373d4d09 | 2018 | add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); |
3205c36c | 2019 | } |
2449b8ba | 2020 | |
caf7501a AK |
2021 | check_modinfo_retpoline(mod, info); |
2022 | ||
49668688 | 2023 | if (get_modinfo(info, "staging")) { |
373d4d09 | 2024 | add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); |
bddb12b3 AM |
2025 | pr_warn("%s: module is from the staging directory, the quality " |
2026 | "is unknown, you have been warned.\n", mod->name); | |
40dd2560 | 2027 | } |
22e268eb | 2028 | |
ed52cabe LC |
2029 | if (is_livepatch_module(mod)) { |
2030 | add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK); | |
2031 | pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n", | |
2032 | mod->name); | |
2033 | } | |
437c1f9c | 2034 | |
ad8d3a36 | 2035 | module_license_taint_check(mod, get_modinfo(info, "license")); |
22e268eb | 2036 | |
74829ddf DG |
2037 | if (get_modinfo(info, "test")) { |
2038 | if (!test_taint(TAINT_TEST)) | |
2039 | pr_warn("%s: loading test module taints kernel.\n", | |
2040 | mod->name); | |
2041 | add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK); | |
2042 | } | |
c3bbf62e LC |
2043 | #ifdef CONFIG_MODULE_SIG |
2044 | mod->sig_ok = info->sig_ok; | |
2045 | if (!mod->sig_ok) { | |
2046 | pr_notice_once("%s: module verification failed: signature " | |
2047 | "and/or required key missing - tainting " | |
2048 | "kernel\n", mod->name); | |
2049 | add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); | |
2050 | } | |
2051 | #endif | |
72f08b3c LC |
2052 | |
2053 | /* | |
2054 | * ndiswrapper is under GPL by itself, but loads proprietary modules. | |
2055 | * Don't use add_taint_module(), as it would prevent ndiswrapper from | |
2056 | * using GPL-only symbols it needs. | |
2057 | */ | |
2058 | if (strcmp(mod->name, "ndiswrapper") == 0) | |
2059 | add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); | |
2060 | ||
2061 | /* driverloader was caught wrongly pretending to be under GPL */ | |
2062 | if (strcmp(mod->name, "driverloader") == 0) | |
2063 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE, | |
2064 | LOCKDEP_NOW_UNRELIABLE); | |
2065 | ||
2066 | /* lve claims to be GPL but upstream won't provide source */ | |
2067 | if (strcmp(mod->name, "lve") == 0) | |
2068 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE, | |
2069 | LOCKDEP_NOW_UNRELIABLE); | |
2070 | ||
2071 | if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE)) | |
2072 | pr_warn("%s: module license taints kernel.\n", mod->name); | |
2073 | ||
437c1f9c LC |
2074 | } |
2075 | ||
2076 | static int check_modinfo(struct module *mod, struct load_info *info, int flags) | |
2077 | { | |
2078 | const char *modmagic = get_modinfo(info, "vermagic"); | |
2079 | int err; | |
2080 | ||
2081 | if (flags & MODULE_INIT_IGNORE_VERMAGIC) | |
2082 | modmagic = NULL; | |
2083 | ||
2084 | /* This is allowed: modprobe --force will invalidate it. */ | |
2085 | if (!modmagic) { | |
2086 | err = try_to_force_load(mod, "bad vermagic"); | |
2087 | if (err) | |
2088 | return err; | |
2089 | } else if (!same_magic(modmagic, vermagic, info->index.vers)) { | |
2090 | pr_err("%s: version magic '%s' should be '%s'\n", | |
2091 | info->name, modmagic, vermagic); | |
2092 | return -ENOEXEC; | |
2093 | } | |
2094 | ||
2095 | err = check_modinfo_livepatch(mod, info); | |
2096 | if (err) | |
2097 | return err; | |
2098 | ||
40dd2560 RR |
2099 | return 0; |
2100 | } | |
2101 | ||
eb3057df | 2102 | static int find_module_sections(struct module *mod, struct load_info *info) |
f91a13bb | 2103 | { |
49668688 | 2104 | mod->kp = section_objs(info, "__param", |
f91a13bb | 2105 | sizeof(*mod->kp), &mod->num_kp); |
49668688 | 2106 | mod->syms = section_objs(info, "__ksymtab", |
f91a13bb | 2107 | sizeof(*mod->syms), &mod->num_syms); |
49668688 RR |
2108 | mod->crcs = section_addr(info, "__kcrctab"); |
2109 | mod->gpl_syms = section_objs(info, "__ksymtab_gpl", | |
f91a13bb LT |
2110 | sizeof(*mod->gpl_syms), |
2111 | &mod->num_gpl_syms); | |
49668688 | 2112 | mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); |
f91a13bb | 2113 | |
f91a13bb | 2114 | #ifdef CONFIG_CONSTRUCTORS |
49668688 | 2115 | mod->ctors = section_objs(info, ".ctors", |
f91a13bb | 2116 | sizeof(*mod->ctors), &mod->num_ctors); |
eb3057df FH |
2117 | if (!mod->ctors) |
2118 | mod->ctors = section_objs(info, ".init_array", | |
2119 | sizeof(*mod->ctors), &mod->num_ctors); | |
2120 | else if (find_sec(info, ".init_array")) { | |
2121 | /* | |
2122 | * This shouldn't happen when the same compiler and binutils
2123 | * build all parts of the module.
2124 | */ | |
6da0b565 | 2125 | pr_warn("%s: has both .ctors and .init_array.\n", |
eb3057df FH |
2126 | mod->name); |
2127 | return -EINVAL; | |
2128 | } | |
f91a13bb LT |
2129 | #endif |
2130 | ||
66e9b071 TG |
2131 | mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1, |
2132 | &mod->noinstr_text_size); | |
2133 | ||
f91a13bb | 2134 | #ifdef CONFIG_TRACEPOINTS |
65498646 MD |
2135 | mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", |
2136 | sizeof(*mod->tracepoints_ptrs), | |
2137 | &mod->num_tracepoints); | |
f91a13bb | 2138 | #endif |
fe15b50c PM |
2139 | #ifdef CONFIG_TREE_SRCU |
2140 | mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs", | |
2141 | sizeof(*mod->srcu_struct_ptrs), | |
2142 | &mod->num_srcu_structs); | |
2143 | #endif | |
a38d1107 MM |
2144 | #ifdef CONFIG_BPF_EVENTS |
2145 | mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", | |
2146 | sizeof(*mod->bpf_raw_events), | |
2147 | &mod->num_bpf_raw_events); | |
2148 | #endif | |
36e68442 AN |
2149 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
2150 | mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); | |
2151 | #endif | |
e9666d10 | 2152 | #ifdef CONFIG_JUMP_LABEL |
bf5438fc JB |
2153 | mod->jump_entries = section_objs(info, "__jump_table", |
2154 | sizeof(*mod->jump_entries), | |
2155 | &mod->num_jump_entries); | |
2156 | #endif | |
f91a13bb | 2157 | #ifdef CONFIG_EVENT_TRACING |
49668688 | 2158 | mod->trace_events = section_objs(info, "_ftrace_events", |
f91a13bb LT |
2159 | sizeof(*mod->trace_events), |
2160 | &mod->num_trace_events); | |
99be647c JL |
2161 | mod->trace_evals = section_objs(info, "_ftrace_eval_map", |
2162 | sizeof(*mod->trace_evals), | |
2163 | &mod->num_trace_evals); | |
f91a13bb | 2164 | #endif |
13b9b6e7 SR |
2165 | #ifdef CONFIG_TRACING |
2166 | mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", | |
2167 | sizeof(*mod->trace_bprintk_fmt_start), | |
2168 | &mod->num_trace_bprintk_fmt); | |
13b9b6e7 | 2169 | #endif |
f91a13bb LT |
2170 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
2171 | /* sechdrs[0].sh_size is always zero */ | |
a1326b17 | 2172 | mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, |
f91a13bb LT |
2173 | sizeof(*mod->ftrace_callsites), |
2174 | &mod->num_ftrace_callsites); | |
2175 | #endif | |
540adea3 MH |
2176 | #ifdef CONFIG_FUNCTION_ERROR_INJECTION |
2177 | mod->ei_funcs = section_objs(info, "_error_injection_whitelist", | |
2178 | sizeof(*mod->ei_funcs), | |
2179 | &mod->num_ei_funcs); | |
1e6769b0 MH |
2180 | #endif |
2181 | #ifdef CONFIG_KPROBES | |
2182 | mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1, | |
2183 | &mod->kprobes_text_size); | |
16db6264 MH |
2184 | mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist", |
2185 | sizeof(unsigned long), | |
2186 | &mod->num_kprobe_blacklist); | |
9183c3f9 | 2187 | #endif |
33701557 CD |
2188 | #ifdef CONFIG_PRINTK_INDEX |
2189 | mod->printk_index_start = section_objs(info, ".printk_index", | |
2190 | sizeof(*mod->printk_index_start), | |
2191 | &mod->printk_index_size); | |
2192 | #endif | |
9183c3f9 JP |
2193 | #ifdef CONFIG_HAVE_STATIC_CALL_INLINE |
2194 | mod->static_call_sites = section_objs(info, ".static_call_sites", | |
2195 | sizeof(*mod->static_call_sites), | |
2196 | &mod->num_static_call_sites); | |
92ace999 | 2197 | #endif |
41a55567 | 2198 | #if IS_ENABLED(CONFIG_KUNIT) |
3d6e4462 JK |
2199 | mod->kunit_suites = section_objs(info, ".kunit_test_suites", |
2200 | sizeof(*mod->kunit_suites), | |
2201 | &mod->num_kunit_suites); | |
2202 | #endif | |
2203 | ||
811d66a0 RR |
2204 | mod->extable = section_objs(info, "__ex_table", |
2205 | sizeof(*mod->extable), &mod->num_exentries); | |
2206 | ||
49668688 | 2207 | if (section_addr(info, "__obsparm")) |
bddb12b3 | 2208 | pr_warn("%s: Ignoring obsolete parameters\n", mod->name); |
811d66a0 | 2209 | |
7deabd67 JB |
2210 | #ifdef CONFIG_DYNAMIC_DEBUG_CORE |
2211 | mod->dyndbg_info.descs = section_objs(info, "__dyndbg", | |
2212 | sizeof(*mod->dyndbg_info.descs), | |
2213 | &mod->dyndbg_info.num_descs); | |
2214 | mod->dyndbg_info.classes = section_objs(info, "__dyndbg_classes", | |
2215 | sizeof(*mod->dyndbg_info.classes), | |
2216 | &mod->dyndbg_info.num_classes); | |
2217 | #endif | |
eb3057df FH |
2218 | |
2219 | return 0; | |
f91a13bb LT |
2220 | } |
2221 | ||
49668688 | 2222 | static int move_module(struct module *mod, struct load_info *info) |
65b8a9b4 LT |
2223 | { |
2224 | int i; | |
2225 | void *ptr; | |
c7ee8aeb LC |
2226 | enum mod_mem_type t = 0; |
2227 | int ret = -ENOMEM; | |
65b8a9b4 | 2228 | |
ac3b4328 SL |
2229 | for_each_mod_mem_type(type) { |
2230 | if (!mod->mem[type].size) { | |
2231 | mod->mem[type].base = NULL; | |
2232 | continue; | |
2233 | } | |
2234 | mod->mem[type].size = PAGE_ALIGN(mod->mem[type].size); | |
2235 | ptr = module_memory_alloc(mod->mem[type].size, type); | |
82fab442 | 2236 | /* |
430bb0d1 LC |
2237 | * The pointers to these blocks of memory are stored on the module
2238 | * structure and we keep them around for as long as the module is
2239 | * around. We only free that memory when we unload the module. | |
2240 | * Just mark them as not being a leak then. The .init* ELF | |
2241 | * sections *do* get freed after boot so we *could* treat them | |
2242 | * slightly differently with kmemleak_ignore() and only grey | |
2243 | * them out as they work as typical memory allocations which | |
2244 | * *do* eventually get freed, but let's just keep things simple | |
2245 | * and avoid *any* false positives. | |
82fab442 | 2246 | */ |
430bb0d1 | 2247 | kmemleak_not_leak(ptr); |
82fab442 | 2248 | if (!ptr) { |
ac3b4328 SL |
2249 | t = type; |
2250 | goto out_enomem; | |
82fab442 | 2251 | } |
ac3b4328 SL |
2252 | memset(ptr, 0, mod->mem[type].size); |
2253 | mod->mem[type].base = ptr; | |
01dc0386 CL |
2254 | } |
2255 | ||
65b8a9b4 | 2256 | /* Transfer each section which specifies SHF_ALLOC */ |
6ed81802 | 2257 | pr_debug("Final section addresses for %s:\n", mod->name); |
49668688 | 2258 | for (i = 0; i < info->hdr->e_shnum; i++) { |
65b8a9b4 | 2259 | void *dest; |
49668688 | 2260 | Elf_Shdr *shdr = &info->sechdrs[i]; |
ac3b4328 | 2261 | enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT; |
65b8a9b4 | 2262 | |
49668688 | 2263 | if (!(shdr->sh_flags & SHF_ALLOC)) |
65b8a9b4 LT |
2264 | continue; |
2265 | ||
ac3b4328 | 2266 | dest = mod->mem[type].base + (shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK); |
65b8a9b4 | 2267 | |
c7ee8aeb LC |
2268 | if (shdr->sh_type != SHT_NOBITS) { |
2269 | /* | |
2270 | * Our ELF checker already validated this, but let's | |
2271 | * be pedantic and make the goal clearer. We actually | |
2272 | * end up copying over all modifications made to the | |
2273 | * userspace copy of the entire struct module. | |
2274 | */ | |
2275 | if (i == info->index.mod && | |
2276 | (WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) { | |
2277 | ret = -ENOEXEC; | |
2278 | goto out_enomem; | |
2279 | } | |
49668688 | 2280 | memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); |
c7ee8aeb LC |
2281 | } |
2282 | /* | |
2283 | * Update the userspace copy's ELF section address to point to | |
2284 | * our newly allocated memory, purely as a convenience, so that
2285 | * users of info can keep taking advantage of it and use the newly
2286 | * minted official memory area. | |
2287 | */ | |
49668688 | 2288 | shdr->sh_addr = (unsigned long)dest; |
66a2301e JC |
2289 | pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr, |
2290 | (long)shdr->sh_size, info->secstrings + shdr->sh_name); | |
65b8a9b4 | 2291 | } |
d913188c RR |
2292 | |
2293 | return 0; | |
ac3b4328 SL |
2294 | out_enomem: |
2295 | for (t--; t >= 0; t--) | |
2296 | module_memory_free(mod->mem[t].base, t); | |
c7ee8aeb | 2297 | return ret; |
65b8a9b4 LT |
2298 | } |
2299 | ||
419e1a20 | 2300 | static int check_export_symbol_versions(struct module *mod) |
22e268eb | 2301 | { |
22e268eb | 2302 | #ifdef CONFIG_MODVERSIONS |
36794822 CH |
2303 | if ((mod->num_syms && !mod->crcs) || |
2304 | (mod->num_gpl_syms && !mod->gpl_crcs)) { | |
22e268eb RR |
2305 | return try_to_force_load(mod, |
2306 | "no versions for exported symbols"); | |
2307 | } | |
2308 | #endif | |
2309 | return 0; | |
2310 | } | |
2311 | ||
2312 | static void flush_module_icache(const struct module *mod) | |
2313 | { | |
22e268eb RR |
2314 | /* |
2315 | * Flush the instruction cache, since we've played with text. | |
2316 | * Do it before processing module parameters, so the module
2317 | * can provide parameter accessor functions of its own. | |
2318 | */ | |
ac3b4328 SL |
2319 | for_each_mod_mem_type(type) { |
2320 | const struct module_memory *mod_mem = &mod->mem[type]; | |
2321 | ||
2322 | if (mod_mem->size) { | |
2323 | flush_icache_range((unsigned long)mod_mem->base, | |
2324 | (unsigned long)mod_mem->base + mod_mem->size); | |
2325 | } | |
2326 | } | |
22e268eb RR |
2327 | } |
2328 | ||
f9231a99 NP |
2329 | bool __weak module_elf_check_arch(Elf_Ehdr *hdr) |
2330 | { | |
2331 | return true; | |
2332 | } | |
2333 | ||
74e08fcf JB |
2334 | int __weak module_frob_arch_sections(Elf_Ehdr *hdr, |
2335 | Elf_Shdr *sechdrs, | |
2336 | char *secstrings, | |
2337 | struct module *mod) | |
2338 | { | |
2339 | return 0; | |
2340 | } | |
2341 | ||
be7de5f9 PB |
2342 | /* module_blacklist is a comma-separated list of module names */ |
2343 | static char *module_blacklist; | |
96b5b194 | 2344 | static bool blacklisted(const char *module_name) |
be7de5f9 PB |
2345 | { |
2346 | const char *p; | |
2347 | size_t len; | |
2348 | ||
2349 | if (!module_blacklist) | |
2350 | return false; | |
2351 | ||
2352 | for (p = module_blacklist; *p; p += len) { | |
2353 | len = strcspn(p, ","); | |
2354 | if (strlen(module_name) == len && !memcmp(module_name, p, len)) | |
2355 | return true; | |
2356 | if (p[len] == ',') | |
2357 | len++; | |
2358 | } | |
2359 | return false; | |
2360 | } | |
2361 | core_param(module_blacklist, module_blacklist, charp, 0400); | |
2362 | ||
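/*
 * Usage example (boot command line), assuming modules named "foo" and "bar"
 * exist:
 *
 *	module_blacklist=foo,bar
 *
 * Matching is against the whole name of each comma-separated entry, so a
 * module called "foobar" would still be allowed to load.
 */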
2f3238ae | 2363 | static struct module *layout_and_allocate(struct load_info *info, int flags) |
1da177e4 | 2364 | { |
1da177e4 | 2365 | struct module *mod; |
444d13ff | 2366 | unsigned int ndx; |
d913188c | 2367 | int err; |
3ae91c21 | 2368 | |
1da177e4 | 2369 | /* Allow arches to frob section contents and sizes. */ |
49668688 | 2370 | err = module_frob_arch_sections(info->hdr, info->sechdrs, |
81a0abd9 | 2371 | info->secstrings, info->mod); |
1da177e4 | 2372 | if (err < 0) |
8d8022e8 | 2373 | return ERR_PTR(err); |
1da177e4 | 2374 | |
5c3a7db0 PZ |
2375 | err = module_enforce_rwx_sections(info->hdr, info->sechdrs, |
2376 | info->secstrings, info->mod); | |
2377 | if (err < 0) | |
2378 | return ERR_PTR(err); | |
2379 | ||
8d8022e8 RR |
2380 | /* We will do a special allocation for per-cpu sections later. */ |
2381 | info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; | |
1da177e4 | 2382 | |
444d13ff JY |
2383 | /* |
2384 | * Mark ro_after_init section with SHF_RO_AFTER_INIT so that | |
2385 | * layout_sections() can put it in the right place. | |
2386 | * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. | |
2387 | */ | |
2388 | ndx = find_sec(info, ".data..ro_after_init"); | |
e872267b AB |
2389 | if (ndx) |
2390 | info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; | |
2391 | /* | |
2392 | * Mark the __jump_table section as ro_after_init as well: these data | |
2393 | * structures are never modified, with the exception of entries that | |
2394 | * refer to code in the __init section, which are annotated as such | |
2395 | * at module load time. | |
2396 | */ | |
2397 | ndx = find_sec(info, "__jump_table"); | |
444d13ff JY |
2398 | if (ndx) |
2399 | info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; | |
2400 | ||
24b9f0d2 SS |
2401 | /* |
2402 | * Determine total sizes, and put offsets in sh_entsize. For now | |
2403 | * this is done generically; there don't appear to be any
2404 | * special cases for the architectures. | |
2405 | */ | |
81a0abd9 JY |
2406 | layout_sections(info->mod, info); |
2407 | layout_symtab(info->mod, info); | |
1da177e4 | 2408 | |
65b8a9b4 | 2409 | /* Allocate and move to the final place */ |
81a0abd9 | 2410 | err = move_module(info->mod, info); |
d913188c | 2411 | if (err) |
8d8022e8 | 2412 | return ERR_PTR(err); |
d913188c RR |
2413 | |
2414 | /* Module has been copied to its final place now: return it. */ | |
2415 | mod = (void *)info->sechdrs[info->index.mod].sh_addr; | |
49668688 | 2416 | kmemleak_load_module(mod, info); |
d913188c | 2417 | return mod; |
d913188c RR |
2418 | } |
2419 | ||
2420 | /* mod is no longer valid after this! */ | |
2421 | static void module_deallocate(struct module *mod, struct load_info *info) | |
2422 | { | |
d913188c | 2423 | percpu_modfree(mod); |
d453cded | 2424 | module_arch_freeing_init(mod); |
ac3b4328 SL |
2425 | |
2426 | free_mod_mem(mod); | |
d913188c RR |
2427 | } |
2428 | ||
74e08fcf JB |
2429 | int __weak module_finalize(const Elf_Ehdr *hdr, |
2430 | const Elf_Shdr *sechdrs, | |
2431 | struct module *me) | |
2432 | { | |
2433 | return 0; | |
2434 | } | |
2435 | ||
811d66a0 RR |
2436 | static int post_relocation(struct module *mod, const struct load_info *info) |
2437 | { | |
51f3d0f4 | 2438 | /* Sort exception table now relocations are done. */ |
811d66a0 RR |
2439 | sort_extable(mod->extable, mod->extable + mod->num_exentries); |
2440 | ||
2441 | /* Copy relocated percpu area over. */ | |
2442 | percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, | |
2443 | info->sechdrs[info->index.pcpu].sh_size); | |
2444 | ||
51f3d0f4 | 2445 | /* Setup kallsyms-specific fields. */ |
811d66a0 RR |
2446 | add_kallsyms(mod, info); |
2447 | ||
2448 | /* Arch-specific module finalizing. */ | |
2449 | return module_finalize(info->hdr, info->sechdrs, mod); | |
2450 | } | |
2451 | ||
34e1169d KC |
2452 | /* Call module constructors. */ |
2453 | static void do_mod_ctors(struct module *mod) | |
2454 | { | |
2455 | #ifdef CONFIG_CONSTRUCTORS | |
2456 | unsigned long i; | |
2457 | ||
2458 | for (i = 0; i < mod->num_ctors; i++) | |
2459 | mod->ctors[i](); | |
2460 | #endif | |
2461 | } | |
2462 | ||
c7496379 RR |
2463 | /* For freeing module_init on success, in case kallsyms is still traversing it */
2464 | struct mod_initfree { | |
1a7b7d92 | 2465 | struct llist_node node; |
ac3b4328 SL |
2466 | void *init_text; |
2467 | void *init_data; | |
2468 | void *init_rodata; | |
c7496379 RR |
2469 | }; |
2470 | ||
1a7b7d92 | 2471 | static void do_free_init(struct work_struct *w) |
c7496379 | 2472 | { |
1a7b7d92 RE |
2473 | struct llist_node *pos, *n, *list; |
2474 | struct mod_initfree *initfree; | |
2475 | ||
2476 | list = llist_del_all(&init_free_list); | |
2477 | ||
2478 | synchronize_rcu(); | |
2479 | ||
2480 | llist_for_each_safe(pos, n, list) { | |
2481 | initfree = container_of(pos, struct mod_initfree, node); | |
ac3b4328 SL |
2482 | module_memfree(initfree->init_text); |
2483 | module_memfree(initfree->init_data); | |
2484 | module_memfree(initfree->init_rodata); | |
1a7b7d92 RE |
2485 | kfree(initfree); |
2486 | } | |
c7496379 RR |
2487 | } |
2488 | ||
ae39e9ed SK |
2489 | #undef MODULE_PARAM_PREFIX |
2490 | #define MODULE_PARAM_PREFIX "module." | |
2491 | /* Default value for module->async_probe_requested */ | |
2492 | static bool async_probe; | |
2493 | module_param(async_probe, bool, 0644); | |
2494 | ||
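/*
 * With MODULE_PARAM_PREFIX set to "module." above, this is exposed as the
 * "module.async_probe" parameter; for example, booting with
 *
 *	module.async_probe=1
 *
 * makes asynchronous probing the default for modules loaded afterwards,
 * while a per-module "async_probe=..." argument (handled in
 * unknown_module_param_cb() further down) can still override it.
 */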
be02a186 JK |
2495 | /* |
2496 | * This is where the real work happens. | |
2497 | * | |
2498 | * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb | |
2499 | * helper command 'lx-symbols'. | |
2500 | */ | |
2501 | static noinline int do_init_module(struct module *mod) | |
34e1169d KC |
2502 | { |
2503 | int ret = 0; | |
c7496379 | 2504 | struct mod_initfree *freeinit; |
df3e764d LC |
2505 | #if defined(CONFIG_MODULE_STATS) |
2506 | unsigned int text_size = 0, total_size = 0; | |
2507 | ||
2508 | for_each_mod_mem_type(type) { | |
2509 | const struct module_memory *mod_mem = &mod->mem[type]; | |
2510 | if (mod_mem->size) { | |
2511 | total_size += mod_mem->size; | |
2512 | if (type == MOD_TEXT || type == MOD_INIT_TEXT) | |
2513 | text_size += mod_mem->size; | |
2514 | } | |
2515 | } | |
2516 | #endif | |
c7496379 RR |
2517 | |
2518 | freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); | |
2519 | if (!freeinit) { | |
2520 | ret = -ENOMEM; | |
2521 | goto fail; | |
2522 | } | |
ac3b4328 SL |
2523 | freeinit->init_text = mod->mem[MOD_INIT_TEXT].base; |
2524 | freeinit->init_data = mod->mem[MOD_INIT_DATA].base; | |
2525 | freeinit->init_rodata = mod->mem[MOD_INIT_RODATA].base; | |
34e1169d | 2526 | |
34e1169d KC |
2527 | do_mod_ctors(mod); |
2528 | /* Start the module */ | |
2529 | if (mod->init != NULL) | |
2530 | ret = do_one_initcall(mod->init); | |
2531 | if (ret < 0) { | |
c7496379 | 2532 | goto fail_free_freeinit; |
34e1169d KC |
2533 | } |
2534 | if (ret > 0) { | |
bddb12b3 AM |
2535 | pr_warn("%s: '%s'->init suspiciously returned %d, it should " |
2536 | "follow 0/-E convention\n" | |
2537 | "%s: loading module anyway...\n", | |
2538 | __func__, mod->name, ret, __func__); | |
34e1169d KC |
2539 | dump_stack(); |
2540 | } | |
2541 | ||
2542 | /* Now it's a first class citizen! */ | |
2543 | mod->state = MODULE_STATE_LIVE; | |
2544 | blocking_notifier_call_chain(&module_notify_list, | |
2545 | MODULE_STATE_LIVE, mod); | |
2546 | ||
38dc717e JY |
2547 | /* Delay uevent until module has finished its init routine */ |
2548 | kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); | |
2549 | ||
774a1221 TH |
2550 | /* |
2551 | * We need to finish all async code before the module init sequence | |
67d6212a IP |
2552 | * is done. This has potential to deadlock if synchronous module |
2553 | * loading is requested from async (which is not allowed!). | |
774a1221 | 2554 | * |
67d6212a IP |
2555 | * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous |
2556 | * request_module() from async workers") for more details. | |
774a1221 | 2557 | */ |
67d6212a | 2558 | if (!mod->async_probe_requested) |
774a1221 | 2559 | async_synchronize_full(); |
34e1169d | 2560 | |
ac3b4328 SL |
2561 | ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base, |
2562 | mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size); | |
34e1169d KC |
2563 | mutex_lock(&module_mutex); |
2564 | /* Drop initial reference. */ | |
2565 | module_put(mod); | |
2566 | trim_init_extable(mod); | |
2567 | #ifdef CONFIG_KALLSYMS | |
8244062e RR |
2568 | /* Switch to core kallsyms now init is done: kallsyms may be walking! */ |
2569 | rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); | |
34e1169d | 2570 | #endif |
444d13ff | 2571 | module_enable_ro(mod, true); |
93c2e105 | 2572 | mod_tree_remove_init(mod); |
d453cded | 2573 | module_arch_freeing_init(mod); |
ac3b4328 SL |
2574 | for_class_mod_mem_type(type, init) { |
2575 | mod->mem[type].base = NULL; | |
2576 | mod->mem[type].size = 0; | |
2577 | } | |
df3e764d | 2578 | |
607c543f AN |
2579 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
2580 | /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */ | |
2581 | mod->btf_data = NULL; | |
607c543f | 2582 | #endif |
c7496379 RR |
2583 | /* |
2584 | * We want to free module_init, but be aware that kallsyms may be | |
0be964be | 2585 | * walking this with preempt disabled. In all the failure paths, we |
cb2f5536 | 2586 | * call synchronize_rcu(), but we don't want to slow down the success |
1a7b7d92 RE |
2587 | * path. module_memfree() cannot be called in an interrupt, so do the |
2588 | * work and call synchronize_rcu() in a work queue. | |
2589 | * | |
ae646f0b JH |
2590 | * Note that module_alloc() on most architectures creates W+X page |
2591 | * mappings which won't be cleaned up until do_free_init() runs. Any | |
2592 | * code such as mark_rodata_ro() which depends on those mappings to | |
2593 | * be cleaned up needs to sync with the queued work, i.e.
cb2f5536 | 2594 | * rcu_barrier() |
c7496379 | 2595 | */ |
1a7b7d92 RE |
2596 | if (llist_add(&freeinit->node, &init_free_list)) |
2597 | schedule_work(&init_free_wq); | |
2598 | ||
34e1169d KC |
2599 | mutex_unlock(&module_mutex); |
2600 | wake_up_all(&module_wq); | |
2601 | ||
df3e764d LC |
2602 | mod_stat_add_long(text_size, &total_text_size); |
2603 | mod_stat_add_long(total_size, &total_mod_size); | |
2604 | ||
2605 | mod_stat_inc(&modcount); | |
2606 | ||
34e1169d | 2607 | return 0; |
c7496379 RR |
2608 | |
2609 | fail_free_freeinit: | |
2610 | kfree(freeinit); | |
2611 | fail: | |
2612 | /* Try to protect us from buggy refcounters. */ | |
2613 | mod->state = MODULE_STATE_GOING; | |
cb2f5536 | 2614 | synchronize_rcu(); |
c7496379 RR |
2615 | module_put(mod); |
2616 | blocking_notifier_call_chain(&module_notify_list, | |
2617 | MODULE_STATE_GOING, mod); | |
7e545d6e | 2618 | klp_module_going(mod); |
7dcd182b | 2619 | ftrace_release_mod(mod); |
c7496379 RR |
2620 | free_module(mod); |
2621 | wake_up_all(&module_wq); | |
df3e764d | 2622 | |
c7496379 | 2623 | return ret; |
34e1169d KC |
2624 | } |
2625 | ||
2626 | static int may_init_module(void) | |
2627 | { | |
2628 | if (!capable(CAP_SYS_MODULE) || modules_disabled) | |
2629 | return -EPERM; | |
2630 | ||
2631 | return 0; | |
2632 | } | |
2633 | ||
f71afa6a LC |
2634 | /* Is this module of this name done loading? No locks held. */ |
2635 | static bool finished_loading(const char *name) | |
2636 | { | |
2637 | struct module *mod; | |
2638 | bool ret; | |
2639 | ||
2640 | /* | |
2641 | * The module_mutex should not be a heavily contended lock; | |
2642 | * if we get the occasional sleep here, we'll go an extra iteration | |
2643 | * in the wait_event_interruptible(), which is harmless. | |
2644 | */ | |
2645 | sched_annotate_sleep(); | |
2646 | mutex_lock(&module_mutex); | |
2647 | mod = find_module_all(name, strlen(name), true); | |
2648 | ret = !mod || mod->state == MODULE_STATE_LIVE | |
2649 | || mod->state == MODULE_STATE_GOING; | |
2650 | mutex_unlock(&module_mutex); | |
2651 | ||
2652 | return ret; | |
2653 | } | |
2654 | ||
2655 | /* Must be called with module_mutex held */ | |
df3e764d LC |
2656 | static int module_patient_check_exists(const char *name, |
2657 | enum fail_dup_mod_reason reason) | |
f71afa6a LC |
2658 | { |
2659 | struct module *old; | |
2660 | int err = 0; | |
2661 | ||
2662 | old = find_module_all(name, strlen(name), true); | |
2663 | if (old == NULL) | |
2664 | return 0; | |
2665 | ||
2666 | if (old->state == MODULE_STATE_COMING || | |
2667 | old->state == MODULE_STATE_UNFORMED) { | |
2668 | /* Wait in case it fails to load. */ | |
2669 | mutex_unlock(&module_mutex); | |
2670 | err = wait_event_interruptible(module_wq, | |
2671 | finished_loading(name)); | |
2672 | mutex_lock(&module_mutex); | |
2673 | if (err) | |
2674 | return err; | |
2675 | ||
2676 | /* The module might have gone in the meantime. */ | |
2677 | old = find_module_all(name, strlen(name), true); | |
2678 | } | |
2679 | ||
df3e764d LC |
2680 | if (try_add_failed_module(name, reason)) |
2681 | pr_warn("Could not add fail-tracking for module: %s\n", name); | |
2682 | ||
f71afa6a LC |
2683 | /* |
2684 | * We are here only when the same module was being loaded. Do | |
2685 | * not try to load it again right now. This prevents long delays
2686 | * caused by serialized module load failures, which can happen
2687 | * when several devices of the same type trigger loading of
2688 | * a particular module.
2689 | */ | |
2690 | if (old && old->state == MODULE_STATE_LIVE) | |
2691 | return -EEXIST; | |
2692 | return -EBUSY; | |
2693 | } | |
2694 | ||
a3535c7e RR |
2695 | /* |
2696 | * We try to place it in the list now to make sure it's unique before | |
2697 | * we dedicate too many resources to it; in particular, this avoids
2698 | * exhausting temporary percpu memory.
2699 | */ | |
2700 | static int add_unformed_module(struct module *mod) | |
2701 | { | |
2702 | int err; | |
a3535c7e RR |
2703 | |
2704 | mod->state = MODULE_STATE_UNFORMED; | |
2705 | ||
a3535c7e | 2706 | mutex_lock(&module_mutex); |
df3e764d | 2707 | err = module_patient_check_exists(mod->name, FAIL_DUP_MOD_LOAD); |
f71afa6a | 2708 | if (err) |
a3535c7e | 2709 | goto out; |
f71afa6a | 2710 | |
4f666546 | 2711 | mod_update_bounds(mod); |
a3535c7e | 2712 | list_add_rcu(&mod->list, &modules); |
93c2e105 | 2713 | mod_tree_insert(mod); |
a3535c7e RR |
2714 | err = 0; |
2715 | ||
2716 | out: | |
2717 | mutex_unlock(&module_mutex); | |
a3535c7e RR |
2718 | return err; |
2719 | } | |
2720 | ||
2721 | static int complete_formation(struct module *mod, struct load_info *info) | |
2722 | { | |
2723 | int err; | |
2724 | ||
2725 | mutex_lock(&module_mutex); | |
2726 | ||
2727 | /* Find duplicate symbols (must be called under lock). */ | |
2d25bc55 | 2728 | err = verify_exported_symbols(mod); |
a3535c7e RR |
2729 | if (err < 0) |
2730 | goto out; | |
2731 | ||
89245600 | 2732 | /* These rely on module_mutex for list integrity. */ |
a3535c7e | 2733 | module_bug_finalize(info->hdr, info->sechdrs, mod); |
89245600 | 2734 | module_cfi_finalize(info->hdr, info->sechdrs, mod); |
a3535c7e | 2735 | |
444d13ff | 2736 | module_enable_ro(mod, false); |
85c898db | 2737 | module_enable_nx(mod); |
af742623 | 2738 | module_enable_x(mod); |
4982223e | 2739 | |
24b9f0d2 SS |
2740 | /* |
2741 | * Mark state as coming so strong_try_module_get() ignores us, | |
2742 | * but kallsyms etc. can see us. | |
2743 | */ | |
a3535c7e | 2744 | mod->state = MODULE_STATE_COMING; |
4982223e RR |
2745 | mutex_unlock(&module_mutex); |
2746 | ||
4982223e | 2747 | return 0; |
a3535c7e RR |
2748 | |
2749 | out: | |
2750 | mutex_unlock(&module_mutex); | |
2751 | return err; | |
2752 | } | |
2753 | ||
4c973d16 JY |
2754 | static int prepare_coming_module(struct module *mod) |
2755 | { | |
7e545d6e JY |
2756 | int err; |
2757 | ||
4c973d16 | 2758 | ftrace_module_enable(mod); |
7e545d6e JY |
2759 | err = klp_module_coming(mod); |
2760 | if (err) | |
2761 | return err; | |
2762 | ||
59cc8e0a PZ |
2763 | err = blocking_notifier_call_chain_robust(&module_notify_list, |
2764 | MODULE_STATE_COMING, MODULE_STATE_GOING, mod); | |
2765 | err = notifier_to_errno(err); | |
2766 | if (err) | |
2767 | klp_module_going(mod); | |
2768 | ||
2769 | return err; | |
4c973d16 JY |
2770 | } |
2771 | ||
ecc86170 LR |
2772 | static int unknown_module_param_cb(char *param, char *val, const char *modname, |
2773 | void *arg) | |
54041d8a | 2774 | { |
f2411da7 LR |
2775 | struct module *mod = arg; |
2776 | int ret; | |
2777 | ||
2778 | if (strcmp(param, "async_probe") == 0) { | |
fbed4fea | 2779 | if (kstrtobool(val, &mod->async_probe_requested)) |
ae39e9ed | 2780 | mod->async_probe_requested = true; |
f2411da7 LR |
2781 | return 0; |
2782 | } | |
2783 | ||
6da0b565 | 2784 | /* Check for magic 'dyndbg' arg */ |
f2411da7 | 2785 | ret = ddebug_dyndbg_module_param_cb(param, val, modname); |
bddb12b3 AM |
2786 | if (ret != 0) |
2787 | pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); | |
54041d8a RR |
2788 | return 0; |
2789 | } | |
2790 | ||
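/*
 * Example of the callback's effect, assuming a module "foo" loaded with
 * "insmod foo.ko async_probe=1 bogus=7": "async_probe" sets foo's
 * async_probe_requested flag, while "bogus" is first offered to dynamic
 * debug and, since it is not claimed there, only produces
 * "foo: unknown parameter 'bogus' ignored" instead of failing the load.
 */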
85e6f61c LC |
2791 | /* Module within temporary copy, this doesn't do any allocation */ |
2792 | static int early_mod_check(struct load_info *info, int flags) | |
2793 | { | |
2794 | int err; | |
2795 | ||
2796 | /* | |
2797 | * Now that we know we have the correct module name, check | |
2798 | * if it's blacklisted. | |
2799 | */ | |
2800 | if (blacklisted(info->name)) { | |
2801 | pr_err("Module %s is blacklisted\n", info->name); | |
2802 | return -EPERM; | |
2803 | } | |
2804 | ||
2805 | err = rewrite_section_headers(info, flags); | |
2806 | if (err) | |
2807 | return err; | |
2808 | ||
2809 | /* Check module struct version now, before we try to use module. */ | |
2810 | if (!check_modstruct_version(info, info->mod)) | |
2811 | return -ENOEXEC; | |
2812 | ||
02da2cba LC |
2813 | err = check_modinfo(info->mod, info, flags); |
2814 | if (err) | |
2815 | return err; | |
2816 | ||
064f4536 LC |
2817 | mutex_lock(&module_mutex); |
2818 | err = module_patient_check_exists(info->mod->name, FAIL_DUP_MOD_BECOMING); | |
2819 | mutex_unlock(&module_mutex); | |
2820 | ||
2821 | return err; | |
85e6f61c LC |
2822 | } |
2823 | ||
24b9f0d2 SS |
2824 | /* |
2825 | * Allocate and load the module: note that size of section 0 is always | |
2826 | * zero, and we rely on this for optional sections. | |
2827 | */ | |
2f3238ae RR |
2828 | static int load_module(struct load_info *info, const char __user *uargs, |
2829 | int flags) | |
d913188c | 2830 | { |
a3535c7e | 2831 | struct module *mod; |
df3e764d | 2832 | bool module_allocated = false; |
5fdc7db6 | 2833 | long err = 0; |
51e158c1 | 2834 | char *after_dashes; |
d913188c | 2835 | |
ec2a2959 FL |
2836 | /* |
2837 | * Do the signature check (if any) first. All that | |
2838 | * the signature check needs is info->len; it does
2839 | * not need any of the section info. That can be | |
2840 | * set up later. This will minimize the chances | |
2841 | * of a corrupt module causing problems before | |
2842 | * we even get to the signature check. | |
2843 | * | |
2844 | * The check will also adjust info->len by stripping | |
2845 | * off the sig length at the end of the module, making | |
2846 | * checks against info->len more correct. | |
2847 | */ | |
2848 | err = module_sig_check(info, flags); | |
2849 | if (err) | |
2850 | goto free_copy; | |
2851 | ||
2852 | /* | |
2853 | * Do basic sanity checks against the ELF header and | |
3d40bb90 LC |
2854 | * sections. Cache useful sections and set the |
2855 | * info->mod to the userspace passed struct module. | |
ec2a2959 | 2856 | */ |
3d40bb90 | 2857 | err = elf_validity_cache_copy(info, flags); |
5fdc7db6 JY |
2858 | if (err) |
2859 | goto free_copy; | |
2860 | ||
85e6f61c | 2861 | err = early_mod_check(info, flags); |
d913188c | 2862 | if (err) |
34e1169d | 2863 | goto free_copy; |
d913188c RR |
2864 | |
2865 | /* Figure out module layout, and allocate all the memory. */ | |
2f3238ae | 2866 | mod = layout_and_allocate(info, flags); |
65b8a9b4 LT |
2867 | if (IS_ERR(mod)) { |
2868 | err = PTR_ERR(mod); | |
d913188c | 2869 | goto free_copy; |
1da177e4 | 2870 | } |
1da177e4 | 2871 | |
df3e764d LC |
2872 | module_allocated = true; |
2873 | ||
ca86cad7 RGB |
2874 | audit_log_kern_module(mod->name); |
2875 | ||
a3535c7e RR |
2876 | /* Reserve our place in the list. */ |
2877 | err = add_unformed_module(mod); | |
2878 | if (err) | |
1fb9341a | 2879 | goto free_module; |
1fb9341a | 2880 | |
a12b9451 LC |
2881 | /* |
2882 | * We are tainting your kernel if your module gets into | |
2883 | * the modules linked list somehow. | |
a12b9451 LC |
2884 | */ |
2885 | module_augment_kernel_taints(mod, info); | |
106a4ee2 | 2886 | |
8d8022e8 | 2887 | /* To avoid stressing percpu allocator, do this once we're unique. */ |
9eb76d77 | 2888 | err = percpu_modalloc(mod, info); |
8d8022e8 RR |
2889 | if (err) |
2890 | goto unlink_mod; | |
2891 | ||
49668688 | 2892 | /* Now module is in final location, initialize linked lists, etc. */ |
9f85a4bb RR |
2893 | err = module_unload_init(mod); |
2894 | if (err) | |
1fb9341a | 2895 | goto unlink_mod; |
1da177e4 | 2896 | |
cf2fde7b | 2897 | init_param_lock(mod); |
b51d23e4 | 2898 | |
24b9f0d2 SS |
2899 | /* |
2900 | * Now we've got everything in the final locations, we can | |
2901 | * find optional sections. | |
2902 | */ | |
eb3057df FH |
2903 | err = find_module_sections(mod, info); |
2904 | if (err) | |
2905 | goto free_unload; | |
9b37ccfc | 2906 | |
419e1a20 | 2907 | err = check_export_symbol_versions(mod); |
22e268eb RR |
2908 | if (err) |
2909 | goto free_unload; | |
9841d61d | 2910 | |
c988d2b2 | 2911 | /* Set up MODINFO_ATTR fields */ |
34e1169d | 2912 | setup_modinfo(mod, info); |
c988d2b2 | 2913 | |
1da177e4 | 2914 | /* Fix up syms, so that st_value is a pointer to location. */ |
34e1169d | 2915 | err = simplify_symbols(mod, info); |
1da177e4 | 2916 | if (err < 0) |
d913188c | 2917 | goto free_modinfo; |
1da177e4 | 2918 | |
34e1169d | 2919 | err = apply_relocations(mod, info); |
22e268eb | 2920 | if (err < 0) |
d913188c | 2921 | goto free_modinfo; |
1da177e4 | 2922 | |
34e1169d | 2923 | err = post_relocation(mod, info); |
1da177e4 | 2924 | if (err < 0) |
d913188c | 2925 | goto free_modinfo; |
1da177e4 | 2926 | |
22e268eb | 2927 | flush_module_icache(mod); |
378bac82 | 2928 | |
6526c534 RR |
2929 | /* Now copy in args */ |
2930 | mod->args = strndup_user(uargs, ~0UL >> 1); | |
2931 | if (IS_ERR(mod->args)) { | |
2932 | err = PTR_ERR(mod->args); | |
2933 | goto free_arch_cleanup; | |
2934 | } | |
8d3b33f6 | 2935 | |
9294523e | 2936 | init_build_id(mod, info); |
ff49d74a | 2937 | |
a949ae56 SRRH |
2938 | /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ |
2939 | ftrace_module_init(mod); | |
2940 | ||
a3535c7e RR |
2941 | /* Finally it's fully formed, ready to start executing. */ |
2942 | err = complete_formation(mod, info); | |
2943 | if (err) | |
1fb9341a | 2944 | goto ddebug_cleanup; |
be593f4c | 2945 | |
4c973d16 JY |
2946 | err = prepare_coming_module(mod); |
2947 | if (err) | |
2948 | goto bug_cleanup; | |
2949 | ||
ae39e9ed SK |
2950 | mod->async_probe_requested = async_probe; |
2951 | ||
51f3d0f4 | 2952 | /* Module is ready to execute: parsing args may run module code. */ | 
51e158c1 | 2953 | after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, |
4355efbd | 2954 | -32768, 32767, mod, |
ecc86170 | 2955 | unknown_module_param_cb); |
51e158c1 RR |
2956 | if (IS_ERR(after_dashes)) { |
2957 | err = PTR_ERR(after_dashes); | |
4c973d16 | 2958 | goto coming_cleanup; |
51e158c1 RR |
2959 | } else if (after_dashes) { |
2960 | pr_warn("%s: parameters '%s' after `--' ignored\n", | |
2961 | mod->name, after_dashes); | |
2962 | } | |
1da177e4 | 2963 | |
ca86cad7 | 2964 | /* Link in to sysfs. */ |
34e1169d | 2965 | err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); |
1da177e4 | 2966 | if (err < 0) |
4c973d16 | 2967 | goto coming_cleanup; |
80a3d1bb | 2968 | |
1ce15ef4 JY |
2969 | if (is_livepatch_module(mod)) { |
2970 | err = copy_module_elf(mod, info); | |
2971 | if (err < 0) | |
2972 | goto sysfs_cleanup; | |
2973 | } | |
2974 | ||
48fd1188 | 2975 | /* Get rid of temporary copy. */ |
b1ae6dc4 | 2976 | free_copy(info, flags); |
1da177e4 LT |
2977 | |
2978 | /* Done! */ | |
51f3d0f4 | 2979 | trace_module_load(mod); |
34e1169d KC |
2980 | |
2981 | return do_init_module(mod); | |
1da177e4 | 2982 | |
1ce15ef4 JY |
2983 | sysfs_cleanup: |
2984 | mod_sysfs_teardown(mod); | |
4c973d16 | 2985 | coming_cleanup: |
885a78d4 | 2986 | mod->state = MODULE_STATE_GOING; |
a5544880 | 2987 | destroy_params(mod->kp, mod->num_kp); |
4c973d16 JY |
2988 | blocking_notifier_call_chain(&module_notify_list, |
2989 | MODULE_STATE_GOING, mod); | |
7e545d6e | 2990 | klp_module_going(mod); |
1fb9341a | 2991 | bug_cleanup: |
5e8ed280 | 2992 | mod->state = MODULE_STATE_GOING; |
1fb9341a | 2993 | /* module_bug_cleanup needs module_mutex protection */ |
75676500 | 2994 | mutex_lock(&module_mutex); |
5336377d | 2995 | module_bug_cleanup(mod); |
ee61abb3 | 2996 | mutex_unlock(&module_mutex); |
ff7e0055 | 2997 | |
a3535c7e | 2998 | ddebug_cleanup: |
1323eac7 | 2999 | ftrace_release_mod(mod); |
cb2f5536 | 3000 | synchronize_rcu(); |
6526c534 RR |
3001 | kfree(mod->args); |
3002 | free_arch_cleanup: | |
1da177e4 | 3003 | module_arch_cleanup(mod); |
d913188c | 3004 | free_modinfo: |
a263f776 | 3005 | free_modinfo(mod); |
22e268eb | 3006 | free_unload: |
1da177e4 | 3007 | module_unload_free(mod); |
1fb9341a RR |
3008 | unlink_mod: |
3009 | mutex_lock(&module_mutex); | |
3010 | /* Unlink carefully: kallsyms could be walking list. */ | |
3011 | list_del_rcu(&mod->list); | |
758556bd | 3012 | mod_tree_remove(mod); |
1fb9341a | 3013 | wake_up_all(&module_wq); |
0be964be | 3014 | /* Wait for RCU-sched synchronization before releasing mod->list. */ | 
cb2f5536 | 3015 | synchronize_rcu(); |
1fb9341a | 3016 | mutex_unlock(&module_mutex); |
d913188c | 3017 | free_module: |
df3e764d | 3018 | mod_stat_bump_invalid(info, flags); |
35a9393c | 3019 | /* Free lock-classes; relies on the preceding sync_rcu() */ |
ac3b4328 SL |
3020 | for_class_mod_mem_type(type, core_data) { |
3021 | lockdep_free_key_range(mod->mem[type].base, | |
3022 | mod->mem[type].size); | |
3023 | } | |
35a9393c | 3024 | |
34e1169d | 3025 | module_deallocate(mod, info); |
d913188c | 3026 | free_copy: |
df3e764d LC |
3027 | /* |
3028 | * info->len is always set. We distinguish between failures | 
3029 | * that happen after the module was allocated and those | 
3030 | * that happen before. | 
3031 | */ | |
3032 | if (!module_allocated) | |
3033 | mod_stat_bump_becoming(info, flags); | |
b1ae6dc4 | 3034 | free_copy(info, flags); |
34e1169d | 3035 | return err; |
b99b87f7 PO |
3036 | } |
3037 | ||
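
load_module() above unwinds partially-built state through a ladder of goto labels, each label releasing what was acquired before the failing step, in reverse order of setup. Below is a minimal standalone sketch of that idiom; the resources and helper names are hypothetical stand-ins, not kernel APIs.

```c
/*
 * Minimal standalone sketch of the goto-unwind idiom used by
 * load_module(): every setup step that can fail jumps to a label
 * that frees everything acquired so far, in reverse order.
 * The resources here are hypothetical, not kernel APIs.
 */
#include <stdio.h>
#include <stdlib.h>

static int setup_example(void)
{
	int err = -1;
	char *args = NULL, *layout = NULL;

	layout = malloc(64);			/* analogous to layout_and_allocate() */
	if (!layout)
		goto out;

	args = malloc(32);			/* analogous to copying in uargs */
	if (!args)
		goto free_layout;

	if (snprintf(args, 32, "dyndbg=+p") < 0)	/* analogous to parse_args() */
		goto free_args;

	printf("setup complete: %s\n", args);
	/* success path frees its own temporaries and returns 0 */
	free(args);
	free(layout);
	return 0;

free_args:
	free(args);
free_layout:
	free(layout);
out:
	return err;
}

int main(void)
{
	return setup_example() ? EXIT_FAILURE : EXIT_SUCCESS;
}
```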
17da2bd9 HC |
3038 | SYSCALL_DEFINE3(init_module, void __user *, umod, |
3039 | unsigned long, len, const char __user *, uargs) | |
1da177e4 | 3040 | { |
34e1169d KC |
3041 | int err; |
3042 | struct load_info info = { }; | |
1da177e4 | 3043 | |
34e1169d KC |
3044 | err = may_init_module(); |
3045 | if (err) | |
3046 | return err; | |
1da177e4 | 3047 | |
34e1169d KC |
3048 | pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n", |
3049 | umod, len, uargs); | |
1da177e4 | 3050 | |
34e1169d | 3051 | err = copy_module_from_user(umod, len, &info); |
df3e764d LC |
3052 | if (err) { |
3053 | mod_stat_inc(&failed_kreads); | |
3054 | mod_stat_add_long(len, &invalid_kread_bytes); | |
34e1169d | 3055 | return err; |
df3e764d | 3056 | } |
1da177e4 | 3057 | |
2f3238ae | 3058 | return load_module(&info, uargs, 0); |
34e1169d | 3059 | } |
94462ad3 | 3060 | |
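
For reference, init_module(2) as defined above expects the caller to have read the entire ELF image into memory first. A hedged userspace sketch follows; the module path and parameter string are made up, and syscall(2) is used directly since glibc provides no wrapper.

```c
/*
 * Userspace sketch of calling init_module(2): read a .ko into a
 * buffer and hand the whole image to the kernel. Hypothetical
 * module path; requires CAP_SYS_MODULE (typically root).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const char *path = "./example.ko";	/* hypothetical module */
	struct stat st;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0) {
		perror("open/fstat");
		return EXIT_FAILURE;
	}

	void *image = malloc(st.st_size);
	if (!image || read(fd, image, st.st_size) != st.st_size) {
		perror("read");
		return EXIT_FAILURE;
	}
	close(fd);

	/* third argument is the module parameter string, "" for none */
	if (syscall(SYS_init_module, image, (unsigned long)st.st_size, "") != 0) {
		perror("init_module");
		return EXIT_FAILURE;
	}
	free(image);
	return EXIT_SUCCESS;
}
```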
9b9879fc LT |
3061 | struct idempotent { |
3062 | const void *cookie; | |
3063 | struct hlist_node entry; | |
3064 | struct completion complete; | |
3065 | int ret; | |
3066 | }; | |
3067 | ||
3068 | #define IDEM_HASH_BITS 8 | |
3069 | static struct hlist_head idem_hash[1 << IDEM_HASH_BITS]; | |
3070 | static DEFINE_SPINLOCK(idem_lock); | |
3071 | ||
3072 | static bool idempotent(struct idempotent *u, const void *cookie) | |
3073 | { | |
3074 | int hash = hash_ptr(cookie, IDEM_HASH_BITS); | |
3075 | struct hlist_head *head = idem_hash + hash; | |
3076 | struct idempotent *existing; | |
3077 | bool first; | |
3078 | ||
3079 | u->ret = 0; | |
3080 | u->cookie = cookie; | |
3081 | init_completion(&u->complete); | |
3082 | ||
3083 | spin_lock(&idem_lock); | |
3084 | first = true; | |
3085 | hlist_for_each_entry(existing, head, entry) { | |
3086 | if (existing->cookie != cookie) | |
3087 | continue; | |
3088 | first = false; | |
3089 | break; | |
3090 | } | |
3091 | hlist_add_head(&u->entry, idem_hash + hash); | |
3092 | spin_unlock(&idem_lock); | |
3093 | ||
3094 | return !first; | |
3095 | } | |
3096 | ||
3097 | /* | |
3098 | * We were the first one with 'cookie' on the list, and we ended | |
3099 | * up completing the operation. We now need to walk the list, | |
3100 | * remove everybody - which includes ourselves - fill in the return | |
3101 | * value, and then complete the operation. | |
3102 | */ | |
f1962207 | 3103 | static int idempotent_complete(struct idempotent *u, int ret) |
9b9879fc LT |
3104 | { |
3105 | const void *cookie = u->cookie; | |
3106 | int hash = hash_ptr(cookie, IDEM_HASH_BITS); | |
3107 | struct hlist_head *head = idem_hash + hash; | |
3108 | struct hlist_node *next; | |
3109 | struct idempotent *pos; | |
3110 | ||
3111 | spin_lock(&idem_lock); | |
3112 | hlist_for_each_entry_safe(pos, next, head, entry) { | |
3113 | if (pos->cookie != cookie) | |
3114 | continue; | |
3115 | hlist_del(&pos->entry); | |
3116 | pos->ret = ret; | |
3117 | complete(&pos->complete); | |
3118 | } | |
3119 | spin_unlock(&idem_lock); | |
f1962207 | 3120 | return ret; |
9b9879fc LT |
3121 | } |
3122 | ||
054a7300 | 3123 | static int init_module_from_file(struct file *f, const char __user * uargs, int flags) |
34e1169d | 3124 | { |
34e1169d | 3125 | struct load_info info = { }; |
b1ae6dc4 | 3126 | void *buf = NULL; |
f1962207 | 3127 | int len; |
9b9879fc | 3128 | |
054a7300 | 3129 | len = kernel_read_file(f, 0, &buf, INT_MAX, NULL, READING_MODULE); |
df3e764d LC |
3130 | if (len < 0) { |
3131 | mod_stat_inc(&failed_kreads); | |
b1ae6dc4 | 3132 | return len; |
df3e764d | 3133 | } |
b1ae6dc4 DT |
3134 | |
3135 | if (flags & MODULE_INIT_COMPRESSED_FILE) { | |
054a7300 | 3136 | int err = module_decompress(&info, buf, len); |
b1ae6dc4 | 3137 | vfree(buf); /* compressed data is no longer needed */ |
df3e764d LC |
3138 | if (err) { |
3139 | mod_stat_inc(&failed_decompress); | |
3140 | mod_stat_add_long(len, &invalid_decompress_bytes); | |
b1ae6dc4 | 3141 | return err; |
df3e764d | 3142 | } |
b1ae6dc4 DT |
3143 | } else { |
3144 | info.hdr = buf; | |
3145 | info.len = len; | |
3146 | } | |
1da177e4 | 3147 | |
f1962207 LT |
3148 | return load_module(&info, uargs, flags); |
3149 | } | |
3150 | ||
3151 | static int idempotent_init_module(struct file *f, const char __user * uargs, int flags) | |
3152 | { | |
3153 | struct idempotent idem; | |
3154 | ||
3155 | if (!f || !(f->f_mode & FMODE_READ)) | |
3156 | return -EBADF; | |
3157 | ||
3158 | /* See if somebody else is already doing the operation. */ | 
3159 | if (idempotent(&idem, file_inode(f))) { | |
3160 | wait_for_completion(&idem.complete); | |
3161 | return idem.ret; | |
3162 | } | |
3163 | ||
3164 | /* Otherwise, we'll do it and complete others */ | |
3165 | return idempotent_complete(&idem, | |
3166 | init_module_from_file(f, uargs, flags)); | |
1da177e4 LT |
3167 | } |
3168 | ||
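
The idempotent_*() helpers above let concurrent loads of the same file share one outcome: the first caller (keyed by file_inode(f)) performs the load, and later callers simply wait on the completion and reuse its return value. Below is a standalone sketch of that pattern using pthreads, simplified to a single list under one mutex instead of the kernel's hash table; all names are hypothetical.

```c
/*
 * Standalone sketch of the "idempotent" pattern: concurrent callers
 * with the same cookie share one execution. The first caller runs
 * the work; the others block until it completes and reuse its result.
 */
#include <pthread.h>
#include <stdbool.h>

struct idem_waiter {
	const void *cookie;
	struct idem_waiter *next;
	pthread_cond_t done;
	bool complete;
	int ret;
};

static pthread_mutex_t idem_lock = PTHREAD_MUTEX_INITIALIZER;
static struct idem_waiter *idem_list;

/* Returns true if someone else already claimed this cookie. */
static bool idem_start(struct idem_waiter *w, const void *cookie)
{
	bool first = true;

	w->cookie = cookie;
	w->complete = false;
	w->ret = 0;
	pthread_cond_init(&w->done, NULL);

	pthread_mutex_lock(&idem_lock);
	for (struct idem_waiter *p = idem_list; p; p = p->next)
		if (p->cookie == cookie)
			first = false;
	w->next = idem_list;
	idem_list = w;
	pthread_mutex_unlock(&idem_lock);

	return !first;
}

/* The first caller publishes the result to every waiter with the cookie. */
static int idem_complete(struct idem_waiter *w, int ret)
{
	pthread_mutex_lock(&idem_lock);
	for (struct idem_waiter **pp = &idem_list; *pp; ) {
		struct idem_waiter *p = *pp;

		if (p->cookie == w->cookie) {
			*pp = p->next;		/* unlink, including ourselves */
			p->ret = ret;
			p->complete = true;
			pthread_cond_signal(&p->done);
		} else {
			pp = &p->next;
		}
	}
	pthread_mutex_unlock(&idem_lock);
	return ret;
}

static int idem_wait(struct idem_waiter *w)
{
	pthread_mutex_lock(&idem_lock);
	while (!w->complete)
		pthread_cond_wait(&w->done, &idem_lock);
	pthread_mutex_unlock(&idem_lock);
	return w->ret;
}

int main(void)
{
	static int cookie;			/* stands in for file_inode(f) */
	struct idem_waiter w;

	if (idem_start(&w, &cookie))
		return idem_wait(&w);		/* somebody else does the work */
	return idem_complete(&w, 0);		/* we do the work ourselves */
}
```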
054a7300 LT |
3169 | SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) |
3170 | { | |
3171 | int err; | |
3172 | struct fd f; | |
3173 | ||
3174 | err = may_init_module(); | |
3175 | if (err) | |
3176 | return err; | |
3177 | ||
3178 | pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); | |
3179 | ||
3180 | if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS | |
3181 | |MODULE_INIT_IGNORE_VERMAGIC | |
3182 | |MODULE_INIT_COMPRESSED_FILE)) | |
3183 | return -EINVAL; | |
3184 | ||
3185 | f = fdget(fd); | |
f1962207 | 3186 | err = idempotent_init_module(f.file, uargs, flags); |
054a7300 LT |
3187 | fdput(f); |
3188 | return err; | |
3189 | } | |
3190 | ||
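
A hedged userspace sketch of calling finit_module(2) as defined above: the kernel is handed an open file descriptor rather than a copied image, which is also what enables the per-inode deduplication of concurrent loads. The module path is hypothetical; run with CAP_SYS_MODULE.

```c
/*
 * Userspace sketch of finit_module(2). Pass an open fd instead of a
 * buffer; MODULE_INIT_COMPRESSED_FILE would be set in the flags for
 * a compressed .ko (not used here). Hypothetical module path.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/module.h>	/* MODULE_INIT_* flags (unused in this sketch) */
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fd = open("./example.ko", O_RDONLY | O_CLOEXEC);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* "" = no module parameters, 0 = plain (uncompressed) image */
	if (syscall(SYS_finit_module, fd, "", 0) != 0) {
		perror("finit_module");
		close(fd);
		return EXIT_FAILURE;
	}
	close(fd);
	return EXIT_SUCCESS;
}
```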
7fd8329b | 3191 | /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */ |
17dd25c2 | 3192 | char *module_flags(struct module *mod, char *buf, bool show_state) |
fa3ba2e8 FM |
3193 | { |
3194 | int bx = 0; | |
3195 | ||
0d21b0e3 | 3196 | BUG_ON(mod->state == MODULE_STATE_UNFORMED); |
17dd25c2 AT |
3197 | if (!mod->taints && !show_state) |
3198 | goto out; | |
21aa9280 AV |
3199 | if (mod->taints || |
3200 | mod->state == MODULE_STATE_GOING || | |
3201 | mod->state == MODULE_STATE_COMING) { | |
fa3ba2e8 | 3202 | buf[bx++] = '('; |
c14e522b | 3203 | bx += module_flags_taint(mod->taints, buf + bx); |
21aa9280 | 3204 | /* Show a - for module-is-being-unloaded */ |
17dd25c2 | 3205 | if (mod->state == MODULE_STATE_GOING && show_state) |
21aa9280 AV |
3206 | buf[bx++] = '-'; |
3207 | /* Show a + for module-is-being-loaded */ | |
17dd25c2 | 3208 | if (mod->state == MODULE_STATE_COMING && show_state) |
21aa9280 | 3209 | buf[bx++] = '+'; |
fa3ba2e8 FM |
3210 | buf[bx++] = ')'; |
3211 | } | |
17dd25c2 | 3212 | out: |
fa3ba2e8 FM |
3213 | buf[bx] = '\0'; |
3214 | ||
3215 | return buf; | |
3216 | } | |
3217 | ||
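
The string built by module_flags() is what appears after a module name in oops output and, typically, in the last column of /proc/modules: taint characters inside parentheses, plus '+' while a module is still coming up and '-' while it is going away, e.g. "(OE+)". A small userspace sketch that prints that column, assuming the usual /proc/modules layout:

```c
/*
 * Print each module name and its trailing flag annotation, if any,
 * from /proc/modules -- where the string produced by module_flags()
 * is normally visible. Assumes the usual one-module-per-line layout.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/modules", "r");
	char line[512];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		char name[128] = "";
		const char *flags;

		line[strcspn(line, "\n")] = '\0';
		flags = strchr(line, '(');		/* e.g. "(OE)" */
		sscanf(line, "%127s", name);
		printf("%-24s %s\n", name, flags ? flags : "-");
	}
	fclose(f);
	return 0;
}
```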
1da177e4 LT |
3218 | /* Given an address, look for it in the module exception tables. */ |
3219 | const struct exception_table_entry *search_module_extables(unsigned long addr) | |
3220 | { | |
1da177e4 LT |
3221 | const struct exception_table_entry *e = NULL; |
3222 | struct module *mod; | |
3223 | ||
24da1cbf | 3224 | preempt_disable(); |
5ff22646 PZ |
3225 | mod = __module_address(addr); |
3226 | if (!mod) | |
3227 | goto out; | |
22a8bdeb | 3228 | |
5ff22646 PZ |
3229 | if (!mod->num_exentries) |
3230 | goto out; | |
3231 | ||
3232 | e = search_extable(mod->extable, | |
a94c33dd | 3233 | mod->num_exentries, |
5ff22646 PZ |
3234 | addr); |
3235 | out: | |
24da1cbf | 3236 | preempt_enable(); |
1da177e4 | 3237 | |
5ff22646 PZ |
3238 | /* |
3239 | * If we found one, we are currently running inside it, so the | 
3240 | * module cannot be unloaded and no refcount is needed. | 
3241 | */ | |
1da177e4 LT |
3242 | return e; |
3243 | } | |
3244 | ||
2541743e SS |
3245 | /** |
3246 | * is_module_address() - is this address inside a module? | |
e610499e RR |
3247 | * @addr: the address to check. |
3248 | * | |
3249 | * See is_module_text_address() if you simply want to see if the address | |
3250 | * is code (not data). | |
4d435f9d | 3251 | */ |
e610499e | 3252 | bool is_module_address(unsigned long addr) |
4d435f9d | 3253 | { |
e610499e | 3254 | bool ret; |
4d435f9d | 3255 | |
24da1cbf | 3256 | preempt_disable(); |
e610499e | 3257 | ret = __module_address(addr) != NULL; |
24da1cbf | 3258 | preempt_enable(); |
4d435f9d | 3259 | |
e610499e | 3260 | return ret; |
4d435f9d IM |
3261 | } |
3262 | ||
2541743e SS |
3263 | /** |
3264 | * __module_address() - get the module which contains an address. | |
e610499e RR |
3265 | * @addr: the address. |
3266 | * | |
3267 | * Must be called with preempt disabled or module mutex held so that | |
3268 | * module doesn't get freed during this. | |
3269 | */ | |
714f83d5 | 3270 | struct module *__module_address(unsigned long addr) |
1da177e4 LT |
3271 | { |
3272 | struct module *mod; | |
3273 | ||
01dc0386 | 3274 | if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max) |
ac3b4328 SL |
3275 | goto lookup; |
3276 | ||
01dc0386 | 3277 | #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC |
ac3b4328 SL |
3278 | if (addr >= mod_tree.data_addr_min && addr <= mod_tree.data_addr_max) |
3279 | goto lookup; | |
01dc0386 | 3280 | #endif |
3a642e99 | 3281 | |
ac3b4328 SL |
3282 | return NULL; |
3283 | ||
3284 | lookup: | |
0be964be PZ |
3285 | module_assert_mutex_or_preempt(); |
3286 | ||
ac3b4328 | 3287 | mod = mod_find(addr, &mod_tree); |
93c2e105 PZ |
3288 | if (mod) { |
3289 | BUG_ON(!within_module(addr, mod)); | |
0d21b0e3 | 3290 | if (mod->state == MODULE_STATE_UNFORMED) |
93c2e105 | 3291 | mod = NULL; |
0d21b0e3 | 3292 | } |
93c2e105 | 3293 | return mod; |
1da177e4 LT |
3294 | } |
3295 | ||
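
__module_address() first gates on mod_tree.addr_min/addr_max so that most addresses (core kernel text, stack, heap) never reach the tree lookup at all. A standalone sketch of that "cheap bounds check in front of an expensive lookup" idea, with invented ranges and a deliberately simple linear search standing in for the tree:

```c
/*
 * Sketch of the fast-path bounds check used by __module_address():
 * track the min/max of all tracked ranges and reject addresses
 * outside [min, max] before doing the real per-range lookup.
 */
#include <stddef.h>
#include <stdio.h>

struct range {
	unsigned long base;
	unsigned long size;
};

static struct range ranges[] = {
	{ 0x400000, 0x1000 },
	{ 0x800000, 0x2000 },
};
static unsigned long addr_min = 0x400000;
static unsigned long addr_max = 0x802000;

static const struct range *range_lookup(unsigned long addr)
{
	/* cheap gate: most addresses never reach the real lookup */
	if (addr < addr_min || addr > addr_max)
		return NULL;

	for (size_t i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
		if (addr >= ranges[i].base &&
		    addr < ranges[i].base + ranges[i].size)
			return &ranges[i];
	return NULL;
}

int main(void)
{
	printf("0x400800 tracked: %s\n", range_lookup(0x400800) ? "yes" : "no");
	printf("0x100000 tracked: %s\n", range_lookup(0x100000) ? "yes" : "no");
	return 0;
}
```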
2541743e SS |
3296 | /** |
3297 | * is_module_text_address() - is this address inside module code? | |
e610499e RR |
3298 | * @addr: the address to check. |
3299 | * | |
3300 | * See is_module_address() if you simply want to see if the address is | |
3301 | * anywhere in a module. See kernel_text_address() for testing if an | |
3302 | * address corresponds to kernel or module code. | |
3303 | */ | |
3304 | bool is_module_text_address(unsigned long addr) | |
3305 | { | |
3306 | bool ret; | |
3307 | ||
3308 | preempt_disable(); | |
3309 | ret = __module_text_address(addr) != NULL; | |
3310 | preempt_enable(); | |
3311 | ||
3312 | return ret; | |
3313 | } | |
3314 | ||
2541743e SS |
3315 | /** |
3316 | * __module_text_address() - get the module whose code contains an address. | |
e610499e RR |
3317 | * @addr: the address. |
3318 | * | |
3319 | * Must be called with preempt disabled or module mutex held so that | |
3320 | * module doesn't get freed during this. | |
3321 | */ | |
3322 | struct module *__module_text_address(unsigned long addr) | |
3323 | { | |
3324 | struct module *mod = __module_address(addr); | |
3325 | if (mod) { | |
3326 | /* Make sure it's within the text section. */ | |
ac3b4328 SL |
3327 | if (!within_module_mem_type(addr, mod, MOD_TEXT) && |
3328 | !within_module_mem_type(addr, mod, MOD_INIT_TEXT)) | |
e610499e RR |
3329 | mod = NULL; |
3330 | } | |
3331 | return mod; | |
3332 | } | |
3333 | ||
1da177e4 LT |
3334 | /* Don't grab lock, we're oopsing. */ |
3335 | void print_modules(void) | |
3336 | { | |
3337 | struct module *mod; | |
7fd8329b | 3338 | char buf[MODULE_FLAGS_BUF_SIZE]; |
1da177e4 | 3339 | |
b231125a | 3340 | printk(KERN_DEFAULT "Modules linked in:"); |
d72b3751 AK |
3341 | /* Most callers should already have preempt disabled, but make sure */ |
3342 | preempt_disable(); | |
0d21b0e3 RR |
3343 | list_for_each_entry_rcu(mod, &modules, list) { |
3344 | if (mod->state == MODULE_STATE_UNFORMED) | |
3345 | continue; | |
17dd25c2 | 3346 | pr_cont(" %s%s", mod->name, module_flags(mod, buf, true)); |
0d21b0e3 | 3347 | } |
99bd9956 AT |
3348 | |
3349 | print_unloaded_tainted_modules(); | |
d72b3751 | 3350 | preempt_enable(); |
6f1dae1d AT |
3351 | if (last_unloaded_module.name[0]) |
3352 | pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name, | |
3353 | last_unloaded_module.taints); | |
27bba4d6 | 3354 | pr_cont("\n"); |
1da177e4 | 3355 | } |
df3e764d LC |
3356 | |
3357 | #ifdef CONFIG_MODULE_DEBUGFS | |
3358 | struct dentry *mod_debugfs_root; | |
3359 | ||
3360 | static int module_debugfs_init(void) | |
3361 | { | |
3362 | mod_debugfs_root = debugfs_create_dir("modules", NULL); | |
3363 | return 0; | |
3364 | } | |
3365 | module_init(module_debugfs_init); | |
3366 | #endif |