// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel module help for x86.
 * Copyright (C) 2001 Rusty Russell.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/unwind.h>
#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
/* Debug output compiled out by default; flip the #if above to enable it. */
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif
#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif
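/*
 * Modules live in the dedicated [MODULES_VADDR, MODULES_END) window so
 * they stay within 32-bit (RIP-relative) reach of kernel text, as
 * -mcmodel=kernel requires; the KASLR offset above only shifts where
 * allocations start inside that window.
 */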
void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, gfp_mask, PAGE_KERNEL,
				 VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
				 NUMA_NO_NODE, __builtin_return_address(0));
	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
		vfree(p);
		return NULL;
	}
	return p;
}
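/*
 * 32-bit x86 uses REL relocations: there is no explicit addend, the value
 * already stored at the target location acts as the addend, hence the
 * "+=" updates in apply_relocate() below.
 */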
#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
		case R_386_PLT32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
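/*
 * x86_64 uses RELA relocations, which carry an explicit addend:
 *
 *	val = sym->st_value + rel[i].r_addend;
 *
 * so the computed value overwrites the target (which must still be zero)
 * instead of being added to it as in the REL case above.
 */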
static int __apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me,
		   void *(*write)(void *dest, const void *src, size_t len))
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 8);
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 4);
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 8);
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}
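/*
 * An UNFORMED module's text is still writable, so relocations can be
 * written with plain memcpy(). Late relocations (e.g. livepatch) target
 * text that is already mapped read-only and executable, so they must go
 * through text_poke() while holding text_mutex.
 */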
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write);

	if (!early) {
		text_poke_sync();
		mutex_unlock(&text_mutex);
	}

	return ret;
}

#endif
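/*
 * module_finalize() runs once all sections are laid out and relocated.
 * It applies to the module image the same text patching that
 * alternative_instructions() applies to the core kernel at boot.
 */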
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL,
		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
		*calls = NULL, *cfi = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
			retpolines = s;
		if (!strcmp(".return_sites", secstrings + s->sh_name))
			returns = s;
		if (!strcmp(".call_sites", secstrings + s->sh_name))
			calls = s;
		if (!strcmp(".cfi_sites", secstrings + s->sh_name))
			cfi = s;
		if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
			ibt_endbr = s;
	}
	/*
	 * See alternative_instructions() for the ordering rules between the
	 * various patching types.
	 */
	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}
	if (retpolines || cfi) {
		void *rseg = NULL, *cseg = NULL;
		unsigned int rsize = 0, csize = 0;

		if (retpolines) {
			rseg = (void *)retpolines->sh_addr;
			rsize = retpolines->sh_size;
		}

		if (cfi) {
			cseg = (void *)cfi->sh_addr;
			csize = cfi->sh_size;
		}

		apply_fineibt(rseg, rseg + rsize, cseg, cseg + csize);
	}
	if (retpolines) {
		void *rseg = (void *)retpolines->sh_addr;
		apply_retpolines(rseg, rseg + retpolines->sh_size);
	}
	if (returns) {
		void *rseg = (void *)returns->sh_addr;
		apply_returns(rseg, rseg + returns->sh_size);
	}
	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (calls || para) {
		struct callthunk_sites cs = {};

		if (calls) {
			cs.call_start = (void *)calls->sh_addr;
			cs.call_end = (void *)calls->sh_addr + calls->sh_size;
		}

		if (para) {
			cs.pv_start = (void *)para->sh_addr;
			cs.pv_end = (void *)para->sh_addr + para->sh_size;
		}

		callthunks_patch_module_calls(&cs, me);
	}
	if (ibt_endbr) {
		void *iseg = (void *)ibt_endbr->sh_addr;
		apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size);
	}
	if (locks) {
		void *lseg = (void *)locks->sh_addr;
		void *text = me->core_layout.base;
		void *text_end = text + me->core_layout.text_size;

		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    text, text_end);
	}

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}
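/*
 * Module unload: drop the .smp_locks registration taken out by
 * alternatives_smp_module_add() in module_finalize() above.
 */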
void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}