/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/unwind.h>
#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif
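
/*
 * DEBUGP() compiles away by default; flipping the "#if 0" above enables
 * per-relocation debug output via printk(KERN_DEBUG ...). The
 * do { if (0) ... } while (0) form keeps the arguments type-checked even
 * while the output is disabled.
 */
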
#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif
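
/*
 * module_alloc() below adds this per-boot random offset to MODULES_VADDR,
 * so with KASLR enabled module mappings start 1..1024 pages past the
 * nominal base of the module area and module load addresses are not
 * predictable across boots. With KASLR disabled the offset is simply 0.
 */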
void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, GFP_KERNEL,
				 PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
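
/*
 * The relocation handlers below are chosen at build time: 32-bit kernels
 * implement apply_relocate() for Elf32 REL entries, 64-bit kernels
 * implement apply_relocate_add() for Elf64 RELA entries, which carry an
 * explicit addend. Both walk the relocation section and patch each target
 * location in the module image that was copied into module_alloc() memory.
 */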
#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
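
/*
 * For each RELA entry the value written is sym->st_value + r_addend, made
 * PC-relative (val -= loc) for the PC32/PLT32/PC64 types. The target
 * location must still contain zero; a nonzero value indicates a corrupt or
 * already-applied relocation and the whole load is rejected. The 32-bit
 * wide types also verify that the value fits, which typically fails for
 * modules not built with -mcmodel=kernel.
 */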
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			*(u64 *)loc = val;
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			*(u32 *)loc = val;
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			*(s32 *)loc = val;
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			*(u32 *)loc = val;
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			*(u64 *)loc = val;
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}
#endif
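
/*
 * module_finalize() runs after all sections have been copied in and
 * relocated. It gives the module the same treatment the core kernel text
 * gets at boot: alternative instructions, SMP lock-prefix patching,
 * paravirt patching, jump-label NOPs, and registration of the ORC unwind
 * tables with the unwinder.
 */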
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
	}
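
	/*
	 * Any of these sections may be absent from a particular module, in
	 * which case the corresponding pointer stays NULL and that patching
	 * step is skipped below.
	 */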
	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}
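
/*
 * Called on module unload: drop the module from the SMP alternatives
 * bookkeeping so the lock-prefix patching code no longer references its
 * freed text.
 */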
void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}