// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 */
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
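
/*
 * Allocate memory for a module. The allocation is first attempted within
 * MODULES_VSIZE of module_alloc_base; if that region is exhausted and
 * module PLTs are available, fall back to a 2 GiB window, which is only
 * safe because PLT veneers can then be emitted for out-of-range branches.
 */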
void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;
	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		/* don't exceed the static module region - see below */
		module_alloc_end = MODULES_END;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
				NUMA_NO_NODE, __builtin_return_address(0));
	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
	     (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	      !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
		/*
		 * KASAN without KASAN_VMALLOC can only deal with module
		 * allocations being served from the reserved module region,
		 * since the remainder of the vmalloc region is already
		 * backed by zero shadow pages, and punching holes into it
		 * is non-trivial. Since the module region is not randomized
		 * when KASAN is enabled without KASAN_VMALLOC, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + SZ_2G, GFP_KERNEL,
				PAGE_KERNEL, 0, NUMA_NO_NODE,
				__builtin_return_address(0));
	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
		vfree(p);
		return NULL;
	}

	/* Memory is intended to be executable, reset the pointer tag. */
	return kasan_reset_tag(p);
}
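
/*
 * Kinds of operation used to turn a symbol value (S + A) into the value
 * that is actually patched into 'place' (P): absolute, place-relative,
 * or relative between 4 KiB pages (as used by ADRP).
 */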
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
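
/*
 * Patch a 'len'-bit immediate, taken from bit 'lsb' of the relocation
 * value, into the instruction at 'place'. Returns -ERANGE if the bits
 * discarded above the field are not a plain sign extension of it.
 */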
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
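
/*
 * ADRP instructions at the final two word-aligned offsets of a 4 KiB page
 * can be mis-executed on cores affected by Cortex-A53 erratum #843419
 * (see CONFIG_ARM64_ERRATUM_843419); such a place is patched to an ADR if
 * the target is close enough, or routed through a veneer otherwise.
 */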
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);

	return 0;
}
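
/*
 * Arch hook called by the generic module loader for each SHT_RELA section,
 * resolving every relocation entry against the final addresses of the
 * module's sections and symbols.
 */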
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
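
/* Look up a section header by name in the module's ELF image. */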
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
				    const Elf_Shdr *sechdrs,
				    const char *name)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(name, secstrs + s->sh_name) == 0)
			return s;
	}

	return NULL;
}
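
/* Fill in one PLT entry so that it acts as a trampoline to 'addr'. */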
static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}

static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		__init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}
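
/*
 * Final per-module fixups: apply any alternative-instruction patching
 * recorded in .altinstructions, then set up the ftrace trampoline PLTs.
 */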
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;

	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);

	return module_init_ftrace_plt(hdr, sechdrs, me);
}