arch/arm64/kernel/module-plts.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <[email protected]>
 */

#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/sort.h>

static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
                                            enum aarch64_insn_register reg)
{
        u32 adrp, add;

        adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
        add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
                                           AARCH64_INSN_VARIANT_64BIT,
                                           AARCH64_INSN_ADSB_ADD);

        return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
}
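
/*
 * Worked example (illustrative addresses): ADRP computes the 4 KB page of
 * 'dst' relative to the page of 'pc', and the ADD then supplies the low
 * 12 bits. With pc == 0xffff800008001008 and dst == 0xffff800010345678,
 * the ADRP leaves dst & ~0xfff == 0xffff800010345000 in 'reg', and the
 * ADD of dst % SZ_4K == 0x678 yields the full destination address.
 */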

struct plt_entry get_plt_entry(u64 dst, void *pc)
{
        struct plt_entry plt;
        static u32 br;

        if (!br)
                br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
                                                 AARCH64_INSN_BRANCH_NOLINK);

        plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
        plt.br = cpu_to_le32(br);

        return plt;
}
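
/*
 * A PLT entry is therefore a three-instruction veneer:
 *
 *	adrp	x16, <target page>
 *	add	x16, x16, #:lo12:<target>
 *	br	x16
 *
 * x16 (IP0) is used because the AAPCS64 reserves x16/x17 as scratch
 * registers that linker-generated veneers may clobber between a branch
 * and its target. The BR opcode never changes, so it is generated once
 * and cached in the static 'br' variable.
 */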

static bool plt_entries_equal(const struct plt_entry *a,
                              const struct plt_entry *b)
{
        u64 p, q;

        /*
         * Check whether both entries refer to the same target:
         * do the cheapest checks first.
         * If the 'add' or 'br' opcodes are different, then the target
         * cannot be the same.
         */
        if (a->add != b->add || a->br != b->br)
                return false;

        p = ALIGN_DOWN((u64)a, SZ_4K);
        q = ALIGN_DOWN((u64)b, SZ_4K);

        /*
         * If the 'adrp' opcodes are the same then we just need to check
         * that they refer to the same 4k region.
         */
        if (a->adrp == b->adrp && p == q)
                return true;

        return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
               (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}
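
/*
 * The final comparison handles entries that live in different 4 KB pages:
 * ADRP encodes a page-relative offset, so two entries reach the same
 * target page iff 'own page + encoded offset' agree. For example
 * (illustrative values), an entry in page 0x12000 with an encoded offset
 * of 0x5000 and one in page 0x13000 with an encoded offset of 0x4000
 * both resolve to page 0x17000 and count as duplicates, even though
 * their ADRP opcodes differ.
 */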

u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
                          void *loc, const Elf64_Rela *rela,
                          Elf64_Sym *sym)
{
        struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
                                                &mod->arch.core : &mod->arch.init;
        struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
        int i = pltsec->plt_num_entries;
        int j = i - 1;
        u64 val = sym->st_value + rela->r_addend;

        if (is_forbidden_offset_for_adrp(&plt[i].adrp))
                i++;

        plt[i] = get_plt_entry(val, &plt[i]);

        /*
         * Check if the entry we just created is a duplicate. Given that the
         * relocations are sorted, this will be the last entry we allocated
         * (if one exists).
         */
        if (j >= 0 && plt_entries_equal(plt + i, plt + j))
                return (u64)&plt[j];

        pltsec->plt_num_entries += i - j;
        if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
                return 0;

        return (u64)&plt[i];
}
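
/*
 * Note the 'i - j' accounting: in the common case i - j == 1 and a single
 * slot is consumed. When the candidate slot was skipped because its ADRP
 * word would land at a forbidden offset (see erratum 843419 below),
 * i - j == 2 and the dead slot is accounted for as well, which is what
 * the slack added by count_plts() pays for.
 */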

#ifdef CONFIG_ARM64_ERRATUM_843419
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
                                void *loc, u64 val)
{
        struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
                                                &mod->arch.core : &mod->arch.init;
        struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
        int i = pltsec->plt_num_entries++;
        u32 br;
        int rd;

        if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
                return 0;

        if (is_forbidden_offset_for_adrp(&plt[i].adrp))
                i = pltsec->plt_num_entries++;

        /* get the destination register of the ADRP instruction */
        rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
                                          le32_to_cpup((__le32 *)loc));

        br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
                                         AARCH64_INSN_BRANCH_NOLINK);

        plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
        plt[i].br = cpu_to_le32(br);

        return (u64)&plt[i];
}
#endif
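
/*
 * The veneer replays the problematic ADRP from a location whose offset
 * modulo 4 KB is known to be safe, then branches back to the instruction
 * following the original ADRP (loc + 4):
 *
 *	adrp	<rd>, <target page>	// safe copy of the ADRP
 *	add	<rd>, <rd>, #lo12	// zero: see below
 *	b	<loc + 4>		// resume after the ADRP
 *
 * The caller (reloc_insn_adrp() in module.c) passes a page-aligned 'val',
 * so the ADD contributes nothing and <rd> ends up holding exactly what
 * the original ADRP would have produced; the original ADRP site itself
 * is patched into a branch to this veneer.
 */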

#define cmp_3way(a, b)  ((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
        const Elf64_Rela *x = a, *y = b;
        int i;

        /* sort by type, symbol index and addend */
        i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
        if (i == 0)
                i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
        if (i == 0)
                i = cmp_3way(x->r_addend, y->r_addend);
        return i;
}
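
/*
 * cmp_3way() compares rather than subtracts: the operands are 64-bit
 * (r_addend is a signed 64-bit value), so returning 'a - b' truncated
 * to int could overflow and report the wrong ordering, whereas
 * '(a) < (b) ? -1 : (a) > (b)' yields the usual -1/0/1 without that
 * hazard.
 */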

static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
        /*
         * Entries are sorted by type, symbol index and addend. That means
         * that, if a duplicate entry exists, it must be in the preceding
         * slot.
         */
        return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}

static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
                               Elf64_Word dstidx, Elf_Shdr *dstsec)
{
        unsigned int ret = 0;
        Elf64_Sym *s;
        int i;

        for (i = 0; i < num; i++) {
                u64 min_align;

                switch (ELF64_R_TYPE(rela[i].r_info)) {
                case R_AARCH64_JUMP26:
                case R_AARCH64_CALL26:
                        if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
                                break;

                        /*
                         * We only have to consider branch targets that resolve
                         * to symbols that are defined in a different section.
                         * This is not simply a heuristic, it is a fundamental
                         * limitation, since there is no guaranteed way to emit
                         * PLT entries sufficiently close to the branch if the
                         * section size exceeds the range of a branch
                         * instruction. So ignore relocations against defined
                         * symbols if they live in the same section as the
                         * relocation target.
                         */
                        s = syms + ELF64_R_SYM(rela[i].r_info);
                        if (s->st_shndx == dstidx)
                                break;

                        /*
                         * Jump relocations with non-zero addends against
                         * undefined symbols are supported by the ELF spec, but
                         * do not occur in practice (e.g., 'jump n bytes past
                         * the entry point of undefined function symbol f').
                         * So we need to support them, but there is no need to
                         * take them into consideration when trying to optimize
                         * this code. So let's only check for duplicates when
                         * the addend is zero: this allows us to record the PLT
                         * entry address in the symbol table itself, rather than
                         * having to search the list for duplicates each time we
                         * emit one.
                         */
                        if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
                                ret++;
                        break;
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                case R_AARCH64_ADR_PREL_PG_HI21:
                        if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
                            !cpus_have_const_cap(ARM64_WORKAROUND_843419))
                                break;

                        /*
                         * Determine the minimal safe alignment for this ADRP
                         * instruction: the section alignment at which it is
                         * guaranteed not to appear at a vulnerable offset.
                         *
                         * This comes down to finding the least significant zero
                         * bit in bits [11:3] of the section offset, and
                         * increasing the section's alignment so that the
                         * resulting address of this instruction is guaranteed
                         * to equal the offset in that particular bit (as well
                         * as all less significant bits). This ensures that the
                         * address modulo 4 KB is never 0xff8 or 0xffc (which
                         * would have all ones in bits [11:3]).
                         */
                        min_align = 2ULL << ffz(rela[i].r_offset | 0x7);
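
                        /*
                         * Worked example (illustrative offsets): for
                         * r_offset == 0x108, 0x108 | 0x7 == 0x10f, whose
                         * lowest clear bit is bit 4, so min_align ==
                         * 2 << 4 == 32; aligning the section to 32 bytes
                         * pins bit 4 of the final address to zero, so
                         * bits [11:3] can never be all ones. For
                         * r_offset == 0x2ff8, bits [11:3] are already all
                         * ones and min_align == 2 << 12 > SZ_4K: any
                         * alignment of at least 4 KB leaves the address
                         * equal to 0xff8 modulo 4 KB, so a veneer slot
                         * must be reserved instead.
                         */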

                        /*
                         * Allocate veneer space for each ADRP that may appear
                         * at a vulnerable offset nonetheless. At relocation
                         * time, some of these will remain unused since some
                         * ADRP instructions can be patched to ADR instructions
                         * instead.
                         */
                        if (min_align > SZ_4K)
                                ret++;
                        else
                                dstsec->sh_addralign = max(dstsec->sh_addralign,
                                                           min_align);
                        break;
                }
        }

        if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
            cpus_have_const_cap(ARM64_WORKAROUND_843419))
                /*
                 * Add some slack so we can skip PLT slots that may trigger
                 * the erratum due to the placement of the ADRP instruction.
                 */
                ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));

        return ret;
}
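
/*
 * The slack term reserves one spare slot per 4 KB worth of PLT entries:
 * consecutive entries are sizeof(struct plt_entry) bytes apart, which is
 * wider than the 8-byte forbidden window at the end of each 4 KB page,
 * so at most one entry per page can have its ADRP word land there, and
 * each such entry costs one skipped slot in module_emit_plt_entry() or
 * module_emit_veneer_for_adrp().
 */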

static bool branch_rela_needs_plt(Elf64_Sym *syms, Elf64_Rela *rela,
                                  Elf64_Word dstidx)
{
        Elf64_Sym *s = syms + ELF64_R_SYM(rela->r_info);

        if (s->st_shndx == dstidx)
                return false;

        return ELF64_R_TYPE(rela->r_info) == R_AARCH64_JUMP26 ||
               ELF64_R_TYPE(rela->r_info) == R_AARCH64_CALL26;
}

/* Group branch PLT relas at the front end of the array. */
static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
                                      int numrels, Elf64_Word dstidx)
{
        int i = 0, j = numrels - 1;

        if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
                return 0;

        while (i < j) {
                if (branch_rela_needs_plt(syms, &rela[i], dstidx))
                        i++;
                else if (branch_rela_needs_plt(syms, &rela[j], dstidx))
                        swap(rela[i], rela[j]);
                else
                        j--;
        }

        return i;
}
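
/*
 * This is a two-pointer partition in the style of Hoare's scheme: 'i'
 * advances over relas that already need a PLT, 'j' retreats over relas
 * that do not, and mismatched pairs are swapped into place. Only the
 * first 'i' entries then need to be sorted by cmp_rela(), which keeps
 * the sort cost proportional to the number of PLT candidates rather
 * than the full relocation count.
 */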

int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                              char *secstrings, struct module *mod)
{
        unsigned long core_plts = 0;
        unsigned long init_plts = 0;
        Elf64_Sym *syms = NULL;
        Elf_Shdr *pltsec, *tramp = NULL;
        int i;

        /*
         * Find the empty .plt section so we can expand it to store the PLT
         * entries. Record the symtab address as well.
         */
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
                        mod->arch.core.plt_shndx = i;
                else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
                        mod->arch.init.plt_shndx = i;
                else if (!strcmp(secstrings + sechdrs[i].sh_name,
                                 ".text.ftrace_trampoline"))
                        tramp = sechdrs + i;
                else if (sechdrs[i].sh_type == SHT_SYMTAB)
                        syms = (Elf64_Sym *)sechdrs[i].sh_addr;
        }

        if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
                pr_err("%s: module PLT section(s) missing\n", mod->name);
                return -ENOEXEC;
        }
        if (!syms) {
                pr_err("%s: module symtab section missing\n", mod->name);
                return -ENOEXEC;
        }

        for (i = 0; i < ehdr->e_shnum; i++) {
                Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
                int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
                Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

                if (sechdrs[i].sh_type != SHT_RELA)
                        continue;

                /* ignore relocations that operate on non-exec sections */
                if (!(dstsec->sh_flags & SHF_EXECINSTR))
                        continue;

                /*
                 * sort branch relocations requiring a PLT by type, symbol index
                 * and addend
                 */
                nents = partition_branch_plt_relas(syms, rels, numrels,
                                                   sechdrs[i].sh_info);
                if (nents)
                        sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);

                if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
                        core_plts += count_plts(syms, rels, numrels,
                                                sechdrs[i].sh_info, dstsec);
                else
                        init_plts += count_plts(syms, rels, numrels,
                                                sechdrs[i].sh_info, dstsec);
        }

        pltsec = sechdrs + mod->arch.core.plt_shndx;
        pltsec->sh_type = SHT_NOBITS;
        pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
        pltsec->sh_addralign = L1_CACHE_BYTES;
        pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
        mod->arch.core.plt_num_entries = 0;
        mod->arch.core.plt_max_entries = core_plts;

        pltsec = sechdrs + mod->arch.init.plt_shndx;
        pltsec->sh_type = SHT_NOBITS;
        pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
        pltsec->sh_addralign = L1_CACHE_BYTES;
        pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
        mod->arch.init.plt_num_entries = 0;
        mod->arch.init.plt_max_entries = init_plts;
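
        /*
         * Note the '+ 1' in the sh_size calculations: the emit paths above
         * write a candidate entry at plt[plt_num_entries] (one slot further
         * when a forbidden ADRP offset forces a skip) before the
         * plt_max_entries bound is enforced, so one spare slot keeps that
         * probe write inside the section even in the WARN_ON overflow case.
         */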

        if (tramp) {
                tramp->sh_type = SHT_NOBITS;
                tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
                tramp->sh_addralign = __alignof__(struct plt_entry);
                tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
        }

        return 0;
}