1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/fs/binfmt_elf.c
5 * These are the functions used to load ELF format executables as used
6 * on SVr4 machines. Information on the format may be found in the book
7 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
13 #include <linux/module.h>
14 #include <linux/kernel.h>
17 #include <linux/mman.h>
18 #include <linux/errno.h>
19 #include <linux/signal.h>
20 #include <linux/binfmts.h>
21 #include <linux/string.h>
22 #include <linux/file.h>
23 #include <linux/slab.h>
24 #include <linux/personality.h>
25 #include <linux/elfcore.h>
26 #include <linux/init.h>
27 #include <linux/highuid.h>
28 #include <linux/compiler.h>
29 #include <linux/highmem.h>
30 #include <linux/hugetlb.h>
31 #include <linux/pagemap.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/random.h>
35 #include <linux/elf.h>
36 #include <linux/elf-randomize.h>
37 #include <linux/utsname.h>
38 #include <linux/coredump.h>
39 #include <linux/sched.h>
40 #include <linux/sched/coredump.h>
41 #include <linux/sched/task_stack.h>
42 #include <linux/sched/cputime.h>
43 #include <linux/sizes.h>
44 #include <linux/types.h>
45 #include <linux/cred.h>
46 #include <linux/dax.h>
47 #include <linux/uaccess.h>
48 #include <asm/param.h>
56 #define user_long_t long
58 #ifndef user_siginfo_t
59 #define user_siginfo_t siginfo_t
62 /* That's for binfmt_elf_fdpic to deal with */
63 #ifndef elf_check_fdpic
64 #define elf_check_fdpic(ex) false
67 static int load_elf_binary(struct linux_binprm *bprm);
70 static int load_elf_library(struct file *);
72 #define load_elf_library NULL
76 * If we don't support core dumping, then supply a NULL so we
79 #ifdef CONFIG_ELF_CORE
80 static int elf_core_dump(struct coredump_params *cprm);
82 #define elf_core_dump NULL
85 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
86 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
88 #define ELF_MIN_ALIGN PAGE_SIZE
91 #ifndef ELF_CORE_EFLAGS
92 #define ELF_CORE_EFLAGS 0
95 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
96 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
97 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
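/*
 * Worked example, assuming ELF_MIN_ALIGN == 4096:
 *   ELF_PAGESTART(0x12345)  == 0x12000 (round down to the page base)
 *   ELF_PAGEOFFSET(0x12345) == 0x345   (offset within that page)
 *   ELF_PAGEALIGN(0x12345)  == 0x13000 (round up to the next boundary)
 */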
99 static struct linux_binfmt elf_format = {
100 .module = THIS_MODULE,
101 .load_binary = load_elf_binary,
102 .load_shlib = load_elf_library,
103 .core_dump = elf_core_dump,
104 .min_coredump = ELF_EXEC_PAGESIZE,
107 #define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
109 static int set_brk(unsigned long start, unsigned long end, int prot)
111 start = ELF_PAGEALIGN(start);
112 end = ELF_PAGEALIGN(end);
115 * Map the last of the bss segment.
116 * If the header is requesting these pages to be
117 * executable, honour that (ppc32 needs this).
119 int error = vm_brk_flags(start, end - start,
120 prot & PROT_EXEC ? VM_EXEC : 0);
124 current->mm->start_brk = current->mm->brk = end;
128 /* We need to explicitly zero any fractional pages
129 after the data section (i.e. bss). This would
130 contain the junk from the file that should not
133 static int padzero(unsigned long elf_bss)
137 nbyte = ELF_PAGEOFFSET(elf_bss);
139 nbyte = ELF_MIN_ALIGN - nbyte;
140 if (clear_user((void __user *) elf_bss, nbyte))
146 /* Let's use some macros to make this stack manipulation a little clearer */
147 #ifdef CONFIG_STACK_GROWSUP
148 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
149 #define STACK_ROUND(sp, items) \
150 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
151 #define STACK_ALLOC(sp, len) ({ \
152 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
155 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
156 #define STACK_ROUND(sp, items) \
157 (((unsigned long) (sp - items)) &~ 15UL)
158 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
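/*
 * In the common grow-down case above, STACK_ALLOC(sp, len) moves sp down by
 * len bytes and yields the new (lower) address for the caller to copy into,
 * while STACK_ROUND() keeps the final stack pointer 16-byte aligned.
 */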
161 #ifndef ELF_BASE_PLATFORM
163 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
164 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
165 * will be copied to the user stack in the same manner as AT_PLATFORM.
167 #define ELF_BASE_PLATFORM NULL
171 create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
172 unsigned long load_addr, unsigned long interp_load_addr,
173 unsigned long e_entry)
175 struct mm_struct *mm = current->mm;
176 unsigned long p = bprm->p;
177 int argc = bprm->argc;
178 int envc = bprm->envc;
179 elf_addr_t __user *sp;
180 elf_addr_t __user *u_platform;
181 elf_addr_t __user *u_base_platform;
182 elf_addr_t __user *u_rand_bytes;
183 const char *k_platform = ELF_PLATFORM;
184 const char *k_base_platform = ELF_BASE_PLATFORM;
185 unsigned char k_rand_bytes[16];
187 elf_addr_t *elf_info;
189 const struct cred *cred = current_cred();
190 struct vm_area_struct *vma;
193 * In some cases (e.g. Hyper-Threading), we want to avoid L1
194 * evictions by the processes running on the same package. One
195 * thing we can do is to shuffle the initial stack for them.
198 p = arch_align_stack(p);
201 * If this architecture has a platform capability string, copy it
202 * to userspace. In some cases (Sparc), this info is impossible
203 * for userspace to get any other way, in others (i386) it is
208 size_t len = strlen(k_platform) + 1;
210 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
211 if (__copy_to_user(u_platform, k_platform, len))
216 * If this architecture has a "base" platform capability
217 * string, copy it to userspace.
219 u_base_platform = NULL;
220 if (k_base_platform) {
221 size_t len = strlen(k_base_platform) + 1;
223 u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
224 if (__copy_to_user(u_base_platform, k_base_platform, len))
229 * Generate 16 random bytes for userspace PRNG seeding.
231 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
232 u_rand_bytes = (elf_addr_t __user *)
233 STACK_ALLOC(p, sizeof(k_rand_bytes));
234 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
237 /* Create the ELF interpreter info */
238 elf_info = (elf_addr_t *)mm->saved_auxv;
239 /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
240 #define NEW_AUX_ENT(id, val) \
248 * ARCH_DLINFO must come first so PPC can do its special alignment of
250 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
251 * ARCH_DLINFO changes
255 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
256 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
257 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
258 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
259 NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
260 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
261 NEW_AUX_ENT(AT_BASE, interp_load_addr);
262 NEW_AUX_ENT(AT_FLAGS, 0);
263 NEW_AUX_ENT(AT_ENTRY, e_entry);
264 NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
265 NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
266 NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
267 NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
268 NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
269 NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
271 NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
273 NEW_AUX_ENT(AT_EXECFN, bprm->exec);
275 NEW_AUX_ENT(AT_PLATFORM,
276 (elf_addr_t)(unsigned long)u_platform);
278 if (k_base_platform) {
279 NEW_AUX_ENT(AT_BASE_PLATFORM,
280 (elf_addr_t)(unsigned long)u_base_platform);
282 if (bprm->have_execfd) {
283 NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
286 /* AT_NULL is zero; clear the rest too */
287 memset(elf_info, 0, (char *)mm->saved_auxv +
288 sizeof(mm->saved_auxv) - (char *)elf_info);
290 /* And advance past the AT_NULL entry. */
293 ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
294 sp = STACK_ADD(p, ei_index);
296 items = (argc + 1) + (envc + 1) + 1;
297 bprm->p = STACK_ROUND(sp, items);
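/*
 * Sketch of the initial stack image built below, lowest address first:
 * argc, argv[0..argc-1], NULL, envp[0..envc-1], NULL, then the auxv
 * (id, value) pairs copied from mm->saved_auxv, terminated by AT_NULL.
 */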
299 /* Point sp at the lowest address on the stack */
300 #ifdef CONFIG_STACK_GROWSUP
301 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
302 bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
304 sp = (elf_addr_t __user *)bprm->p;
309 * Grow the stack manually; some architectures have a limit on how
310 * far ahead a user-space access may be in order to grow the stack.
312 vma = find_extend_vma(mm, bprm->p);
316 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
317 if (__put_user(argc, sp++))
320 /* Populate list of argv pointers back to argv strings. */
321 p = mm->arg_end = mm->arg_start;
324 if (__put_user((elf_addr_t)p, sp++))
326 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
327 if (!len || len > MAX_ARG_STRLEN)
331 if (__put_user(0, sp++))
335 /* Populate list of envp pointers back to envp strings. */
336 mm->env_end = mm->env_start = p;
339 if (__put_user((elf_addr_t)p, sp++))
341 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
342 if (!len || len > MAX_ARG_STRLEN)
346 if (__put_user(0, sp++))
350 /* Put the elf_info on the stack in the right place. */
351 if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
358 static unsigned long elf_map(struct file *filep, unsigned long addr,
359 const struct elf_phdr *eppnt, int prot, int type,
360 unsigned long total_size)
362 unsigned long map_addr;
363 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
364 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
365 addr = ELF_PAGESTART(addr);
366 size = ELF_PAGEALIGN(size);
368 /* mmap() will return -EINVAL if given a zero size, but a
369 * segment with zero filesize is perfectly valid */
374 * total_size is the size of the ELF (interpreter) image.
375 * The _first_ mmap needs to know the full size, otherwise
376 * randomization might put this image into an overlapping
377 * position with the ELF binary image. (since size < total_size)
378 * So we first map the 'big' image - and unmap the remainder at
379 * the end. (which unmap is needed for ELF images with holes.)
382 total_size = ELF_PAGEALIGN(total_size);
383 map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
384 if (!BAD_ADDR(map_addr))
385 vm_munmap(map_addr+size, total_size-size);
387 map_addr = vm_mmap(filep, addr, size, prot, type, off);
389 if ((type & MAP_FIXED_NOREPLACE) &&
390 PTR_ERR((void *)map_addr) == -EEXIST)
391 pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
392 task_pid_nr(current), current->comm, (void *)addr);
397 #endif /* !elf_map */
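/*
 * total_mapping_size() - number of bytes spanned by the PT_LOAD segments,
 * from the page-aligned start of the first one to the end of the last one.
 * The first mmap of an ET_DYN image reserves this whole range so that the
 * later per-segment mappings cannot collide with unrelated mappings.
 */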
399 static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
401 int i, first_idx = -1, last_idx = -1;
403 for (i = 0; i < nr; i++) {
404 if (cmds[i].p_type == PT_LOAD) {
413 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
414 ELF_PAGESTART(cmds[first_idx].p_vaddr);
417 static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
421 rv = kernel_read(file, buf, len, &pos);
422 if (unlikely(rv != len)) {
423 return (rv < 0) ? rv : -EIO;
429 * load_elf_phdrs() - load ELF program headers
430 * @elf_ex: ELF header of the binary whose program headers should be loaded
431 * @elf_file: the opened ELF binary file
433 * Loads ELF program headers from the binary file elf_file, which has the ELF
434 * header pointed to by elf_ex, into a newly allocated array. The caller is
435 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
437 static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
438 struct file *elf_file)
440 struct elf_phdr *elf_phdata = NULL;
441 int retval, err = -1;
445 * If the size of this structure has changed, then punt, since
446 * we will be doing the wrong thing.
448 if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
451 /* Sanity check the number of program headers... */
452 /* ...and their total size. */
453 size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
454 if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
457 elf_phdata = kmalloc(size, GFP_KERNEL);
461 /* Read in the program headers */
462 retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
478 #ifndef CONFIG_ARCH_BINFMT_ELF_STATE
481 * struct arch_elf_state - arch-specific ELF loading state
483 * This structure is used to preserve architecture specific data during
484 * the loading of an ELF file, throughout the checking of architecture
485 * specific ELF headers & through to the point where the ELF load is
486 * known to be proceeding (ie. SET_PERSONALITY).
488 * This implementation is a dummy for architectures which require no
491 struct arch_elf_state {
494 #define INIT_ARCH_ELF_STATE {}
497 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
498 * @ehdr: The main ELF header
499 * @phdr: The program header to check
500 * @elf: The open ELF file
501 * @is_interp: True if the phdr is from the interpreter of the ELF being
502 * loaded, else false.
503 * @state: Architecture-specific state preserved throughout the process
504 * of loading the ELF.
506 * Inspects the program header phdr to validate its correctness and/or
507 * suitability for the system. Called once per ELF program header in the
508 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
511 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
512 * with that return code.
514 static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
515 struct elf_phdr *phdr,
516 struct file *elf, bool is_interp,
517 struct arch_elf_state *state)
519 /* Dummy implementation, always proceed */
524 * arch_check_elf() - check an ELF executable
525 * @ehdr: The main ELF header
526 * @has_interp: True if the ELF has an interpreter, else false.
527 * @interp_ehdr: The interpreter's ELF header
528 * @state: Architecture-specific state preserved throughout the process
529 * of loading the ELF.
531 * Provides a final opportunity for architecture code to reject the loading
532 * of the ELF & cause an exec syscall to return an error. This is called after
533 * all program headers to be checked by arch_elf_pt_proc have been.
535 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
536 * with that return code.
538 static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
539 struct elfhdr *interp_ehdr,
540 struct arch_elf_state *state)
542 /* Dummy implementation, always proceed */
546 #endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
548 static inline int make_prot(u32 p_flags, struct arch_elf_state *arch_state,
549 bool has_interp, bool is_interp)
560 return arch_elf_adjust_prot(prot, arch_state, has_interp, is_interp);
563 /* This is much more generalized than the library routine read function,
564 so we keep this separate. Technically the library read function
565 is only provided so that we can read a.out libraries that have
568 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
569 struct file *interpreter,
570 unsigned long no_base, struct elf_phdr *interp_elf_phdata,
571 struct arch_elf_state *arch_state)
573 struct elf_phdr *eppnt;
574 unsigned long load_addr = 0;
575 int load_addr_set = 0;
576 unsigned long last_bss = 0, elf_bss = 0;
578 unsigned long error = ~0UL;
579 unsigned long total_size;
582 /* First of all, some simple consistency checks */
583 if (interp_elf_ex->e_type != ET_EXEC &&
584 interp_elf_ex->e_type != ET_DYN)
586 if (!elf_check_arch(interp_elf_ex) ||
587 elf_check_fdpic(interp_elf_ex))
589 if (!interpreter->f_op->mmap)
592 total_size = total_mapping_size(interp_elf_phdata,
593 interp_elf_ex->e_phnum);
599 eppnt = interp_elf_phdata;
600 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
601 if (eppnt->p_type == PT_LOAD) {
602 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
603 int elf_prot = make_prot(eppnt->p_flags, arch_state,
605 unsigned long vaddr = 0;
606 unsigned long k, map_addr;
608 vaddr = eppnt->p_vaddr;
609 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
610 elf_type |= MAP_FIXED_NOREPLACE;
611 else if (no_base && interp_elf_ex->e_type == ET_DYN)
614 map_addr = elf_map(interpreter, load_addr + vaddr,
615 eppnt, elf_prot, elf_type, total_size);
618 if (BAD_ADDR(map_addr))
621 if (!load_addr_set &&
622 interp_elf_ex->e_type == ET_DYN) {
623 load_addr = map_addr - ELF_PAGESTART(vaddr);
628 * Check to see if the section's size will overflow the
629 * allowed task size. Note that p_filesz must always be
630 * <= p_memsz so it's only necessary to check p_memsz.
632 k = load_addr + eppnt->p_vaddr;
634 eppnt->p_filesz > eppnt->p_memsz ||
635 eppnt->p_memsz > TASK_SIZE ||
636 TASK_SIZE - eppnt->p_memsz < k) {
642 * Find the end of the file mapping for this phdr, and
643 * keep track of the largest address we see for this.
645 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
650 * Do the same thing for the memory mapping - between
651 * elf_bss and last_bss is the bss section.
653 k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
662 * Now fill out the bss section: first pad the last page from
663 * the file up to the page boundary, and zero it from elf_bss
664 * up to the end of the page.
666 if (padzero(elf_bss)) {
671 * Next, align both the file and mem bss up to the page size,
672 * since this is where elf_bss was just zeroed up to, and where
673 * last_bss will end after the vm_brk_flags() below.
675 elf_bss = ELF_PAGEALIGN(elf_bss);
676 last_bss = ELF_PAGEALIGN(last_bss);
677 /* Finally, if there is still more bss to allocate, do it. */
678 if (last_bss > elf_bss) {
679 error = vm_brk_flags(elf_bss, last_bss - elf_bss,
680 bss_prot & PROT_EXEC ? VM_EXEC : 0);
691 * These are the functions used to load ELF style executables and shared
692 * libraries. There is no binary dependent code anywhere else.
695 static int parse_elf_property(const char *data, size_t *off, size_t datasz,
696 struct arch_elf_state *arch,
697 bool have_prev_type, u32 *prev_type)
700 const struct gnu_property *pr;
706 if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN))
711 if (datasz < sizeof(*pr))
713 pr = (const struct gnu_property *)(data + o);
715 datasz -= sizeof(*pr);
717 if (pr->pr_datasz > datasz)
720 WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN);
721 step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
725 /* Properties are supposed to be unique and sorted on pr_type: */
726 if (have_prev_type && pr->pr_type <= *prev_type)
728 *prev_type = pr->pr_type;
730 ret = arch_parse_elf_property(pr->pr_type, data + o,
731 pr->pr_datasz, ELF_COMPAT, arch);
739 #define NOTE_DATA_SZ SZ_1K
740 #define GNU_PROPERTY_TYPE_0_NAME "GNU"
741 #define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))
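/*
 * Rough layout of the PT_GNU_PROPERTY segment parsed below (one ELF note):
 *   struct elf_note (n_namesz, n_descsz, n_type == NT_GNU_PROPERTY_TYPE_0),
 *   the "GNU\0" name (NOTE_NAME_SZ bytes), then a descriptor holding
 *   struct gnu_property entries: (pr_type, pr_datasz) followed by pr_datasz
 *   bytes of data, padded to ELF_GNU_PROPERTY_ALIGN and sorted by pr_type.
 */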
743 static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
744 struct arch_elf_state *arch)
747 struct elf_note nhdr;
748 char data[NOTE_DATA_SZ];
757 if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr)
760 /* load_elf_binary() shouldn't call us unless this is true... */
761 if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
764 /* If the properties are crazy large, that's too bad (for now): */
765 if (phdr->p_filesz > sizeof(note))
768 pos = phdr->p_offset;
769 n = kernel_read(f, &note, phdr->p_filesz, &pos);
771 BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ);
772 if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ)
775 if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
776 note.nhdr.n_namesz != NOTE_NAME_SZ ||
777 strncmp(note.data + sizeof(note.nhdr),
778 GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
781 off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
782 ELF_GNU_PROPERTY_ALIGN);
786 if (note.nhdr.n_descsz > n - off)
788 datasz = off + note.nhdr.n_descsz;
790 have_prev_type = false;
792 ret = parse_elf_property(note.data, &off, datasz, arch,
793 have_prev_type, &prev_type);
794 have_prev_type = true;
797 return ret == -ENOENT ? 0 : ret;
800 static int load_elf_binary(struct linux_binprm *bprm)
802 struct file *interpreter = NULL; /* to shut gcc up */
803 unsigned long load_addr = 0, load_bias = 0;
804 int load_addr_set = 0;
806 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
807 struct elf_phdr *elf_property_phdata = NULL;
808 unsigned long elf_bss, elf_brk;
811 unsigned long elf_entry;
812 unsigned long e_entry;
813 unsigned long interp_load_addr = 0;
814 unsigned long start_code, end_code, start_data, end_data;
815 unsigned long reloc_func_desc __maybe_unused = 0;
816 int executable_stack = EXSTACK_DEFAULT;
817 struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
818 struct elfhdr *interp_elf_ex = NULL;
819 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
820 struct mm_struct *mm;
821 struct pt_regs *regs;
824 /* First of all, some simple consistency checks */
825 if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
828 if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
830 if (!elf_check_arch(elf_ex))
832 if (elf_check_fdpic(elf_ex))
834 if (!bprm->file->f_op->mmap)
837 elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
841 elf_ppnt = elf_phdata;
842 for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
843 char *elf_interpreter;
845 if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
846 elf_property_phdata = elf_ppnt;
850 if (elf_ppnt->p_type != PT_INTERP)
854 * This is the program interpreter used for shared libraries -
855 * for now assume that this is an a.out format binary.
858 if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
862 elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
863 if (!elf_interpreter)
866 retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
869 goto out_free_interp;
870 /* make sure path is NULL terminated */
872 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
873 goto out_free_interp;
875 interpreter = open_exec(elf_interpreter);
876 kfree(elf_interpreter);
877 retval = PTR_ERR(interpreter);
878 if (IS_ERR(interpreter))
882 * If the binary is not readable then enforce mm->dumpable = 0
883 * regardless of the interpreter's permissions.
885 would_dump(bprm, interpreter);
887 interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
888 if (!interp_elf_ex) {
893 /* Get the exec headers */
894 retval = elf_read(interpreter, interp_elf_ex,
895 sizeof(*interp_elf_ex), 0);
897 goto out_free_dentry;
902 kfree(elf_interpreter);
906 elf_ppnt = elf_phdata;
907 for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
908 switch (elf_ppnt->p_type) {
910 if (elf_ppnt->p_flags & PF_X)
911 executable_stack = EXSTACK_ENABLE_X;
913 executable_stack = EXSTACK_DISABLE_X;
916 case PT_LOPROC ... PT_HIPROC:
917 retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
921 goto out_free_dentry;
925 /* Some simple consistency checks for the interpreter */
928 /* Not an ELF interpreter */
929 if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
930 goto out_free_dentry;
931 /* Verify the interpreter has a valid arch */
932 if (!elf_check_arch(interp_elf_ex) ||
933 elf_check_fdpic(interp_elf_ex))
934 goto out_free_dentry;
936 /* Load the interpreter program headers */
937 interp_elf_phdata = load_elf_phdrs(interp_elf_ex,
939 if (!interp_elf_phdata)
940 goto out_free_dentry;
942 /* Pass PT_LOPROC..PT_HIPROC headers to arch code */
943 elf_property_phdata = NULL;
944 elf_ppnt = interp_elf_phdata;
945 for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
946 switch (elf_ppnt->p_type) {
947 case PT_GNU_PROPERTY:
948 elf_property_phdata = elf_ppnt;
951 case PT_LOPROC ... PT_HIPROC:
952 retval = arch_elf_pt_proc(interp_elf_ex,
953 elf_ppnt, interpreter,
956 goto out_free_dentry;
961 retval = parse_elf_properties(interpreter ?: bprm->file,
962 elf_property_phdata, &arch_state);
964 goto out_free_dentry;
967 * Allow arch code to reject the ELF at this point, whilst it's
968 * still possible to return an error to the code that invoked
971 retval = arch_check_elf(elf_ex,
972 !!interpreter, interp_elf_ex,
975 goto out_free_dentry;
977 /* Flush all traces of the currently running executable */
978 retval = begin_new_exec(bprm);
980 goto out_free_dentry;
982 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
983 may depend on the personality. */
984 SET_PERSONALITY2(*elf_ex, &arch_state);
985 if (elf_read_implies_exec(*elf_ex, executable_stack))
986 current->personality |= READ_IMPLIES_EXEC;
988 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
989 current->flags |= PF_RANDOMIZE;
991 setup_new_exec(bprm);
993 /* Do this so that we can load the interpreter, if need be. We will
994 change some of these later */
995 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
998 goto out_free_dentry;
1008 /* Now we do a little grungy work by mmapping the ELF image into
1009 the correct location in memory. */
1010 for(i = 0, elf_ppnt = elf_phdata;
1011 i < elf_ex->e_phnum; i++, elf_ppnt++) {
1012 int elf_prot, elf_flags;
1013 unsigned long k, vaddr;
1014 unsigned long total_size = 0;
1016 if (elf_ppnt->p_type != PT_LOAD)
1019 if (unlikely (elf_brk > elf_bss)) {
1020 unsigned long nbyte;
1022 /* There was a PT_LOAD segment with p_memsz > p_filesz
1023 before this one. Map anonymous pages, if needed,
1024 and clear the area. */
1025 retval = set_brk(elf_bss + load_bias,
1026 elf_brk + load_bias,
1029 goto out_free_dentry;
1030 nbyte = ELF_PAGEOFFSET(elf_bss);
1032 nbyte = ELF_MIN_ALIGN - nbyte;
1033 if (nbyte > elf_brk - elf_bss)
1034 nbyte = elf_brk - elf_bss;
1035 if (clear_user((void __user *)elf_bss +
1036 load_bias, nbyte)) {
1038 * This bss-zeroing can fail if the ELF
1039 * file specifies odd protections. So
1040 * we don't check the return value
1046 elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
1047 !!interpreter, false);
1049 elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
1051 vaddr = elf_ppnt->p_vaddr;
1053 * If we are loading ET_EXEC or we have already performed
1054 * the ET_DYN load_addr calculations, proceed normally.
1056 if (elf_ex->e_type == ET_EXEC || load_addr_set) {
1057 elf_flags |= MAP_FIXED;
1058 } else if (elf_ex->e_type == ET_DYN) {
1060 * This logic is run once for the first LOAD Program
1061 * Header for ET_DYN binaries to calculate the
1062 * randomization (load_bias) for all the LOAD
1063 * Program Headers, and to calculate the entire
1064 * size of the ELF mapping (total_size). (Note that
1065 * load_addr_set is set to true later once the
1066 * initial mapping is performed.)
1068 * There are effectively two types of ET_DYN
1069 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
1070 * and loaders (ET_DYN without INTERP, since they
1071 * _are_ the ELF interpreter). The loaders must
1072 * be loaded away from programs since the program
1073 * may otherwise collide with the loader (especially
1074 * for ET_EXEC which does not have a randomized
1075 * position). For example, to handle invocations of
1076 * "./ld.so someprog" to test out a new version of
1077 * the loader, the subsequent program that the
1078 * loader loads must avoid the loader itself, so
1079 * they cannot share the same load range. Sufficient
1080 * room for the brk must be allocated with the
1081 * loader as well, since brk must be available with
1084 * Therefore, programs are loaded offset from
1085 * ELF_ET_DYN_BASE and loaders are loaded into the
1086 * independently randomized mmap region (0 load_bias
1087 * without MAP_FIXED).
1090 load_bias = ELF_ET_DYN_BASE;
1091 if (current->flags & PF_RANDOMIZE)
1092 load_bias += arch_mmap_rnd();
1093 elf_flags |= MAP_FIXED;
1098 * Since load_bias is used for all subsequent loading
1099 * calculations, we must lower it by the first vaddr
1100 * so that the remaining calculations based on the
1101 * ELF vaddrs will be correctly offset. The result
1102 * is then page aligned.
1104 load_bias = ELF_PAGESTART(load_bias - vaddr);
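/*
 * After this adjustment, mapping at load_bias + vaddr places the
 * first PT_LOAD at (roughly) the base chosen above even when its
 * p_vaddr is non-zero; all later segments reuse the same load_bias.
 */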
1106 total_size = total_mapping_size(elf_phdata,
1110 goto out_free_dentry;
1114 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
1115 elf_prot, elf_flags, total_size);
1116 if (BAD_ADDR(error)) {
1117 retval = IS_ERR((void *)error) ?
1118 PTR_ERR((void*)error) : -EINVAL;
1119 goto out_free_dentry;
1122 if (!load_addr_set) {
1124 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
1125 if (elf_ex->e_type == ET_DYN) {
1126 load_bias += error -
1127 ELF_PAGESTART(load_bias + vaddr);
1128 load_addr += load_bias;
1129 reloc_func_desc = load_bias;
1132 k = elf_ppnt->p_vaddr;
1133 if ((elf_ppnt->p_flags & PF_X) && k < start_code)
1139 * Check to see if the section's size will overflow the
1140 * allowed task size. Note that p_filesz must always be
1141 * <= p_memsz so it is only necessary to check p_memsz.
1143 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
1144 elf_ppnt->p_memsz > TASK_SIZE ||
1145 TASK_SIZE - elf_ppnt->p_memsz < k) {
1146 /* set_brk can never work. Avoid overflows. */
1148 goto out_free_dentry;
1151 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1155 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1159 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1161 bss_prot = elf_prot;
1166 e_entry = elf_ex->e_entry + load_bias;
1167 elf_bss += load_bias;
1168 elf_brk += load_bias;
1169 start_code += load_bias;
1170 end_code += load_bias;
1171 start_data += load_bias;
1172 end_data += load_bias;
1174 /* Calling set_brk effectively mmaps the pages that we need
1175 * for the bss and break sections. We must do this before
1176 * mapping in the interpreter, to make sure it doesn't wind
1177 * up getting placed where the bss needs to go.
1179 retval = set_brk(elf_bss, elf_brk, bss_prot);
1181 goto out_free_dentry;
1182 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
1183 retval = -EFAULT; /* Nobody gets to see this, but.. */
1184 goto out_free_dentry;
1188 elf_entry = load_elf_interp(interp_elf_ex,
1190 load_bias, interp_elf_phdata,
1192 if (!IS_ERR((void *)elf_entry)) {
1194 * load_elf_interp() returns relocation
1197 interp_load_addr = elf_entry;
1198 elf_entry += interp_elf_ex->e_entry;
1200 if (BAD_ADDR(elf_entry)) {
1201 retval = IS_ERR((void *)elf_entry) ?
1202 (int)elf_entry : -EINVAL;
1203 goto out_free_dentry;
1205 reloc_func_desc = interp_load_addr;
1207 allow_write_access(interpreter);
1210 kfree(interp_elf_ex);
1211 kfree(interp_elf_phdata);
1213 elf_entry = e_entry;
1214 if (BAD_ADDR(elf_entry)) {
1216 goto out_free_dentry;
1222 set_binfmt(&elf_format);
1224 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
1225 retval = arch_setup_additional_pages(bprm, !!interpreter);
1228 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1230 retval = create_elf_tables(bprm, elf_ex,
1231 load_addr, interp_load_addr, e_entry);
1236 mm->end_code = end_code;
1237 mm->start_code = start_code;
1238 mm->start_data = start_data;
1239 mm->end_data = end_data;
1240 mm->start_stack = bprm->p;
1242 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
1244 * For architectures with ELF randomization, when executing
1245 * a loader directly (i.e. no interpreter listed in ELF
1246 * headers), move the brk area out of the mmap region
1247 * (since it grows up, and may collide early with the stack
1248 * growing down), and into the unused ELF_ET_DYN_BASE region.
1250 if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
1251 elf_ex->e_type == ET_DYN && !interpreter) {
1252 mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
1255 mm->brk = mm->start_brk = arch_randomize_brk(mm);
1256 #ifdef compat_brk_randomized
1257 current->brk_randomized = 1;
1261 if (current->personality & MMAP_PAGE_ZERO) {
1262 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1263 and some applications "depend" upon this behavior.
1264 Since we do not have the power to recompile these, we
1265 emulate the SVr4 behavior. Sigh. */
1266 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
1267 MAP_FIXED | MAP_PRIVATE, 0);
1270 regs = current_pt_regs();
1271 #ifdef ELF_PLAT_INIT
1273 * The ABI may specify that certain registers be set up in special
1274 * ways (on i386 %edx is the address of a DT_FINI function, for
1275 * example). In addition, it may also specify (e.g., PowerPC64 ELF)
1276 * that the e_entry field is the address of the function descriptor
1277 * for the startup routine, rather than the address of the startup
1278 * routine itself. This macro performs whatever initialization to
1279 * the regs structure is required as well as any relocations to the
1280 * function descriptor entries when executing dynamically linked apps.
1282 ELF_PLAT_INIT(regs, reloc_func_desc);
1285 finalize_exec(bprm);
1286 start_thread(regs, elf_entry, bprm->p);
1293 kfree(interp_elf_ex);
1294 kfree(interp_elf_phdata);
1295 allow_write_access(interpreter);
1303 #ifdef CONFIG_USELIB
1304 /* This is really simpleminded and specialized - we are loading an
1305 a.out library that is given an ELF header. */
1306 static int load_elf_library(struct file *file)
1308 struct elf_phdr *elf_phdata;
1309 struct elf_phdr *eppnt;
1310 unsigned long elf_bss, bss, len;
1311 int retval, error, i, j;
1312 struct elfhdr elf_ex;
1315 retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
1319 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1322 /* First of all, some simple consistency checks */
1323 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1324 !elf_check_arch(&elf_ex) || !file->f_op->mmap)
1326 if (elf_check_fdpic(&elf_ex))
1329 /* Now read in all of the header information */
1331 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1332 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1335 elf_phdata = kmalloc(j, GFP_KERNEL);
1341 retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
1345 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1346 if ((eppnt + i)->p_type == PT_LOAD)
1351 while (eppnt->p_type != PT_LOAD)
1354 /* Now use mmap to map the library into memory. */
1355 error = vm_mmap(file,
1356 ELF_PAGESTART(eppnt->p_vaddr),
1358 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1359 PROT_READ | PROT_WRITE | PROT_EXEC,
1360 MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_DENYWRITE,
1362 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1363 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1366 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1367 if (padzero(elf_bss)) {
1372 len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
1373 bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
1375 error = vm_brk(len, bss - len);
1386 #endif /* #ifdef CONFIG_USELIB */
1388 #ifdef CONFIG_ELF_CORE
1392 * Modelled on fs/exec.c:aout_core_dump()
1397 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1398 * that are useful for post-mortem analysis are included in every core dump.
1399 * In that way we ensure that the core dump is fully interpretable later
1400 * without matching up the same kernel and hardware config to see what PC values
1401 * meant. These special mappings include - vDSO, vsyscall, and other
1402 * architecture specific mappings
1404 static bool always_dump_vma(struct vm_area_struct *vma)
1406 /* Any vsyscall mappings? */
1407 if (vma == get_gate_vma(vma->vm_mm))
1411 * Assume that all vmas with a .name op should always be dumped.
1412 * If this changes, a new vm_ops field can easily be added.
1414 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1418 * arch_vma_name() returns non-NULL for special architecture mappings,
1419 * such as vDSO sections.
1421 if (arch_vma_name(vma))
1428 * Decide what to dump of a segment, part, all or none.
1430 static unsigned long vma_dump_size(struct vm_area_struct *vma,
1431 unsigned long mm_flags)
1433 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
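/*
 * The MMF_DUMP_* bits tested by FILTER() come from the per-process
 * coredump filter (tunable via /proc/<pid>/coredump_filter), so user
 * policy decides which classes of mappings are written to the dump.
 */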
1435 /* always dump the vdso and vsyscall sections */
1436 if (always_dump_vma(vma))
1439 if (vma->vm_flags & VM_DONTDUMP)
1442 /* support for DAX */
1443 if (vma_is_dax(vma)) {
1444 if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
1446 if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
1451 /* Hugetlb memory check */
1452 if (is_vm_hugetlb_page(vma)) {
1453 if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
1455 if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
1460 /* Do not dump I/O mapped devices or special mappings */
1461 if (vma->vm_flags & VM_IO)
1464 /* By default, dump shared memory if mapped from an anonymous file. */
1465 if (vma->vm_flags & VM_SHARED) {
1466 if (file_inode(vma->vm_file)->i_nlink == 0 ?
1467 FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
1472 /* Dump segments that have been written to. */
1473 if (vma->anon_vma && FILTER(ANON_PRIVATE))
1475 if (vma->vm_file == NULL)
1478 if (FILTER(MAPPED_PRIVATE))
1482 * If this looks like the beginning of a DSO or executable mapping,
1483 * check for an ELF header. If we find one, dump the first page to
1484 * aid in determining what was mapped here.
1486 if (FILTER(ELF_HEADERS) &&
1487 vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
1488 u32 __user *header = (u32 __user *) vma->vm_start;
1491 * Doing it this way gets the constant folded by GCC.
1495 char elfmag[SELFMAG];
1497 BUILD_BUG_ON(SELFMAG != sizeof word);
1498 magic.elfmag[EI_MAG0] = ELFMAG0;
1499 magic.elfmag[EI_MAG1] = ELFMAG1;
1500 magic.elfmag[EI_MAG2] = ELFMAG2;
1501 magic.elfmag[EI_MAG3] = ELFMAG3;
1502 if (unlikely(get_user(word, header)))
1504 if (word == magic.cmp)
1513 return vma->vm_end - vma->vm_start;
1516 /* An ELF note in memory */
1521 unsigned int datasz;
1525 static int notesize(struct memelfnote *en)
1529 sz = sizeof(struct elf_note);
1530 sz += roundup(strlen(en->name) + 1, 4);
1531 sz += roundup(en->datasz, 4);
1536 static int writenote(struct memelfnote *men, struct coredump_params *cprm)
1539 en.n_namesz = strlen(men->name) + 1;
1540 en.n_descsz = men->datasz;
1541 en.n_type = men->type;
1543 return dump_emit(cprm, &en, sizeof(en)) &&
1544 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1545 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
1548 static void fill_elf_header(struct elfhdr *elf, int segs,
1549 u16 machine, u32 flags)
1551 memset(elf, 0, sizeof(*elf));
1553 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1554 elf->e_ident[EI_CLASS] = ELF_CLASS;
1555 elf->e_ident[EI_DATA] = ELF_DATA;
1556 elf->e_ident[EI_VERSION] = EV_CURRENT;
1557 elf->e_ident[EI_OSABI] = ELF_OSABI;
1559 elf->e_type = ET_CORE;
1560 elf->e_machine = machine;
1561 elf->e_version = EV_CURRENT;
1562 elf->e_phoff = sizeof(struct elfhdr);
1563 elf->e_flags = flags;
1564 elf->e_ehsize = sizeof(struct elfhdr);
1565 elf->e_phentsize = sizeof(struct elf_phdr);
1566 elf->e_phnum = segs;
1569 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
1571 phdr->p_type = PT_NOTE;
1572 phdr->p_offset = offset;
1575 phdr->p_filesz = sz;
1581 static void fill_note(struct memelfnote *note, const char *name, int type,
1582 unsigned int sz, void *data)
1591 * fill up all the fields in prstatus from the given task struct, except
1592 * registers which need to be filled up separately.
1594 static void fill_prstatus(struct elf_prstatus *prstatus,
1595 struct task_struct *p, long signr)
1597 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1598 prstatus->pr_sigpend = p->pending.signal.sig[0];
1599 prstatus->pr_sighold = p->blocked.sig[0];
1601 prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1603 prstatus->pr_pid = task_pid_vnr(p);
1604 prstatus->pr_pgrp = task_pgrp_vnr(p);
1605 prstatus->pr_sid = task_session_vnr(p);
1606 if (thread_group_leader(p)) {
1607 struct task_cputime cputime;
1610 * This is the record for the group leader. It shows the
1611 * group-wide total, not its individual thread total.
1613 thread_group_cputime(p, &cputime);
1614 prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
1615 prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
1619 task_cputime(p, &utime, &stime);
1620 prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
1621 prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
1624 prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
1625 prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
1628 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1629 struct mm_struct *mm)
1631 const struct cred *cred;
1632 unsigned int i, len;
1634 /* first copy the parameters from user space */
1635 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1637 len = mm->arg_end - mm->arg_start;
1638 if (len >= ELF_PRARGSZ)
1639 len = ELF_PRARGSZ-1;
1640 if (copy_from_user(&psinfo->pr_psargs,
1641 (const char __user *)mm->arg_start, len))
1643 for(i = 0; i < len; i++)
1644 if (psinfo->pr_psargs[i] == 0)
1645 psinfo->pr_psargs[i] = ' ';
1646 psinfo->pr_psargs[len] = 0;
1649 psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1651 psinfo->pr_pid = task_pid_vnr(p);
1652 psinfo->pr_pgrp = task_pgrp_vnr(p);
1653 psinfo->pr_sid = task_session_vnr(p);
1655 i = p->state ? ffz(~p->state) + 1 : 0;
1656 psinfo->pr_state = i;
1657 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
1658 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1659 psinfo->pr_nice = task_nice(p);
1660 psinfo->pr_flag = p->flags;
1662 cred = __task_cred(p);
1663 SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
1664 SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
1666 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
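/*
 * mm->saved_auxv is a flat array of (a_type, a_un.a_val) pairs ending with
 * an AT_NULL entry; the loop below measures its length (terminator included)
 * so the whole vector is emitted as the NT_AUXV note.
 */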
1671 static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1673 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1677 while (auxv[i - 2] != AT_NULL);
1678 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1681 static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
1682 const kernel_siginfo_t *siginfo)
1684 copy_siginfo_to_external(csigdata, siginfo);
1685 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
1688 #define MAX_FILE_NOTE_SIZE (4*1024*1024)
1690 * Format of NT_FILE note:
1692 * long count -- how many files are mapped
1693 * long page_size -- units for file_ofs
1694 * array of [COUNT] elements of
1698 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
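 *
 * For instance, a process with two file-backed mappings might record:
 * count = 2, page_size = 4096, two (start, end, file_ofs) triples, then
 * "/bin/true\0/usr/lib/libc.so\0" (values purely illustrative; file_ofs
 * is expressed in page_size units, i.e. vm_pgoff).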
1700 static int fill_files_note(struct memelfnote *note)
1702 struct mm_struct *mm = current->mm;
1703 struct vm_area_struct *vma;
1704 unsigned count, size, names_ofs, remaining, n;
1706 user_long_t *start_end_ofs;
1707 char *name_base, *name_curpos;
1709 /* *Estimated* file count and total data size needed */
1710 count = mm->map_count;
1711 if (count > UINT_MAX / 64)
1715 names_ofs = (2 + 3 * count) * sizeof(data[0]);
1717 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
1719 size = round_up(size, PAGE_SIZE);
1721 * "size" can be 0 here legitimately.
1722 * Let it ENOMEM and omit NT_FILE section which will be empty anyway.
1724 data = kvmalloc(size, GFP_KERNEL);
1725 if (ZERO_OR_NULL_PTR(data))
1728 start_end_ofs = data + 2;
1729 name_base = name_curpos = ((char *)data) + names_ofs;
1730 remaining = size - names_ofs;
1732 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
1734 const char *filename;
1736 file = vma->vm_file;
1739 filename = file_path(file, name_curpos, remaining);
1740 if (IS_ERR(filename)) {
1741 if (PTR_ERR(filename) == -ENAMETOOLONG) {
1743 size = size * 5 / 4;
1749 /* file_path() fills at the end, move name down */
1750 /* n = strlen(filename) + 1: */
1751 n = (name_curpos + remaining) - filename;
1752 remaining = filename - name_curpos;
1753 memmove(name_curpos, filename, n);
1756 *start_end_ofs++ = vma->vm_start;
1757 *start_end_ofs++ = vma->vm_end;
1758 *start_end_ofs++ = vma->vm_pgoff;
1762 /* Now we know exact count of files, can store it */
1764 data[1] = PAGE_SIZE;
1766 * Count usually is less than mm->map_count,
1767 * we need to move filenames down.
1769 n = mm->map_count - count;
1771 unsigned shift_bytes = n * 3 * sizeof(data[0]);
1772 memmove(name_base - shift_bytes, name_base,
1773 name_curpos - name_base);
1774 name_curpos -= shift_bytes;
1777 size = name_curpos - (char *)data;
1778 fill_note(note, "CORE", NT_FILE, size, data);
1782 #ifdef CORE_DUMP_USE_REGSET
1783 #include <linux/regset.h>
1785 struct elf_thread_core_info {
1786 struct elf_thread_core_info *next;
1787 struct task_struct *task;
1788 struct elf_prstatus prstatus;
1789 struct memelfnote notes[0];
1792 struct elf_note_info {
1793 struct elf_thread_core_info *thread;
1794 struct memelfnote psinfo;
1795 struct memelfnote signote;
1796 struct memelfnote auxv;
1797 struct memelfnote files;
1798 user_siginfo_t csigdata;
1804 * When a regset has a writeback hook, we call it on each thread before
1805 * dumping user memory. On register window machines, this makes sure the
1806 * user memory backing the register data is up to date before we read it.
1808 static void do_thread_regset_writeback(struct task_struct *task,
1809 const struct user_regset *regset)
1811 if (regset->writeback)
1812 regset->writeback(task, regset, 1);
1815 #ifndef PRSTATUS_SIZE
1816 #define PRSTATUS_SIZE(S, R) sizeof(S)
1819 #ifndef SET_PR_FPVALID
1820 #define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
1823 static int fill_thread_core_info(struct elf_thread_core_info *t,
1824 const struct user_regset_view *view,
1825 long signr, size_t *total)
1828 unsigned int regset0_size = regset_size(t->task, &view->regsets[0]);
1831 * NT_PRSTATUS is the one special case, because the regset data
1832 * goes into the pr_reg field inside the note contents, rather
1833 * than being the whole note contents. We fill the rest in here.
1834 * We assume that regset 0 is NT_PRSTATUS.
1836 fill_prstatus(&t->prstatus, t->task, signr);
1837 (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size,
1838 &t->prstatus.pr_reg, NULL);
1840 fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
1841 PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
1842 *total += notesize(&t->notes[0]);
1844 do_thread_regset_writeback(t->task, &view->regsets[0]);
1847 * Each other regset might generate a note too. For each regset
1848 * that has no core_note_type or is inactive, we leave t->notes[i]
1849 * all zero and we'll know to skip writing it later.
1851 for (i = 1; i < view->n; ++i) {
1852 const struct user_regset *regset = &view->regsets[i];
1853 do_thread_regset_writeback(t->task, regset);
1854 if (regset->core_note_type && regset->get &&
1855 (!regset->active || regset->active(t->task, regset) > 0)) {
1857 size_t size = regset_size(t->task, regset);
1858 void *data = kzalloc(size, GFP_KERNEL);
1859 if (unlikely(!data))
1861 ret = regset->get(t->task, regset,
1862 0, size, data, NULL);
1866 if (regset->core_note_type != NT_PRFPREG)
1867 fill_note(&t->notes[i], "LINUX",
1868 regset->core_note_type,
1871 SET_PR_FPVALID(&t->prstatus,
1873 fill_note(&t->notes[i], "CORE",
1874 NT_PRFPREG, size, data);
1876 *total += notesize(&t->notes[i]);
1884 static int fill_note_info(struct elfhdr *elf, int phdrs,
1885 struct elf_note_info *info,
1886 const kernel_siginfo_t *siginfo, struct pt_regs *regs)
1888 struct task_struct *dump_task = current;
1889 const struct user_regset_view *view = task_user_regset_view(dump_task);
1890 struct elf_thread_core_info *t;
1891 struct elf_prpsinfo *psinfo;
1892 struct core_thread *ct;
1896 info->thread = NULL;
1898 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1899 if (psinfo == NULL) {
1900 info->psinfo.data = NULL; /* So we don't free this wrongly */
1904 fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1907 * Figure out how many notes we're going to need for each thread.
1909 info->thread_notes = 0;
1910 for (i = 0; i < view->n; ++i)
1911 if (view->regsets[i].core_note_type != 0)
1912 ++info->thread_notes;
1915 * Sanity check. We rely on regset 0 being NT_PRSTATUS,
1916 * since it is our one special case.
1918 if (unlikely(info->thread_notes == 0) ||
1919 unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
1925 * Initialize the ELF file header.
1927 fill_elf_header(elf, phdrs,
1928 view->e_machine, view->e_flags);
1931 * Allocate a structure for each thread.
1933 for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
1934 t = kzalloc(offsetof(struct elf_thread_core_info,
1935 notes[info->thread_notes]),
1941 if (ct->task == dump_task || !info->thread) {
1942 t->next = info->thread;
1946 * Make sure to keep the original task at
1947 * the head of the list.
1949 t->next = info->thread->next;
1950 info->thread->next = t;
1955 * Now fill in each thread's information.
1957 for (t = info->thread; t != NULL; t = t->next)
1958 if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
1962 * Fill in the two process-wide notes.
1964 fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
1965 info->size += notesize(&info->psinfo);
1967 fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
1968 info->size += notesize(&info->signote);
1970 fill_auxv_note(&info->auxv, current->mm);
1971 info->size += notesize(&info->auxv);
1973 if (fill_files_note(&info->files) == 0)
1974 info->size += notesize(&info->files);
1979 static size_t get_note_info_size(struct elf_note_info *info)
1985 * Write all the notes for each thread. When writing the first thread, the
1986 * process-wide notes are interleaved after the first thread-specific note.
1988 static int write_note_info(struct elf_note_info *info,
1989 struct coredump_params *cprm)
1992 struct elf_thread_core_info *t = info->thread;
1997 if (!writenote(&t->notes[0], cprm))
2000 if (first && !writenote(&info->psinfo, cprm))
2002 if (first && !writenote(&info->signote, cprm))
2004 if (first && !writenote(&info->auxv, cprm))
2006 if (first && info->files.data &&
2007 !writenote(&info->files, cprm))
2010 for (i = 1; i < info->thread_notes; ++i)
2011 if (t->notes[i].data &&
2012 !writenote(&t->notes[i], cprm))
2022 static void free_note_info(struct elf_note_info *info)
2024 struct elf_thread_core_info *threads = info->thread;
2027 struct elf_thread_core_info *t = threads;
2029 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
2030 for (i = 1; i < info->thread_notes; ++i)
2031 kfree(t->notes[i].data);
2034 kfree(info->psinfo.data);
2035 kvfree(info->files.data);
2040 /* Here is the structure in which status of each thread is captured. */
2041 struct elf_thread_status
2043 struct list_head list;
2044 struct elf_prstatus prstatus; /* NT_PRSTATUS */
2045 elf_fpregset_t fpu; /* NT_PRFPREG */
2046 struct task_struct *thread;
2047 #ifdef ELF_CORE_COPY_XFPREGS
2048 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
2050 struct memelfnote notes[3];
2055 * In order to add thread-specific information to the ELF core file,
2056 * we need to keep a linked list of every thread's pr_status and then create
2057 * a single section for them in the final core file.
2059 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
2062 struct task_struct *p = t->thread;
2065 fill_prstatus(&t->prstatus, p, signr);
2066 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
2068 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
2071 sz += notesize(&t->notes[0]);
2073 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
2075 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
2078 sz += notesize(&t->notes[1]);
2081 #ifdef ELF_CORE_COPY_XFPREGS
2082 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
2083 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
2084 sizeof(t->xfpu), &t->xfpu);
2086 sz += notesize(&t->notes[2]);
2092 struct elf_note_info {
2093 struct memelfnote *notes;
2094 struct memelfnote *notes_files;
2095 struct elf_prstatus *prstatus; /* NT_PRSTATUS */
2096 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
2097 struct list_head thread_list;
2098 elf_fpregset_t *fpu;
2099 #ifdef ELF_CORE_COPY_XFPREGS
2100 elf_fpxregset_t *xfpu;
2102 user_siginfo_t csigdata;
2103 int thread_status_size;
2107 static int elf_note_info_init(struct elf_note_info *info)
2109 memset(info, 0, sizeof(*info));
2110 INIT_LIST_HEAD(&info->thread_list);
2112 /* Allocate space for ELF notes */
2113 info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
2116 info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
2119 info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
2120 if (!info->prstatus)
2122 info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
2125 #ifdef ELF_CORE_COPY_XFPREGS
2126 info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
2133 static int fill_note_info(struct elfhdr *elf, int phdrs,
2134 struct elf_note_info *info,
2135 const kernel_siginfo_t *siginfo, struct pt_regs *regs)
2137 struct core_thread *ct;
2138 struct elf_thread_status *ets;
2140 if (!elf_note_info_init(info))
2143 for (ct = current->mm->core_state->dumper.next;
2144 ct; ct = ct->next) {
2145 ets = kzalloc(sizeof(*ets), GFP_KERNEL);
2149 ets->thread = ct->task;
2150 list_add(&ets->list, &info->thread_list);
2153 list_for_each_entry(ets, &info->thread_list, list) {
2156 sz = elf_dump_thread_status(siginfo->si_signo, ets);
2157 info->thread_status_size += sz;
2159 /* now collect the dump for the current */
2160 memset(info->prstatus, 0, sizeof(*info->prstatus));
2161 fill_prstatus(info->prstatus, current, siginfo->si_signo);
2162 elf_core_copy_regs(&info->prstatus->pr_reg, regs);
2165 fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
2168 * Set up the notes in similar form to SVR4 core dumps made
2169 * with info from their /proc.
2172 fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
2173 sizeof(*info->prstatus), info->prstatus);
2174 fill_psinfo(info->psinfo, current->group_leader, current->mm);
2175 fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
2176 sizeof(*info->psinfo), info->psinfo);
2178 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
2179 fill_auxv_note(info->notes + 3, current->mm);
2182 if (fill_files_note(info->notes + info->numnote) == 0) {
2183 info->notes_files = info->notes + info->numnote;
2187 /* Try to dump the FPU. */
2188 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
2190 if (info->prstatus->pr_fpvalid)
2191 fill_note(info->notes + info->numnote++,
2192 "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
2193 #ifdef ELF_CORE_COPY_XFPREGS
2194 if (elf_core_copy_task_xfpregs(current, info->xfpu))
2195 fill_note(info->notes + info->numnote++,
2196 "LINUX", ELF_CORE_XFPREG_TYPE,
2197 sizeof(*info->xfpu), info->xfpu);
2203 static size_t get_note_info_size(struct elf_note_info *info)
2208 for (i = 0; i < info->numnote; i++)
2209 sz += notesize(info->notes + i);
2211 sz += info->thread_status_size;
2216 static int write_note_info(struct elf_note_info *info,
2217 struct coredump_params *cprm)
2219 struct elf_thread_status *ets;
2222 for (i = 0; i < info->numnote; i++)
2223 if (!writenote(info->notes + i, cprm))
2226 /* write out the thread status notes section */
2227 list_for_each_entry(ets, &info->thread_list, list) {
2228 for (i = 0; i < ets->num_notes; i++)
2229 if (!writenote(&ets->notes[i], cprm))
2236 static void free_note_info(struct elf_note_info *info)
2238 while (!list_empty(&info->thread_list)) {
2239 struct list_head *tmp = info->thread_list.next;
2241 kfree(list_entry(tmp, struct elf_thread_status, list));
2244 /* Free data possibly allocated by fill_files_note(): */
2245 if (info->notes_files)
2246 kvfree(info->notes_files->data);
2248 kfree(info->prstatus);
2249 kfree(info->psinfo);
2252 #ifdef ELF_CORE_COPY_XFPREGS
2259 static struct vm_area_struct *first_vma(struct task_struct *tsk,
2260 struct vm_area_struct *gate_vma)
2262 struct vm_area_struct *ret = tsk->mm->mmap;
2269 * Helper function for iterating across a vma list. It ensures that the caller
2270 * will visit `gate_vma' prior to terminating the search.
2272 static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2273 struct vm_area_struct *gate_vma)
2275 struct vm_area_struct *ret;
2277 ret = this_vma->vm_next;
2280 if (this_vma == gate_vma)
2285 static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2286 elf_addr_t e_shoff, int segs)
2288 elf->e_shoff = e_shoff;
2289 elf->e_shentsize = sizeof(*shdr4extnum);
2291 elf->e_shstrndx = SHN_UNDEF;
2293 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2295 shdr4extnum->sh_type = SHT_NULL;
2296 shdr4extnum->sh_size = elf->e_shnum;
2297 shdr4extnum->sh_link = elf->e_shstrndx;
2298 shdr4extnum->sh_info = segs;
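/*
 * Extended numbering: when the real program header count does not fit in
 * the 16-bit e_phnum field, e_phnum is set to PN_XNUM and the true count
 * is carried in sh_info of this otherwise-empty section header at index 0,
 * which is what fill_extnum_info() above sets up.
 */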
2304 * This is a two-pass process; first we find the offsets of the bits,
2305 * and then they are actually written out. If we run out of core limit
2308 static int elf_core_dump(struct coredump_params *cprm)
2312 size_t vma_data_size = 0;
2313 struct vm_area_struct *vma, *gate_vma;
2315 loff_t offset = 0, dataoff;
2316 struct elf_note_info info = { };
2317 struct elf_phdr *phdr4note = NULL;
2318 struct elf_shdr *shdr4extnum = NULL;
2321 elf_addr_t *vma_filesz = NULL;
2324 * We no longer stop all VM operations.
2326 * This is because those processes that could possibly change map_count
2327 * or the mmap / vma pages are now blocked in do_exit on current
2328 * finishing this core dump.
2330 * Only ptrace can touch these memory addresses, but it doesn't change
2331 * the map_count or the pages allocated. So no possibility of crashing
2332 * exists while dumping the mm->vm_next areas to the core file.
2336 * The number of segs is recorded in the ELF header as a 16-bit value.
2337 * Please check the DEFAULT_MAX_MAP_COUNT definition if you modify this.
2339 segs = current->mm->map_count;
2340 segs += elf_core_extra_phdrs();
2342 gate_vma = get_gate_vma(current->mm);
2343 if (gate_vma != NULL)
2346 /* for notes section */
2349 /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
2350 * this, kernel supports extended numbering. Have a look at
2351 * include/linux/elf.h for further information. */
2352 e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
2355 * Collect all the non-memory information about the process for the
2356 * notes. This also sets up the file header.
2358 if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs))
2363 offset += sizeof(elf); /* Elf header */
2364 offset += segs * sizeof(struct elf_phdr); /* Program headers */
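/*
 * The running offset follows the core file layout: ELF header, program
 * headers, the PT_NOTE data, padding up to ELF_EXEC_PAGESIZE, the dumped
 * segment contents, and finally the extra section header when extended
 * numbering is needed.
 */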
2366 /* Write notes phdr entry */
2368 size_t sz = get_note_info_size(&info);
2370 sz += elf_coredump_extra_notes_size();
2372 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
2376 fill_elf_note_phdr(phdr4note, sz, offset);
2380 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2383 * A process with zero VMAs will get ZERO_SIZE_PTR here.
2384 * Let the coredump continue so we capture at least the register state.
2386 vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
2391 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2392 vma = next_vma(vma, gate_vma)) {
2393 unsigned long dump_size;
2395 dump_size = vma_dump_size(vma, cprm->mm_flags);
2396 vma_filesz[i++] = dump_size;
2397 vma_data_size += dump_size;
2400 offset += vma_data_size;
2401 offset += elf_core_extra_data_size();
2404 if (e_phnum == PN_XNUM) {
2405 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
2408 fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
2413 if (!dump_emit(cprm, &elf, sizeof(elf)))
2416 if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
2419 /* Write program headers for segments dump */
2420 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2421 vma = next_vma(vma, gate_vma)) {
2422 struct elf_phdr phdr;
2424 phdr.p_type = PT_LOAD;
2425 phdr.p_offset = offset;
2426 phdr.p_vaddr = vma->vm_start;
2428 phdr.p_filesz = vma_filesz[i++];
2429 phdr.p_memsz = vma->vm_end - vma->vm_start;
2430 offset += phdr.p_filesz;
2431 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
2432 if (vma->vm_flags & VM_WRITE)
2433 phdr.p_flags |= PF_W;
2434 if (vma->vm_flags & VM_EXEC)
2435 phdr.p_flags |= PF_X;
2436 phdr.p_align = ELF_EXEC_PAGESIZE;
2438 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
2442 if (!elf_core_write_extra_phdrs(cprm, offset))
2445 /* write out the notes section */
2446 if (!write_note_info(&info, cprm))
2449 if (elf_coredump_extra_notes_write(cprm))
2453 if (!dump_skip(cprm, dataoff - cprm->pos))
2456 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2457 vma = next_vma(vma, gate_vma)) {
2461 end = vma->vm_start + vma_filesz[i++];
2463 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
2467 page = get_dump_page(addr);
2469 void *kaddr = kmap(page);
2470 stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
2474 stop = !dump_skip(cprm, PAGE_SIZE);
2479 dump_truncate(cprm);
2481 if (!elf_core_write_extra_data(cprm))
2484 if (e_phnum == PN_XNUM) {
2485 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
2490 free_note_info(&info);
2497 #endif /* CONFIG_ELF_CORE */
2499 static int __init init_elf_binfmt(void)
2501 register_binfmt(&elf_format);
2505 static void __exit exit_elf_binfmt(void)
2507 /* Remove the ELF loader. */
2508 unregister_binfmt(&elf_format);
2511 core_initcall(init_elf_binfmt);
2512 module_exit(exit_elf_binfmt);
2513 MODULE_LICENSE("GPL");