2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
12 #include <linux/module.h>
13 #include <linux/kernel.h>
16 #include <linux/mman.h>
17 #include <linux/errno.h>
18 #include <linux/signal.h>
19 #include <linux/binfmts.h>
20 #include <linux/string.h>
21 #include <linux/file.h>
22 #include <linux/slab.h>
23 #include <linux/personality.h>
24 #include <linux/elfcore.h>
25 #include <linux/init.h>
26 #include <linux/highuid.h>
27 #include <linux/compiler.h>
28 #include <linux/highmem.h>
29 #include <linux/pagemap.h>
30 #include <linux/vmalloc.h>
31 #include <linux/security.h>
32 #include <linux/random.h>
33 #include <linux/elf.h>
34 #include <linux/elf-randomize.h>
35 #include <linux/utsname.h>
36 #include <linux/coredump.h>
37 #include <linux/sched.h>
38 #include <linux/sched/coredump.h>
39 #include <linux/sched/task_stack.h>
40 #include <linux/sched/cputime.h>
41 #include <linux/cred.h>
42 #include <linux/dax.h>
43 #include <linux/uaccess.h>
44 #include <asm/param.h>
48 #define user_long_t long
50 #ifndef user_siginfo_t
51 #define user_siginfo_t siginfo_t
54 static int load_elf_binary(struct linux_binprm *bprm);
55 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
56 int, int, unsigned long);
59 static int load_elf_library(struct file *);
61 #define load_elf_library NULL
65 * If we don't support core dumping, then supply a NULL so we
68 #ifdef CONFIG_ELF_CORE
69 static int elf_core_dump(struct coredump_params *cprm);
71 #define elf_core_dump NULL
74 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
75 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
77 #define ELF_MIN_ALIGN PAGE_SIZE
80 #ifndef ELF_CORE_EFLAGS
81 #define ELF_CORE_EFLAGS 0
84 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
85 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
86 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
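/*
 * Worked example (assuming ELF_MIN_ALIGN == 0x1000): for _v == 0x08048123,
 * ELF_PAGESTART(_v) == 0x08048000, ELF_PAGEOFFSET(_v) == 0x123 and
 * ELF_PAGEALIGN(_v) == 0x08049000. elf_map() below relies on p_vaddr and
 * p_offset being congruent modulo ELF_MIN_ALIGN, so that subtracting the
 * same ELF_PAGEOFFSET() from both still yields a valid file mapping.
 */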
88 static struct linux_binfmt elf_format = {
89 .module = THIS_MODULE,
90 .load_binary = load_elf_binary,
91 .load_shlib = load_elf_library,
92 .core_dump = elf_core_dump,
93 .min_coredump = ELF_EXEC_PAGESIZE,
96 #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
98 static int set_brk(unsigned long start, unsigned long end, int prot)
100 start = ELF_PAGEALIGN(start);
101 end = ELF_PAGEALIGN(end);
104 * Map the last of the bss segment.
105 * If the header is requesting these pages to be
106 * executable, honour that (ppc32 needs this).
108 int error = vm_brk_flags(start, end - start,
109 prot & PROT_EXEC ? VM_EXEC : 0);
113 current->mm->start_brk = current->mm->brk = end;
117 /* We need to explicitly zero any fractional pages
118 after the data section (i.e. bss). This would
119 contain the junk from the file that should not
122 static int padzero(unsigned long elf_bss)
126 nbyte = ELF_PAGEOFFSET(elf_bss);
128 nbyte = ELF_MIN_ALIGN - nbyte;
129 if (clear_user((void __user *) elf_bss, nbyte))
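/*
 * Example (ELF_MIN_ALIGN == 0x1000): with elf_bss == 0x601234, padzero()
 * clears the 0xdcc bytes from 0x601234 up to the 0x602000 boundary, so the
 * tail of the last file-backed page cannot leak stale file contents into bss.
 */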
135 /* Let's use some macros to make this stack manipulation a little clearer */
136 #ifdef CONFIG_STACK_GROWSUP
137 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
138 #define STACK_ROUND(sp, items) \
139 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
140 #define STACK_ALLOC(sp, len) ({ \
141 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
144 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
145 #define STACK_ROUND(sp, items) \
146 (((unsigned long) (sp - items)) &~ 15UL)
147 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
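/*
 * Example for the default (downward-growing) case: with sp == 0x7ffffff0 and
 * len == 16, STACK_ALLOC(sp, 16) lowers sp to 0x7fffffe0 and returns that
 * value, i.e. the carved-out region is [0x7fffffe0, 0x7ffffff0). STACK_ROUND
 * keeps the final stack pointer 16-byte aligned, as most psABIs expect at
 * process entry.
 */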
150 #ifndef ELF_BASE_PLATFORM
152 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
153 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
154 * will be copied to the user stack in the same manner as AT_PLATFORM.
156 #define ELF_BASE_PLATFORM NULL
160 create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
161 unsigned long load_addr, unsigned long interp_load_addr)
163 unsigned long p = bprm->p;
164 int argc = bprm->argc;
165 int envc = bprm->envc;
166 elf_addr_t __user *sp;
167 elf_addr_t __user *u_platform;
168 elf_addr_t __user *u_base_platform;
169 elf_addr_t __user *u_rand_bytes;
170 const char *k_platform = ELF_PLATFORM;
171 const char *k_base_platform = ELF_BASE_PLATFORM;
172 unsigned char k_rand_bytes[16];
174 elf_addr_t *elf_info;
176 const struct cred *cred = current_cred();
177 struct vm_area_struct *vma;
180 * In some cases (e.g. Hyper-Threading), we want to avoid L1
181 * evictions by the processes running on the same package. One
182 * thing we can do is to shuffle the initial stack for them.
185 p = arch_align_stack(p);
188 * If this architecture has a platform capability string, copy it
189 * to userspace. In some cases (Sparc), this info is impossible
190 * for userspace to get any other way, in others (i386) it is
195 size_t len = strlen(k_platform) + 1;
197 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
198 if (__copy_to_user(u_platform, k_platform, len))
203 * If this architecture has a "base" platform capability
204 * string, copy it to userspace.
206 u_base_platform = NULL;
207 if (k_base_platform) {
208 size_t len = strlen(k_base_platform) + 1;
210 u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
211 if (__copy_to_user(u_base_platform, k_base_platform, len))
216 * Generate 16 random bytes for userspace PRNG seeding.
218 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
219 u_rand_bytes = (elf_addr_t __user *)
220 STACK_ALLOC(p, sizeof(k_rand_bytes));
221 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
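	/*
	 * For reference, a minimal userspace sketch (a separate program,
	 * assuming glibc's getauxval()) that consumes the AT_RANDOM bytes
	 * placed above:
	 *
	 *	#include <elf.h>
	 *	#include <stdio.h>
	 *	#include <sys/auxv.h>
	 *
	 *	int main(void)
	 *	{
	 *		const unsigned char *r =
	 *			(const unsigned char *)getauxval(AT_RANDOM);
	 *		for (int i = 0; r && i < 16; i++)
	 *			printf("%02x", r[i]);
	 *		putchar('\n');
	 *		return 0;
	 *	}
	 *
	 * glibc uses these bytes to seed its stack-protector canary and its
	 * pointer guard.
	 */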
224 /* Create the ELF interpreter info */
225 elf_info = (elf_addr_t *)current->mm->saved_auxv;
226 /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
227 #define NEW_AUX_ENT(id, val) \
229 elf_info[ei_index++] = id; \
230 elf_info[ei_index++] = val; \
235 * ARCH_DLINFO must come first so PPC can do its special alignment of
237 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
238 * ARCH_DLINFO changes
242 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
243 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
244 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
245 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
246 NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
247 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
248 NEW_AUX_ENT(AT_BASE, interp_load_addr);
249 NEW_AUX_ENT(AT_FLAGS, 0);
250 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
251 NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
252 NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
253 NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
254 NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
255 NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
256 NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
258 NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
260 NEW_AUX_ENT(AT_EXECFN, bprm->exec);
262 NEW_AUX_ENT(AT_PLATFORM,
263 (elf_addr_t)(unsigned long)u_platform);
265 if (k_base_platform) {
266 NEW_AUX_ENT(AT_BASE_PLATFORM,
267 (elf_addr_t)(unsigned long)u_base_platform);
269 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
270 NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
273 /* AT_NULL is zero; clear the rest too */
274 memset(&elf_info[ei_index], 0,
275 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
277 /* And advance past the AT_NULL entry. */
280 sp = STACK_ADD(p, ei_index);
282 items = (argc + 1) + (envc + 1) + 1;
283 bprm->p = STACK_ROUND(sp, items);
285 /* Point sp at the lowest address on the stack */
286 #ifdef CONFIG_STACK_GROWSUP
287 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
288 bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
290 sp = (elf_addr_t __user *)bprm->p;
295 * Grow the stack manually; some architectures have a limit on how
296 * far ahead a user-space access may be in order to grow the stack.
298 vma = find_extend_vma(current->mm, bprm->p);
302 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
303 if (__put_user(argc, sp++))
306 /* Populate list of argv pointers back to argv strings. */
307 p = current->mm->arg_end = current->mm->arg_start;
310 if (__put_user((elf_addr_t)p, sp++))
312 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
313 if (!len || len > MAX_ARG_STRLEN)
317 if (__put_user(0, sp++))
319 current->mm->arg_end = p;
321 /* Populate list of envp pointers back to envp strings. */
322 current->mm->env_end = current->mm->env_start = p;
325 if (__put_user((elf_addr_t)p, sp++))
327 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
328 if (!len || len > MAX_ARG_STRLEN)
332 if (__put_user(0, sp++))
334 current->mm->env_end = p;
336 /* Put the elf_info on the stack in the right place. */
337 if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
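/*
 * The initial stack built above ends up looking roughly like this on a
 * downward-growing stack (lowest addresses first; sp is what the new program
 * sees at entry):
 *
 *	argc
 *	argv[0] ... argv[argc-1], NULL
 *	envp[0] ... envp[n-1], NULL
 *	auxv pairs, terminated by an AT_NULL entry
 *	(16-byte alignment padding from STACK_ROUND)
 *	AT_RANDOM bytes, platform strings, argv/envp strings, ...
 *
 * Only the argc/argv/envp/auxv block is fixed by the ABI; the string/byte
 * area above it is reached through the pointers written here, so its exact
 * layout is an implementation detail.
 */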
344 static unsigned long elf_map(struct file *filep, unsigned long addr,
345 struct elf_phdr *eppnt, int prot, int type,
346 unsigned long total_size)
348 unsigned long map_addr;
349 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
350 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
351 addr = ELF_PAGESTART(addr);
352 size = ELF_PAGEALIGN(size);
354 /* mmap() will return -EINVAL if given a zero size, but a
355 * segment with zero filesize is perfectly valid */
360 * total_size is the size of the ELF (interpreter) image.
361 * The _first_ mmap needs to know the full size, otherwise
362 * randomization might put this image into an overlapping
363 * position with the ELF binary image. (since size < total_size)
364 * So we first map the 'big' image - and unmap the remainder at
365 * the end. (which unmap is needed for ELF images with holes.)
368 total_size = ELF_PAGEALIGN(total_size);
369 map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
370 if (!BAD_ADDR(map_addr))
371 vm_munmap(map_addr+size, total_size-size);
373 map_addr = vm_mmap(filep, addr, size, prot, type, off);
378 #endif /* !elf_map */
380 static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
382 int i, first_idx = -1, last_idx = -1;
384 for (i = 0; i < nr; i++) {
385 if (cmds[i].p_type == PT_LOAD) {
394 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
395 ELF_PAGESTART(cmds[first_idx].p_vaddr);
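/*
 * Example: with PT_LOAD segments at p_vaddr 0x0 (p_memsz 0x1000) and p_vaddr
 * 0x200000 (p_memsz 0x500), this returns 0x200500 - the span from the page
 * containing the first segment to the end of the last one, holes included.
 */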
399 * load_elf_phdrs() - load ELF program headers
400 * @elf_ex: ELF header of the binary whose program headers should be loaded
401 * @elf_file: the opened ELF binary file
403 * Loads ELF program headers from the binary file elf_file, which has the ELF
404 * header pointed to by elf_ex, into a newly allocated array. The caller is
405 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
407 static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
408 struct file *elf_file)
410 struct elf_phdr *elf_phdata = NULL;
411 int retval, size, err = -1;
414 * If the size of this structure has changed, then punt, since
415 * we will be doing the wrong thing.
417 if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
420 /* Sanity check the number of program headers... */
421 if (elf_ex->e_phnum < 1 ||
422 elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
425 /* ...and their total size. */
426 size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
427 if (size > ELF_MIN_ALIGN)
430 elf_phdata = kmalloc(size, GFP_KERNEL);
434 /* Read in the program headers */
435 retval = kernel_read(elf_file, elf_ex->e_phoff,
436 (char *)elf_phdata, size);
437 if (retval != size) {
438 err = (retval < 0) ? retval : -EIO;
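/*
 * Example of the bounds above for 64-bit ELF (56-byte program headers): the
 * e_phnum check allows at most 65536/56 = 1170 headers, and the size check
 * further caps the table at ELF_MIN_ALIGN bytes, i.e. 73 headers on a
 * 4 KiB-page architecture.
 */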
452 #ifndef CONFIG_ARCH_BINFMT_ELF_STATE
455 * struct arch_elf_state - arch-specific ELF loading state
457 * This structure is used to preserve architecture specific data during
458 * the loading of an ELF file, throughout the checking of architecture
459 * specific ELF headers & through to the point where the ELF load is
460 * known to be proceeding (ie. SET_PERSONALITY).
462 * This implementation is a dummy for architectures which require no
465 struct arch_elf_state {
468 #define INIT_ARCH_ELF_STATE {}
471 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
472 * @ehdr: The main ELF header
473 * @phdr: The program header to check
474 * @elf: The open ELF file
475 * @is_interp: True if the phdr is from the interpreter of the ELF being
476 * loaded, else false.
477 * @state: Architecture-specific state preserved throughout the process
478 * of loading the ELF.
480 * Inspects the program header phdr to validate its correctness and/or
481 * suitability for the system. Called once per ELF program header in the
482 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
485 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
486 * with that return code.
488 static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
489 struct elf_phdr *phdr,
490 struct file *elf, bool is_interp,
491 struct arch_elf_state *state)
493 /* Dummy implementation, always proceed */
498 * arch_check_elf() - check an ELF executable
499 * @ehdr: The main ELF header
500 * @has_interp: True if the ELF has an interpreter, else false.
501 * @interp_ehdr: The interpreter's ELF header
502 * @state: Architecture-specific state preserved throughout the process
503 * of loading the ELF.
505 * Provides a final opportunity for architecture code to reject the loading
506 * of the ELF & cause an exec syscall to return an error. This is called after
507 * all program headers to be checked by arch_elf_pt_proc have been.
509 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
510 * with that return code.
512 static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
513 struct elfhdr *interp_ehdr,
514 struct arch_elf_state *state)
516 /* Dummy implementation, always proceed */
520 #endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
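/*
 * A hypothetical arch override, sketched for illustration only (MIPS is the
 * in-tree user of CONFIG_ARCH_BINFMT_ELF_STATE, for its FP ABI handling; the
 * names below are made up and would live in the arch's asm/elf.h):
 *
 *	struct arch_elf_state {
 *		int fp_abi;
 *		int interp_fp_abi;
 *	};
 *
 *	#define INIT_ARCH_ELF_STATE { .fp_abi = -1, .interp_fp_abi = -1 }
 *
 *	arch_elf_pt_proc()  - record the ABI advertised by an arch-specific
 *	                      program header
 *	arch_check_elf()    - reject the exec if the binary and its
 *	                      interpreter advertise incompatible ABIs
 */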
522 /* This is much more generalized than the library routine read function,
523 so we keep this separate. Technically the library read function
524 is only provided so that we can read a.out libraries that have
527 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
528 struct file *interpreter, unsigned long *interp_map_addr,
529 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
531 struct elf_phdr *eppnt;
532 unsigned long load_addr = 0;
533 int load_addr_set = 0;
534 unsigned long last_bss = 0, elf_bss = 0;
536 unsigned long error = ~0UL;
537 unsigned long total_size;
540 /* First of all, some simple consistency checks */
541 if (interp_elf_ex->e_type != ET_EXEC &&
542 interp_elf_ex->e_type != ET_DYN)
544 if (!elf_check_arch(interp_elf_ex))
546 if (!interpreter->f_op->mmap)
549 total_size = total_mapping_size(interp_elf_phdata,
550 interp_elf_ex->e_phnum);
556 eppnt = interp_elf_phdata;
557 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
558 if (eppnt->p_type == PT_LOAD) {
559 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
561 unsigned long vaddr = 0;
562 unsigned long k, map_addr;
564 if (eppnt->p_flags & PF_R)
565 elf_prot = PROT_READ;
566 if (eppnt->p_flags & PF_W)
567 elf_prot |= PROT_WRITE;
568 if (eppnt->p_flags & PF_X)
569 elf_prot |= PROT_EXEC;
570 vaddr = eppnt->p_vaddr;
571 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
572 elf_type |= MAP_FIXED;
573 else if (no_base && interp_elf_ex->e_type == ET_DYN)
576 map_addr = elf_map(interpreter, load_addr + vaddr,
577 eppnt, elf_prot, elf_type, total_size);
579 if (!*interp_map_addr)
580 *interp_map_addr = map_addr;
582 if (BAD_ADDR(map_addr))
585 if (!load_addr_set &&
586 interp_elf_ex->e_type == ET_DYN) {
587 load_addr = map_addr - ELF_PAGESTART(vaddr);
592 * Check to see if the section's size will overflow the
593 * allowed task size. Note that p_filesz must always be
594 * <= p_memsz so it's only necessary to check p_memsz.
596 k = load_addr + eppnt->p_vaddr;
598 eppnt->p_filesz > eppnt->p_memsz ||
599 eppnt->p_memsz > TASK_SIZE ||
600 TASK_SIZE - eppnt->p_memsz < k) {
606 * Find the end of the file mapping for this phdr, and
607 * keep track of the largest address we see for this.
609 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
614 * Do the same thing for the memory mapping - between
615 * elf_bss and last_bss is the bss section.
617 k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
626 * Now fill out the bss section: first pad the last page from
627 * the file up to the page boundary, and zero it from elf_bss
628 * up to the end of the page.
630 if (padzero(elf_bss)) {
635 * Next, align both the file and mem bss up to the page size,
636 * since this is where elf_bss was just zeroed up to, and where
637 * last_bss will end after the vm_brk_flags() below.
639 elf_bss = ELF_PAGEALIGN(elf_bss);
640 last_bss = ELF_PAGEALIGN(last_bss);
641 /* Finally, if there is still more bss to allocate, do it. */
642 if (last_bss > elf_bss) {
643 error = vm_brk_flags(elf_bss, last_bss - elf_bss,
644 bss_prot & PROT_EXEC ? VM_EXEC : 0);
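/*
 * Worked example of the bss handling above (load_addr == 0, 4 KiB pages): a
 * PT_LOAD with p_vaddr 0x400000, p_filesz 0x1234 and p_memsz 0x5678 gives
 * elf_bss == 0x401234 and last_bss == 0x405678. padzero() clears
 * 0x401234-0x402000 inside the already-mapped page, and vm_brk_flags() then
 * maps the remaining anonymous range 0x402000-0x406000.
 */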
655 * These are the functions used to load ELF style executables and shared
656 * libraries. There is no binary dependent code anywhere else.
659 #ifndef STACK_RND_MASK
660 #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
663 static unsigned long randomize_stack_top(unsigned long stack_top)
665 unsigned long random_variable = 0;
667 if (current->flags & PF_RANDOMIZE) {
668 random_variable = get_random_long();
669 random_variable &= STACK_RND_MASK;
670 random_variable <<= PAGE_SHIFT;
672 #ifdef CONFIG_STACK_GROWSUP
673 return PAGE_ALIGN(stack_top) + random_variable;
675 return PAGE_ALIGN(stack_top) - random_variable;
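/*
 * Example: with the default STACK_RND_MASK of 0x7ff and 4 KiB pages, up to
 * 0x7ff << 12 bytes (just under 8 MiB) of randomness is subtracted from (or,
 * with CONFIG_STACK_GROWSUP, added to) the page-aligned stack top.
 */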
679 static int load_elf_binary(struct linux_binprm *bprm)
681 struct file *interpreter = NULL; /* to shut gcc up */
682 unsigned long load_addr = 0, load_bias = 0;
683 int load_addr_set = 0;
684 char * elf_interpreter = NULL;
686 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
687 unsigned long elf_bss, elf_brk;
690 unsigned long elf_entry;
691 unsigned long interp_load_addr = 0;
692 unsigned long start_code, end_code, start_data, end_data;
693 unsigned long reloc_func_desc __maybe_unused = 0;
694 int executable_stack = EXSTACK_DEFAULT;
695 struct pt_regs *regs = current_pt_regs();
697 struct elfhdr elf_ex;
698 struct elfhdr interp_elf_ex;
700 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
702 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
708 /* Get the exec-header */
709 loc->elf_ex = *((struct elfhdr *)bprm->buf);
712 /* First of all, some simple consistency checks */
713 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
716 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
718 if (!elf_check_arch(&loc->elf_ex))
720 if (!bprm->file->f_op->mmap)
723 elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
727 elf_ppnt = elf_phdata;
736 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
737 if (elf_ppnt->p_type == PT_INTERP) {
738 /* This is the program interpreter used for
739 * shared libraries - for now assume that this
740 * is an a.out format binary
743 if (elf_ppnt->p_filesz > PATH_MAX ||
744 elf_ppnt->p_filesz < 2)
748 elf_interpreter = kmalloc(elf_ppnt->p_filesz,
750 if (!elf_interpreter)
753 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
756 if (retval != elf_ppnt->p_filesz) {
759 goto out_free_interp;
761 /* make sure the path is NUL-terminated */
763 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
764 goto out_free_interp;
766 interpreter = open_exec(elf_interpreter);
767 retval = PTR_ERR(interpreter);
768 if (IS_ERR(interpreter))
769 goto out_free_interp;
772 * If the binary is not readable then enforce
773 * mm->dumpable = 0 regardless of the interpreter's
776 would_dump(bprm, interpreter);
778 /* Get the exec headers */
779 retval = kernel_read(interpreter, 0,
780 (void *)&loc->interp_elf_ex,
781 sizeof(loc->interp_elf_ex));
782 if (retval != sizeof(loc->interp_elf_ex)) {
785 goto out_free_dentry;
793 elf_ppnt = elf_phdata;
794 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
795 switch (elf_ppnt->p_type) {
797 if (elf_ppnt->p_flags & PF_X)
798 executable_stack = EXSTACK_ENABLE_X;
800 executable_stack = EXSTACK_DISABLE_X;
803 case PT_LOPROC ... PT_HIPROC:
804 retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
808 goto out_free_dentry;
812 /* Some simple consistency checks for the interpreter */
813 if (elf_interpreter) {
815 /* Not an ELF interpreter */
816 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
817 goto out_free_dentry;
818 /* Verify the interpreter has a valid arch */
819 if (!elf_check_arch(&loc->interp_elf_ex))
820 goto out_free_dentry;
822 /* Load the interpreter program headers */
823 interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
825 if (!interp_elf_phdata)
826 goto out_free_dentry;
828 /* Pass PT_LOPROC..PT_HIPROC headers to arch code */
829 elf_ppnt = interp_elf_phdata;
830 for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
831 switch (elf_ppnt->p_type) {
832 case PT_LOPROC ... PT_HIPROC:
833 retval = arch_elf_pt_proc(&loc->interp_elf_ex,
834 elf_ppnt, interpreter,
837 goto out_free_dentry;
843 * Allow arch code to reject the ELF at this point, whilst it's
844 * still possible to return an error to the code that invoked
847 retval = arch_check_elf(&loc->elf_ex,
848 !!interpreter, &loc->interp_elf_ex,
851 goto out_free_dentry;
853 /* Flush all traces of the currently running executable */
854 retval = flush_old_exec(bprm);
856 goto out_free_dentry;
858 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
859 may depend on the personality. */
860 SET_PERSONALITY2(loc->elf_ex, &arch_state);
861 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
862 current->personality |= READ_IMPLIES_EXEC;
864 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
865 current->flags |= PF_RANDOMIZE;
867 setup_new_exec(bprm);
868 install_exec_creds(bprm);
870 /* Do this so that we can load the interpreter, if need be. We will
871 change some of these later */
872 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
875 goto out_free_dentry;
877 current->mm->start_stack = bprm->p;
879 /* Now we do a little grungy work by mmapping the ELF image into
880 the correct location in memory. */
881 for(i = 0, elf_ppnt = elf_phdata;
882 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
883 int elf_prot = 0, elf_flags;
884 unsigned long k, vaddr;
885 unsigned long total_size = 0;
887 if (elf_ppnt->p_type != PT_LOAD)
890 if (unlikely (elf_brk > elf_bss)) {
893 /* There was a PT_LOAD segment with p_memsz > p_filesz
894 before this one. Map anonymous pages, if needed,
895 and clear the area. */
896 retval = set_brk(elf_bss + load_bias,
900 goto out_free_dentry;
901 nbyte = ELF_PAGEOFFSET(elf_bss);
903 nbyte = ELF_MIN_ALIGN - nbyte;
904 if (nbyte > elf_brk - elf_bss)
905 nbyte = elf_brk - elf_bss;
906 if (clear_user((void __user *)elf_bss +
909 * This bss-zeroing can fail if the ELF
910 * file specifies odd protections. So
911 * we don't check the return value
917 if (elf_ppnt->p_flags & PF_R)
918 elf_prot |= PROT_READ;
919 if (elf_ppnt->p_flags & PF_W)
920 elf_prot |= PROT_WRITE;
921 if (elf_ppnt->p_flags & PF_X)
922 elf_prot |= PROT_EXEC;
924 elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
926 vaddr = elf_ppnt->p_vaddr;
928 * If we are loading ET_EXEC or we have already performed
929 * the ET_DYN load_addr calculations, proceed normally.
931 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
932 elf_flags |= MAP_FIXED;
933 } else if (loc->elf_ex.e_type == ET_DYN) {
935 * This logic is run once for the first LOAD Program
936 * Header for ET_DYN binaries to calculate the
937 * randomization (load_bias) for all the LOAD
938 * Program Headers, and to calculate the entire
939 * size of the ELF mapping (total_size). (Note that
940 * load_addr_set is set to true later once the
941 * initial mapping is performed.)
943 * There are effectively two types of ET_DYN
944 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
945 * and loaders (ET_DYN without INTERP, since they
946 * _are_ the ELF interpreter). The loaders must
947 * be loaded away from programs since the program
948 * may otherwise collide with the loader (especially
949 * for ET_EXEC which does not have a randomized
950 * position). For example to handle invocations of
951 * "./ld.so someprog" to test out a new version of
952 * the loader, the subsequent program that the
953 * loader loads must avoid the loader itself, so
954 * they cannot share the same load range. Sufficient
955 * room for the brk must be allocated with the
956 * loader as well, since brk must be available with
959 * Therefore, programs are loaded offset from
960 * ELF_ET_DYN_BASE and loaders are loaded into the
961 * independently randomized mmap region (0 load_bias
962 * without MAP_FIXED).
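/*
 * Concretely (the exact base and randomization are per-architecture): a PIE
 * such as /bin/foo, which carries a PT_INTERP entry, gets load_bias set to
 * ELF_ET_DYN_BASE (plus arch_mmap_rnd() when randomizing) and is mapped with
 * MAP_FIXED there, whereas invoking "./ld.so /bin/foo" loads ld.so itself
 * with a zero load_bias and lets the first vm_mmap() pick an address in the
 * mmap region.
 */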
964 if (elf_interpreter) {
965 load_bias = ELF_ET_DYN_BASE;
966 if (current->flags & PF_RANDOMIZE)
967 load_bias += arch_mmap_rnd();
968 elf_flags |= MAP_FIXED;
973 * Since load_bias is used for all subsequent loading
974 * calculations, we must lower it by the first vaddr
975 * so that the remaining calculations based on the
976 * ELF vaddrs will be correctly offset. The result
977 * is then page aligned.
979 load_bias = ELF_PAGESTART(load_bias - vaddr);
981 total_size = total_mapping_size(elf_phdata,
982 loc->elf_ex.e_phnum);
985 goto out_free_dentry;
989 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
990 elf_prot, elf_flags, total_size);
991 if (BAD_ADDR(error)) {
992 retval = IS_ERR((void *)error) ?
993 PTR_ERR((void*)error) : -EINVAL;
994 goto out_free_dentry;
997 if (!load_addr_set) {
999 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
1000 if (loc->elf_ex.e_type == ET_DYN) {
1001 load_bias += error -
1002 ELF_PAGESTART(load_bias + vaddr);
1003 load_addr += load_bias;
1004 reloc_func_desc = load_bias;
1007 k = elf_ppnt->p_vaddr;
1014 * Check to see if the section's size will overflow the
1015 * allowed task size. Note that p_filesz must always be
1016 * <= p_memsz so it is only necessary to check p_memsz.
1018 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
1019 elf_ppnt->p_memsz > TASK_SIZE ||
1020 TASK_SIZE - elf_ppnt->p_memsz < k) {
1021 /* set_brk can never work. Avoid overflows. */
1023 goto out_free_dentry;
1026 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1030 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1034 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1036 bss_prot = elf_prot;
1041 loc->elf_ex.e_entry += load_bias;
1042 elf_bss += load_bias;
1043 elf_brk += load_bias;
1044 start_code += load_bias;
1045 end_code += load_bias;
1046 start_data += load_bias;
1047 end_data += load_bias;
1049 /* Calling set_brk effectively mmaps the pages that we need
1050 * for the bss and break sections. We must do this before
1051 * mapping in the interpreter, to make sure it doesn't wind
1052 * up getting placed where the bss needs to go.
1054 retval = set_brk(elf_bss, elf_brk, bss_prot);
1056 goto out_free_dentry;
1057 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
1058 retval = -EFAULT; /* Nobody gets to see this, but.. */
1059 goto out_free_dentry;
1062 if (elf_interpreter) {
1063 unsigned long interp_map_addr = 0;
1065 elf_entry = load_elf_interp(&loc->interp_elf_ex,
1068 load_bias, interp_elf_phdata);
1069 if (!IS_ERR((void *)elf_entry)) {
1071 * load_elf_interp() returns relocation
1074 interp_load_addr = elf_entry;
1075 elf_entry += loc->interp_elf_ex.e_entry;
1077 if (BAD_ADDR(elf_entry)) {
1078 retval = IS_ERR((void *)elf_entry) ?
1079 (int)elf_entry : -EINVAL;
1080 goto out_free_dentry;
1082 reloc_func_desc = interp_load_addr;
1084 allow_write_access(interpreter);
1086 kfree(elf_interpreter);
1088 elf_entry = loc->elf_ex.e_entry;
1089 if (BAD_ADDR(elf_entry)) {
1091 goto out_free_dentry;
1095 kfree(interp_elf_phdata);
1098 set_binfmt(&elf_format);
1100 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
1101 retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
1104 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1106 retval = create_elf_tables(bprm, &loc->elf_ex,
1107 load_addr, interp_load_addr);
1110 /* N.B. passed_fileno might not be initialized? */
1111 current->mm->end_code = end_code;
1112 current->mm->start_code = start_code;
1113 current->mm->start_data = start_data;
1114 current->mm->end_data = end_data;
1115 current->mm->start_stack = bprm->p;
1117 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
1118 current->mm->brk = current->mm->start_brk =
1119 arch_randomize_brk(current->mm);
1120 #ifdef compat_brk_randomized
1121 current->brk_randomized = 1;
1125 if (current->personality & MMAP_PAGE_ZERO) {
1126 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1127 and some applications "depend" upon this behavior.
1128 Since we do not have the power to recompile these, we
1129 emulate the SVr4 behavior. Sigh. */
1130 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
1131 MAP_FIXED | MAP_PRIVATE, 0);
1134 #ifdef ELF_PLAT_INIT
1136 * The ABI may specify that certain registers be set up in special
1137 * ways (on i386 %edx is the address of a DT_FINI function, for
1138 * example). In addition, it may also specify (e.g. PowerPC64 ELF)
1139 * that the e_entry field is the address of the function descriptor
1140 * for the startup routine, rather than the address of the startup
1141 * routine itself. This macro performs whatever initialization to
1142 * the regs structure is required as well as any relocations to the
1143 * function descriptor entries when executing dynamically linked apps.
1145 ELF_PLAT_INIT(regs, reloc_func_desc);
1148 start_thread(regs, elf_entry, bprm->p);
1157 kfree(interp_elf_phdata);
1158 allow_write_access(interpreter);
1162 kfree(elf_interpreter);
1168 #ifdef CONFIG_USELIB
1169 /* This is really simpleminded and specialized - we are loading an
1170 a.out library that is given an ELF header. */
1171 static int load_elf_library(struct file *file)
1173 struct elf_phdr *elf_phdata;
1174 struct elf_phdr *eppnt;
1175 unsigned long elf_bss, bss, len;
1176 int retval, error, i, j;
1177 struct elfhdr elf_ex;
1180 retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
1181 if (retval != sizeof(elf_ex))
1184 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1187 /* First of all, some simple consistency checks */
1188 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1189 !elf_check_arch(&elf_ex) || !file->f_op->mmap)
1192 /* Now read in all of the header information */
1194 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1195 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1198 elf_phdata = kmalloc(j, GFP_KERNEL);
1204 retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
1208 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1209 if ((eppnt + i)->p_type == PT_LOAD)
1214 while (eppnt->p_type != PT_LOAD)
1217 /* Now use mmap to map the library into memory. */
1218 error = vm_mmap(file,
1219 ELF_PAGESTART(eppnt->p_vaddr),
1221 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1222 PROT_READ | PROT_WRITE | PROT_EXEC,
1223 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1225 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1226 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1229 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1230 if (padzero(elf_bss)) {
1235 len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
1237 bss = eppnt->p_memsz + eppnt->p_vaddr;
1239 error = vm_brk(len, bss - len);
1250 #endif /* #ifdef CONFIG_USELIB */
1252 #ifdef CONFIG_ELF_CORE
1256 * Modelled on fs/exec.c:aout_core_dump()
1261 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1262 * that are useful for post-mortem analysis are included in every core dump.
1263 * In that way we ensure that the core dump is fully interpretable later
1264 * without matching up the same kernel and hardware config to see what PC values
1265 * meant. These special mappings include - vDSO, vsyscall, and other
1266 * architecture specific mappings
1268 static bool always_dump_vma(struct vm_area_struct *vma)
1270 /* Any vsyscall mappings? */
1271 if (vma == get_gate_vma(vma->vm_mm))
1275 * Assume that all vmas with a .name op should always be dumped.
1276 * If this changes, a new vm_ops field can easily be added.
1278 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1282 * arch_vma_name() returns non-NULL for special architecture mappings,
1283 * such as vDSO sections.
1285 if (arch_vma_name(vma))
1292 * Decide what to dump of a segment, part, all or none.
1294 static unsigned long vma_dump_size(struct vm_area_struct *vma,
1295 unsigned long mm_flags)
1297 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
1299 /* always dump the vdso and vsyscall sections */
1300 if (always_dump_vma(vma))
1303 if (vma->vm_flags & VM_DONTDUMP)
1306 /* support for DAX */
1307 if (vma_is_dax(vma)) {
1308 if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
1310 if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
1315 /* Hugetlb memory check */
1316 if (vma->vm_flags & VM_HUGETLB) {
1317 if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
1319 if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
1324 /* Do not dump I/O mapped devices or special mappings */
1325 if (vma->vm_flags & VM_IO)
1328 /* By default, dump shared memory if mapped from an anonymous file. */
1329 if (vma->vm_flags & VM_SHARED) {
1330 if (file_inode(vma->vm_file)->i_nlink == 0 ?
1331 FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
1336 /* Dump segments that have been written to. */
1337 if (vma->anon_vma && FILTER(ANON_PRIVATE))
1339 if (vma->vm_file == NULL)
1342 if (FILTER(MAPPED_PRIVATE))
1346 * If this looks like the beginning of a DSO or executable mapping,
1347 * check for an ELF header. If we find one, dump the first page to
1348 * aid in determining what was mapped here.
1350 if (FILTER(ELF_HEADERS) &&
1351 vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
1352 u32 __user *header = (u32 __user *) vma->vm_start;
1354 mm_segment_t fs = get_fs();
1356 * Doing it this way gets the constant folded by GCC.
1360 char elfmag[SELFMAG];
1362 BUILD_BUG_ON(SELFMAG != sizeof word);
1363 magic.elfmag[EI_MAG0] = ELFMAG0;
1364 magic.elfmag[EI_MAG1] = ELFMAG1;
1365 magic.elfmag[EI_MAG2] = ELFMAG2;
1366 magic.elfmag[EI_MAG3] = ELFMAG3;
1368 * Switch to the user "segment" for get_user(),
1369 * then put back what elf_core_dump() had in place.
1372 if (unlikely(get_user(word, header)))
1375 if (word == magic.cmp)
1384 return vma->vm_end - vma->vm_start;
1387 /* An ELF note in memory */
1392 unsigned int datasz;
1396 static int notesize(struct memelfnote *en)
1400 sz = sizeof(struct elf_note);
1401 sz += roundup(strlen(en->name) + 1, 4);
1402 sz += roundup(en->datasz, 4);
1407 static int writenote(struct memelfnote *men, struct coredump_params *cprm)
1410 en.n_namesz = strlen(men->name) + 1;
1411 en.n_descsz = men->datasz;
1412 en.n_type = men->type;
1414 return dump_emit(cprm, &en, sizeof(en)) &&
1415 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1416 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
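/*
 * Example of the resulting record for a "CORE"/NT_PRSTATUS note: the 12-byte
 * header carries n_namesz = 5 and n_type = NT_PRSTATUS, "CORE\0" is padded
 * out to 8 bytes, and the prstatus payload is padded to the next 4-byte
 * boundary - exactly the sizes notesize() adds up above.
 */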
1419 static void fill_elf_header(struct elfhdr *elf, int segs,
1420 u16 machine, u32 flags)
1422 memset(elf, 0, sizeof(*elf));
1424 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1425 elf->e_ident[EI_CLASS] = ELF_CLASS;
1426 elf->e_ident[EI_DATA] = ELF_DATA;
1427 elf->e_ident[EI_VERSION] = EV_CURRENT;
1428 elf->e_ident[EI_OSABI] = ELF_OSABI;
1430 elf->e_type = ET_CORE;
1431 elf->e_machine = machine;
1432 elf->e_version = EV_CURRENT;
1433 elf->e_phoff = sizeof(struct elfhdr);
1434 elf->e_flags = flags;
1435 elf->e_ehsize = sizeof(struct elfhdr);
1436 elf->e_phentsize = sizeof(struct elf_phdr);
1437 elf->e_phnum = segs;
1442 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
1444 phdr->p_type = PT_NOTE;
1445 phdr->p_offset = offset;
1448 phdr->p_filesz = sz;
1455 static void fill_note(struct memelfnote *note, const char *name, int type,
1456 unsigned int sz, void *data)
1466 * fill up all the fields in prstatus from the given task struct, except
1467 * registers which need to be filled up separately.
1469 static void fill_prstatus(struct elf_prstatus *prstatus,
1470 struct task_struct *p, long signr)
1472 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1473 prstatus->pr_sigpend = p->pending.signal.sig[0];
1474 prstatus->pr_sighold = p->blocked.sig[0];
1476 prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1478 prstatus->pr_pid = task_pid_vnr(p);
1479 prstatus->pr_pgrp = task_pgrp_vnr(p);
1480 prstatus->pr_sid = task_session_vnr(p);
1481 if (thread_group_leader(p)) {
1482 struct task_cputime cputime;
1485 * This is the record for the group leader. It shows the
1486 * group-wide total, not its individual thread total.
1488 thread_group_cputime(p, &cputime);
1489 prstatus->pr_utime = ns_to_timeval(cputime.utime);
1490 prstatus->pr_stime = ns_to_timeval(cputime.stime);
1494 task_cputime(p, &utime, &stime);
1495 prstatus->pr_utime = ns_to_timeval(utime);
1496 prstatus->pr_stime = ns_to_timeval(stime);
1499 prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
1500 prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
1503 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1504 struct mm_struct *mm)
1506 const struct cred *cred;
1507 unsigned int i, len;
1509 /* first copy the parameters from user space */
1510 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1512 len = mm->arg_end - mm->arg_start;
1513 if (len >= ELF_PRARGSZ)
1514 len = ELF_PRARGSZ-1;
1515 if (copy_from_user(&psinfo->pr_psargs,
1516 (const char __user *)mm->arg_start, len))
1518 for(i = 0; i < len; i++)
1519 if (psinfo->pr_psargs[i] == 0)
1520 psinfo->pr_psargs[i] = ' ';
1521 psinfo->pr_psargs[len] = 0;
1524 psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1526 psinfo->pr_pid = task_pid_vnr(p);
1527 psinfo->pr_pgrp = task_pgrp_vnr(p);
1528 psinfo->pr_sid = task_session_vnr(p);
1530 i = p->state ? ffz(~p->state) + 1 : 0;
1531 psinfo->pr_state = i;
1532 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
1533 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1534 psinfo->pr_nice = task_nice(p);
1535 psinfo->pr_flag = p->flags;
1537 cred = __task_cred(p);
1538 SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
1539 SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
1541 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1546 static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1548 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1552 while (auxv[i - 2] != AT_NULL);
1553 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1556 static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
1557 const siginfo_t *siginfo)
1559 mm_segment_t old_fs = get_fs();
1561 copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
1563 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
1566 #define MAX_FILE_NOTE_SIZE (4*1024*1024)
1568 * Format of NT_FILE note:
1570 * long count -- how many files are mapped
1571 * long page_size -- units for file_ofs
1572 * array of [COUNT] elements of
1576 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
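/*
 * The same layout expressed as C, purely for illustration (the note is
 * emitted as raw user_long_t words, not via a struct like this):
 *
 *	struct {
 *		user_long_t count;
 *		user_long_t page_size;
 *		struct {
 *			user_long_t start;
 *			user_long_t end;
 *			user_long_t file_ofs;	(in page_size units)
 *		} entry[];
 *	};
 *	followed by count NUL-terminated path strings
 */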
1578 static int fill_files_note(struct memelfnote *note)
1580 struct vm_area_struct *vma;
1581 unsigned count, size, names_ofs, remaining, n;
1583 user_long_t *start_end_ofs;
1584 char *name_base, *name_curpos;
1586 /* *Estimated* file count and total data size needed */
1587 count = current->mm->map_count;
1590 names_ofs = (2 + 3 * count) * sizeof(data[0]);
1592 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
1594 size = round_up(size, PAGE_SIZE);
1595 data = vmalloc(size);
1599 start_end_ofs = data + 2;
1600 name_base = name_curpos = ((char *)data) + names_ofs;
1601 remaining = size - names_ofs;
1603 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1605 const char *filename;
1607 file = vma->vm_file;
1610 filename = file_path(file, name_curpos, remaining);
1611 if (IS_ERR(filename)) {
1612 if (PTR_ERR(filename) == -ENAMETOOLONG) {
1614 size = size * 5 / 4;
1620 /* file_path() fills at the end, move name down */
1621 /* n = strlen(filename) + 1: */
1622 n = (name_curpos + remaining) - filename;
1623 remaining = filename - name_curpos;
1624 memmove(name_curpos, filename, n);
1627 *start_end_ofs++ = vma->vm_start;
1628 *start_end_ofs++ = vma->vm_end;
1629 *start_end_ofs++ = vma->vm_pgoff;
1633 /* Now we know exact count of files, can store it */
1635 data[1] = PAGE_SIZE;
1637 * The final count is usually less than current->mm->map_count,
1638 * so we need to move the filenames down.
1640 n = current->mm->map_count - count;
1642 unsigned shift_bytes = n * 3 * sizeof(data[0]);
1643 memmove(name_base - shift_bytes, name_base,
1644 name_curpos - name_base);
1645 name_curpos -= shift_bytes;
1648 size = name_curpos - (char *)data;
1649 fill_note(note, "CORE", NT_FILE, size, data);
1653 #ifdef CORE_DUMP_USE_REGSET
1654 #include <linux/regset.h>
1656 struct elf_thread_core_info {
1657 struct elf_thread_core_info *next;
1658 struct task_struct *task;
1659 struct elf_prstatus prstatus;
1660 struct memelfnote notes[0];
1663 struct elf_note_info {
1664 struct elf_thread_core_info *thread;
1665 struct memelfnote psinfo;
1666 struct memelfnote signote;
1667 struct memelfnote auxv;
1668 struct memelfnote files;
1669 user_siginfo_t csigdata;
1675 * When a regset has a writeback hook, we call it on each thread before
1676 * dumping user memory. On register window machines, this makes sure the
1677 * user memory backing the register data is up to date before we read it.
1679 static void do_thread_regset_writeback(struct task_struct *task,
1680 const struct user_regset *regset)
1682 if (regset->writeback)
1683 regset->writeback(task, regset, 1);
1686 #ifndef PRSTATUS_SIZE
1687 #define PRSTATUS_SIZE(S, R) sizeof(S)
1690 #ifndef SET_PR_FPVALID
1691 #define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
1694 static int fill_thread_core_info(struct elf_thread_core_info *t,
1695 const struct user_regset_view *view,
1696 long signr, size_t *total)
1699 unsigned int regset_size = view->regsets[0].n * view->regsets[0].size;
1702 * NT_PRSTATUS is the one special case, because the regset data
1703 * goes into the pr_reg field inside the note contents, rather
1704 * than being the whole note contents. We fill the rest in here.
1705 * We assume that regset 0 is NT_PRSTATUS.
1707 fill_prstatus(&t->prstatus, t->task, signr);
1708 (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset_size,
1709 &t->prstatus.pr_reg, NULL);
1711 fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
1712 PRSTATUS_SIZE(t->prstatus, regset_size), &t->prstatus);
1713 *total += notesize(&t->notes[0]);
1715 do_thread_regset_writeback(t->task, &view->regsets[0]);
1718 * Each other regset might generate a note too. For each regset
1719 * that has no core_note_type or is inactive, we leave t->notes[i]
1720 * all zero and we'll know to skip writing it later.
1722 for (i = 1; i < view->n; ++i) {
1723 const struct user_regset *regset = &view->regsets[i];
1724 do_thread_regset_writeback(t->task, regset);
1725 if (regset->core_note_type && regset->get &&
1726 (!regset->active || regset->active(t->task, regset))) {
1728 size_t size = regset->n * regset->size;
1729 void *data = kmalloc(size, GFP_KERNEL);
1730 if (unlikely(!data))
1732 ret = regset->get(t->task, regset,
1733 0, size, data, NULL);
1737 if (regset->core_note_type != NT_PRFPREG)
1738 fill_note(&t->notes[i], "LINUX",
1739 regset->core_note_type,
1742 SET_PR_FPVALID(&t->prstatus,
1744 fill_note(&t->notes[i], "CORE",
1745 NT_PRFPREG, size, data);
1747 *total += notesize(&t->notes[i]);
1755 static int fill_note_info(struct elfhdr *elf, int phdrs,
1756 struct elf_note_info *info,
1757 const siginfo_t *siginfo, struct pt_regs *regs)
1759 struct task_struct *dump_task = current;
1760 const struct user_regset_view *view = task_user_regset_view(dump_task);
1761 struct elf_thread_core_info *t;
1762 struct elf_prpsinfo *psinfo;
1763 struct core_thread *ct;
1767 info->thread = NULL;
1769 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1770 if (psinfo == NULL) {
1771 info->psinfo.data = NULL; /* So we don't free this wrongly */
1775 fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1778 * Figure out how many notes we're going to need for each thread.
1780 info->thread_notes = 0;
1781 for (i = 0; i < view->n; ++i)
1782 if (view->regsets[i].core_note_type != 0)
1783 ++info->thread_notes;
1786 * Sanity check. We rely on regset 0 being in NT_PRSTATUS,
1787 * since it is our one special case.
1789 if (unlikely(info->thread_notes == 0) ||
1790 unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
1796 * Initialize the ELF file header.
1798 fill_elf_header(elf, phdrs,
1799 view->e_machine, view->e_flags);
1802 * Allocate a structure for each thread.
1804 for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
1805 t = kzalloc(offsetof(struct elf_thread_core_info,
1806 notes[info->thread_notes]),
1812 if (ct->task == dump_task || !info->thread) {
1813 t->next = info->thread;
1817 * Make sure to keep the original task at
1818 * the head of the list.
1820 t->next = info->thread->next;
1821 info->thread->next = t;
1826 * Now fill in each thread's information.
1828 for (t = info->thread; t != NULL; t = t->next)
1829 if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
1833 * Fill in the two process-wide notes.
1835 fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
1836 info->size += notesize(&info->psinfo);
1838 fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
1839 info->size += notesize(&info->signote);
1841 fill_auxv_note(&info->auxv, current->mm);
1842 info->size += notesize(&info->auxv);
1844 if (fill_files_note(&info->files) == 0)
1845 info->size += notesize(&info->files);
1850 static size_t get_note_info_size(struct elf_note_info *info)
1856 * Write all the notes for each thread. When writing the first thread, the
1857 * process-wide notes are interleaved after the first thread-specific note.
1859 static int write_note_info(struct elf_note_info *info,
1860 struct coredump_params *cprm)
1863 struct elf_thread_core_info *t = info->thread;
1868 if (!writenote(&t->notes[0], cprm))
1871 if (first && !writenote(&info->psinfo, cprm))
1873 if (first && !writenote(&info->signote, cprm))
1875 if (first && !writenote(&info->auxv, cprm))
1877 if (first && info->files.data &&
1878 !writenote(&info->files, cprm))
1881 for (i = 1; i < info->thread_notes; ++i)
1882 if (t->notes[i].data &&
1883 !writenote(&t->notes[i], cprm))
1893 static void free_note_info(struct elf_note_info *info)
1895 struct elf_thread_core_info *threads = info->thread;
1898 struct elf_thread_core_info *t = threads;
1900 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
1901 for (i = 1; i < info->thread_notes; ++i)
1902 kfree(t->notes[i].data);
1905 kfree(info->psinfo.data);
1906 vfree(info->files.data);
1911 /* Here is the structure in which status of each thread is captured. */
1912 struct elf_thread_status
1914 struct list_head list;
1915 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1916 elf_fpregset_t fpu; /* NT_PRFPREG */
1917 struct task_struct *thread;
1918 #ifdef ELF_CORE_COPY_XFPREGS
1919 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
1921 struct memelfnote notes[3];
1926 * In order to add the specific thread information for the elf file format,
1927 * we need to keep a linked list of every thread's pr_status and then create
1928 * a single section for them in the final core file.
1930 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1933 struct task_struct *p = t->thread;
1936 fill_prstatus(&t->prstatus, p, signr);
1937 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1939 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1942 sz += notesize(&t->notes[0]);
1944 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
1946 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
1949 sz += notesize(&t->notes[1]);
1952 #ifdef ELF_CORE_COPY_XFPREGS
1953 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1954 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
1955 sizeof(t->xfpu), &t->xfpu);
1957 sz += notesize(&t->notes[2]);
1963 struct elf_note_info {
1964 struct memelfnote *notes;
1965 struct memelfnote *notes_files;
1966 struct elf_prstatus *prstatus; /* NT_PRSTATUS */
1967 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1968 struct list_head thread_list;
1969 elf_fpregset_t *fpu;
1970 #ifdef ELF_CORE_COPY_XFPREGS
1971 elf_fpxregset_t *xfpu;
1973 user_siginfo_t csigdata;
1974 int thread_status_size;
1978 static int elf_note_info_init(struct elf_note_info *info)
1980 memset(info, 0, sizeof(*info));
1981 INIT_LIST_HEAD(&info->thread_list);
1983 /* Allocate space for ELF notes */
1984 info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
1987 info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
1990 info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
1991 if (!info->prstatus)
1993 info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
1996 #ifdef ELF_CORE_COPY_XFPREGS
1997 info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
2004 static int fill_note_info(struct elfhdr *elf, int phdrs,
2005 struct elf_note_info *info,
2006 const siginfo_t *siginfo, struct pt_regs *regs)
2008 struct list_head *t;
2009 struct core_thread *ct;
2010 struct elf_thread_status *ets;
2012 if (!elf_note_info_init(info))
2015 for (ct = current->mm->core_state->dumper.next;
2016 ct; ct = ct->next) {
2017 ets = kzalloc(sizeof(*ets), GFP_KERNEL);
2021 ets->thread = ct->task;
2022 list_add(&ets->list, &info->thread_list);
2025 list_for_each(t, &info->thread_list) {
2028 ets = list_entry(t, struct elf_thread_status, list);
2029 sz = elf_dump_thread_status(siginfo->si_signo, ets);
2030 info->thread_status_size += sz;
2032 /* now collect the dump for the current */
2033 memset(info->prstatus, 0, sizeof(*info->prstatus));
2034 fill_prstatus(info->prstatus, current, siginfo->si_signo);
2035 elf_core_copy_regs(&info->prstatus->pr_reg, regs);
2038 fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
2041 * Set up the notes in similar form to SVR4 core dumps made
2042 * with info from their /proc.
2045 fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
2046 sizeof(*info->prstatus), info->prstatus);
2047 fill_psinfo(info->psinfo, current->group_leader, current->mm);
2048 fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
2049 sizeof(*info->psinfo), info->psinfo);
2051 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
2052 fill_auxv_note(info->notes + 3, current->mm);
2055 if (fill_files_note(info->notes + info->numnote) == 0) {
2056 info->notes_files = info->notes + info->numnote;
2060 /* Try to dump the FPU. */
2061 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
2063 if (info->prstatus->pr_fpvalid)
2064 fill_note(info->notes + info->numnote++,
2065 "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
2066 #ifdef ELF_CORE_COPY_XFPREGS
2067 if (elf_core_copy_task_xfpregs(current, info->xfpu))
2068 fill_note(info->notes + info->numnote++,
2069 "LINUX", ELF_CORE_XFPREG_TYPE,
2070 sizeof(*info->xfpu), info->xfpu);
2076 static size_t get_note_info_size(struct elf_note_info *info)
2081 for (i = 0; i < info->numnote; i++)
2082 sz += notesize(info->notes + i);
2084 sz += info->thread_status_size;
2089 static int write_note_info(struct elf_note_info *info,
2090 struct coredump_params *cprm)
2093 struct list_head *t;
2095 for (i = 0; i < info->numnote; i++)
2096 if (!writenote(info->notes + i, cprm))
2099 /* write out the thread status notes section */
2100 list_for_each(t, &info->thread_list) {
2101 struct elf_thread_status *tmp =
2102 list_entry(t, struct elf_thread_status, list);
2104 for (i = 0; i < tmp->num_notes; i++)
2105 if (!writenote(&tmp->notes[i], cprm))
2112 static void free_note_info(struct elf_note_info *info)
2114 while (!list_empty(&info->thread_list)) {
2115 struct list_head *tmp = info->thread_list.next;
2117 kfree(list_entry(tmp, struct elf_thread_status, list));
2120 /* Free data possibly allocated by fill_files_note(): */
2121 if (info->notes_files)
2122 vfree(info->notes_files->data);
2124 kfree(info->prstatus);
2125 kfree(info->psinfo);
2128 #ifdef ELF_CORE_COPY_XFPREGS
2135 static struct vm_area_struct *first_vma(struct task_struct *tsk,
2136 struct vm_area_struct *gate_vma)
2138 struct vm_area_struct *ret = tsk->mm->mmap;
2145 * Helper function for iterating across a vma list. It ensures that the caller
2146 * will visit `gate_vma' prior to terminating the search.
2148 static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2149 struct vm_area_struct *gate_vma)
2151 struct vm_area_struct *ret;
2153 ret = this_vma->vm_next;
2156 if (this_vma == gate_vma)
2161 static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2162 elf_addr_t e_shoff, int segs)
2164 elf->e_shoff = e_shoff;
2165 elf->e_shentsize = sizeof(*shdr4extnum);
2167 elf->e_shstrndx = SHN_UNDEF;
2169 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2171 shdr4extnum->sh_type = SHT_NULL;
2172 shdr4extnum->sh_size = elf->e_shnum;
2173 shdr4extnum->sh_link = elf->e_shstrndx;
2174 shdr4extnum->sh_info = segs;
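/*
 * Example: a process with 70000 mappings overflows the 16-bit e_phnum, so
 * e_phnum is set to PN_XNUM (0xffff) and the true segment count goes into
 * sh_info of this extra section header at index 0; readers such as readelf
 * know to look there whenever e_phnum == PN_XNUM.
 */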
2180 * This is a two-pass process; first we find the offsets of the bits,
2181 * and then they are actually written out. If we run out of core limit
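/*
 * Layout of the resulting core file (offsets are computed in the first pass
 * below, the bytes are emitted in the second):
 *
 *	ELF header
 *	program headers: one PT_NOTE, one PT_LOAD per dumped vma, arch extras
 *	note data (prstatus, prpsinfo, siginfo, auxv, NT_FILE, per-thread sets)
 *	padding up to an ELF_EXEC_PAGESIZE boundary
 *	the dumped vma contents, back to back
 *	one extra section header, only when e_phnum == PN_XNUM
 */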
2184 static int elf_core_dump(struct coredump_params *cprm)
2189 size_t vma_data_size = 0;
2190 struct vm_area_struct *vma, *gate_vma;
2191 struct elfhdr *elf = NULL;
2192 loff_t offset = 0, dataoff;
2193 struct elf_note_info info = { };
2194 struct elf_phdr *phdr4note = NULL;
2195 struct elf_shdr *shdr4extnum = NULL;
2198 elf_addr_t *vma_filesz = NULL;
2201 * We no longer stop all VM operations.
2203 * This is because those processes that could possibly change map_count
2204 * or the mmap / vma pages are now blocked in do_exit on current
2205 * finishing this core dump.
2207 * Only ptrace can touch these memory addresses, but it doesn't change
2208 * the map_count or the pages allocated. So no possibility of crashing
2209 * exists while dumping the mm->vm_next areas to the core file.
2212 /* alloc memory for large data structures: too large to be on stack */
2213 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
2217 * The number of segs is recorded in the ELF header as a 16-bit value.
2218 * Please check the DEFAULT_MAX_MAP_COUNT definition when you modify this.
2220 segs = current->mm->map_count;
2221 segs += elf_core_extra_phdrs();
2223 gate_vma = get_gate_vma(current->mm);
2224 if (gate_vma != NULL)
2227 /* for notes section */
2230 /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
2231 * this, the kernel supports extended numbering. Have a look at
2232 * include/linux/elf.h for further information. */
2233 e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
2236 * Collect all the non-memory information about the process for the
2237 * notes. This also sets up the file header.
2239 if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
2247 offset += sizeof(*elf); /* Elf header */
2248 offset += segs * sizeof(struct elf_phdr); /* Program headers */
2250 /* Write notes phdr entry */
2252 size_t sz = get_note_info_size(&info);
2254 sz += elf_coredump_extra_notes_size();
2256 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
2260 fill_elf_note_phdr(phdr4note, sz, offset);
2264 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2266 if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
2268 vma_filesz = vmalloc((segs - 1) * sizeof(*vma_filesz));
2272 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2273 vma = next_vma(vma, gate_vma)) {
2274 unsigned long dump_size;
2276 dump_size = vma_dump_size(vma, cprm->mm_flags);
2277 vma_filesz[i++] = dump_size;
2278 vma_data_size += dump_size;
2281 offset += vma_data_size;
2282 offset += elf_core_extra_data_size();
2285 if (e_phnum == PN_XNUM) {
2286 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
2289 fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
2294 if (!dump_emit(cprm, elf, sizeof(*elf)))
2297 if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
2300 /* Write program headers for segments dump */
2301 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2302 vma = next_vma(vma, gate_vma)) {
2303 struct elf_phdr phdr;
2305 phdr.p_type = PT_LOAD;
2306 phdr.p_offset = offset;
2307 phdr.p_vaddr = vma->vm_start;
2309 phdr.p_filesz = vma_filesz[i++];
2310 phdr.p_memsz = vma->vm_end - vma->vm_start;
2311 offset += phdr.p_filesz;
2312 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
2313 if (vma->vm_flags & VM_WRITE)
2314 phdr.p_flags |= PF_W;
2315 if (vma->vm_flags & VM_EXEC)
2316 phdr.p_flags |= PF_X;
2317 phdr.p_align = ELF_EXEC_PAGESIZE;
2319 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
2323 if (!elf_core_write_extra_phdrs(cprm, offset))
2326 /* write out the notes section */
2327 if (!write_note_info(&info, cprm))
2330 if (elf_coredump_extra_notes_write(cprm))
2334 if (!dump_skip(cprm, dataoff - cprm->pos))
2337 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2338 vma = next_vma(vma, gate_vma)) {
2342 end = vma->vm_start + vma_filesz[i++];
2344 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
2348 page = get_dump_page(addr);
2350 void *kaddr = kmap(page);
2351 stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
2355 stop = !dump_skip(cprm, PAGE_SIZE);
2360 dump_truncate(cprm);
2362 if (!elf_core_write_extra_data(cprm))
2365 if (e_phnum == PN_XNUM) {
2366 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
2374 free_note_info(&info);
2383 #endif /* CONFIG_ELF_CORE */
2385 static int __init init_elf_binfmt(void)
2387 register_binfmt(&elf_format);
2391 static void __exit exit_elf_binfmt(void)
2393 /* Remove the ELF loader. */
2394 unregister_binfmt(&elf_format);
2397 core_initcall(init_elf_binfmt);
2398 module_exit(exit_elf_binfmt);
2399 MODULE_LICENSE("GPL");