/* This is the Linux kernel elf-loading code, ported into user space */
#define ELF_START_MMAP 0x80000000
 * This is used to ensure we don't load something for the wrong architecture.
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
 * These are used to set parameters in the core dumps.
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_386
/* The SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
   starts, %edx contains a pointer to a function which might be
   registered using `atexit'. This provides a means for the
   dynamic linker to call DT_FINI functions for shared libraries
   that have been loaded before the code runs.
   A value of 0 tells us that we have no such handler. */
#define ELF_PLAT_INIT(_r) _r->edx = 0
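/* Note (added): a guest C runtime that follows the convention above would
 * typically do something like "if (edx) atexit((void (*)(void))edx);" on
 * entry, so clearing %edx simply means there is no termination function
 * to register. */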
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000
#define elf_check_arch(x) ( (x) == EM_ARM )
#define ELF_CLASS ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA ELFDATA2MSB
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_ARM
#define ELF_PLAT_INIT(_r) _r->ARM_r0 = 0
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    target_long *stack = (void *)infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    regs->ARM_pc = infop->entry;
    regs->ARM_sp = infop->start_stack;
    regs->ARM_r2 = tswapl(stack[2]); /* envp */
    regs->ARM_r1 = tswapl(stack[1]); /* argv */
    /* XXX: it seems that r0 is zeroed afterwards! */
    // regs->ARM_r0 = tswapl(stack[0]); /* argc */
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
 * MAX_ARG_PAGES defines the number of pages allocated for arguments
 * and environment for the new program. 32 should suffice; this gives
 * a maximum env+arg size of 128kB with 4kB pages!
#define MAX_ARG_PAGES 32
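/* For reference (added; assumes 4kB target pages):
 * 32 pages * 4096 bytes/page = 131072 bytes = 128 kB of combined
 * argument and environment space. */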
 * This structure is used to hold the arguments that are
 * used when loading binaries.
    unsigned long page[MAX_ARG_PAGES];
    char * filename; /* Name of binary */
    unsigned long loader, exec;
    int dont_iput; /* binfmt handler has put inode */
    unsigned int a_info; /* Use macros N_MAGIC, etc for access */
    unsigned int a_text; /* length of text, in bytes */
    unsigned int a_data; /* length of data, in bytes */
    unsigned int a_bss; /* length of uninitialized data area, in bytes */
    unsigned int a_syms; /* length of symbol table data in file, in bytes */
    unsigned int a_entry; /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
#define N_MAGIC(exec) ((exec).a_info & 0xffff)
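/* Illustration (added): a_info packs the a.out magic number into its low
 * 16 bits, so checks such as N_MAGIC(interp_ex) == ZMAGIC further down
 * identify an a.out-format interpreter. */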
/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)
/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
/* from personality.h */
/* Flags for bug emulation. These occupy the top three bytes. */
#define STICKY_TIMEOUTS 0x4000000
#define WHOLE_SECONDS 0x2000000
/* Personality types. These go in the low byte. Avoid using the top bit,
 * it will conflict with error returns.
#define PER_MASK (0x00ff)
#define PER_LINUX (0x0000)
#define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
#define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
#define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
#define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
#define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
#define PER_BSD (0x0006)
#define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
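/* Worked example (added, assuming a 4096-byte target page): for
 * _v = 0x08048154,
 *   TARGET_ELF_PAGESTART(_v)  == 0x08048000  (address rounded down to a page)
 *   TARGET_ELF_PAGEOFFSET(_v) == 0x154       (offset within that page)
 * The mmap calls below always map from PAGESTART and add PAGEOFFSET back in. */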
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
#define DLINFO_ITEMS 12
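/* Note (added): DLINFO_ITEMS is the number of NEW_AUX_ENT() pairs written by
 * create_elf_tables() below (AT_PHDR through AT_NULL, twelve in all); it is
 * used there to reserve room on the stack for the auxiliary vector. */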
#define put_user(x,ptr) (void)(*(ptr) = (typeof(*ptr))(x))
#define get_user(ptr) (typeof(*ptr))(*(ptr))
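/* Note (added): these are simplified user-space stand-ins for the kernel's
 * put_user()/get_user() accessors; since the guest image lives in our own
 * address space here, a plain assignment or dereference is enough. For
 * example, put_user(tswapl(argc), --sp) further down stores argc as a
 * target-order word on the guest stack. */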
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
static inline void memcpy_tofs(void * to, const void * from, unsigned long n)
extern unsigned long x86_stack_size;
static int load_aout_interp(void * exptr, int interp_fd);
static void bswap_ehdr(Elf32_Ehdr *ehdr)
    bswap16s(&ehdr->e_type); /* Object file type */
    bswap16s(&ehdr->e_machine); /* Architecture */
    bswap32s(&ehdr->e_version); /* Object file version */
    bswap32s(&ehdr->e_entry); /* Entry point virtual address */
    bswap32s(&ehdr->e_phoff); /* Program header table file offset */
    bswap32s(&ehdr->e_shoff); /* Section header table file offset */
    bswap32s(&ehdr->e_flags); /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
    bswap16s(&ehdr->e_phnum); /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
    bswap16s(&ehdr->e_shnum); /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
static void bswap_phdr(Elf32_Phdr *phdr)
    bswap32s(&phdr->p_type); /* Segment type */
    bswap32s(&phdr->p_offset); /* Segment file offset */
    bswap32s(&phdr->p_vaddr); /* Segment virtual address */
    bswap32s(&phdr->p_paddr); /* Segment physical address */
    bswap32s(&phdr->p_filesz); /* Segment size in file */
    bswap32s(&phdr->p_memsz); /* Segment size in memory */
    bswap32s(&phdr->p_flags); /* Segment flags */
    bswap32s(&phdr->p_align); /* Segment alignment */
static void bswap_shdr(Elf32_Shdr *shdr)
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswap32s(&shdr->sh_flags);
    bswap32s(&shdr->sh_addr);
    bswap32s(&shdr->sh_offset);
    bswap32s(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswap32s(&shdr->sh_addralign);
    bswap32s(&shdr->sh_entsize);
static void bswap_sym(Elf32_Sym *sym)
    bswap32s(&sym->st_name);
    bswap32s(&sym->st_value);
    bswap32s(&sym->st_size);
    bswap16s(&sym->st_shndx);
static void * get_free_page(void)
    /* User-space version of kernel get_free_page. Returns a page-aligned
     * page-sized chunk of memory.
    retval = (void *)target_mmap(0, host_page_size, PROT_READ|PROT_WRITE,
                                 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if((long)retval == -1) {
        perror("get_free_page");
static void free_page(void * pageaddr)
    target_munmap((unsigned long)pageaddr, host_page_size);
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel memory. These are in a format ready
 * to be put directly into the top of new user memory.
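 *
 * Illustration (added, not part of the original comment): p starts near
 * the top of the MAX_ARG_PAGES block and is decremented by the length of
 * each string, so the strings end up packed back-to-back just below the
 * top of the future stack, ready to be copied there by setup_arg_pages().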
static unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
    char *tmp, *tmp1, *pag = NULL;
    return 0; /* bullet-proofing */
    if (!(tmp1 = tmp = get_user(argv+argc))) {
        fprintf(stderr, "VFS: argc is wrong");
    while (get_user(tmp++));
    if (p < len) { /* this shouldn't happen - 128kB */
    offset = p % TARGET_PAGE_SIZE;
    if (!(pag = (char *) page[p/TARGET_PAGE_SIZE]) &&
        !(pag = (char *) page[p/TARGET_PAGE_SIZE] =
          (unsigned long *) get_free_page())) {
    if (len == 0 || offset == 0) {
        *(pag + offset) = get_user(tmp);
    int bytes_to_copy = (len > offset) ? offset : len;
    tmp -= bytes_to_copy;
    offset -= bytes_to_copy;
    len -= bytes_to_copy;
    memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
static int in_group_p(gid_t g)
    /* return TRUE if we're in the specified group, FALSE otherwise */
    gid_t grouplist[NGROUPS];
    ngroup = getgroups(NGROUPS, grouplist);
    for(i = 0; i < ngroup; i++) {
        if(grouplist[i] == g) {
static int count(char ** vec)
    for(i = 0; *vec; i++) {
static int prepare_binprm(struct linux_binprm *bprm)
    int retval, id_change;
    if(fstat(bprm->fd, &st) < 0) {
    if(!S_ISREG(mode)) { /* Must be regular file */
    if(!(mode & 0111)) { /* Must have at least one execute bit set */
    bprm->e_uid = geteuid();
    bprm->e_gid = getegid();
    bprm->e_uid = st.st_uid;
    if(bprm->e_uid != geteuid()) {
     * If setgid is set but no group execute bit then this
     * is a candidate for mandatory locking, not a setgid executable.
    if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
        bprm->e_gid = st.st_gid;
        if (!in_group_p(bprm->e_gid)) {
    memset(bprm->buf, 0, sizeof(bprm->buf));
    retval = lseek(bprm->fd, 0L, SEEK_SET);
    retval = read(bprm->fd, bprm->buf, 128);
    perror("prepare_binprm");
    /* return(-errno); */
unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
                              struct image_info * info)
    unsigned long stack_base, size, error;
    /* Create enough stack to hold everything. If we don't use
     * it for args, we'll use it for something else...
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, host_page_size, PROT_NONE);
    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
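    /* Illustration (added): the region just mapped is laid out as
     *   [ error, error + size )                           future stack
     *   [ error + size, error + size + host_page_size )   guard page (PROT_NONE)
     * and stack_base points MAX_ARG_PAGES pages below the top of the stack,
     * where the argument/environment pages are copied by the loop below. */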
    bprm->loader += stack_base;
    bprm->exec += stack_base;
    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        memcpy((void *)stack_base, (void *)bprm->page[i], TARGET_PAGE_SIZE);
        free_page((void *)bprm->page[i]);
        stack_base += TARGET_PAGE_SIZE;
static void set_brk(unsigned long start, unsigned long end)
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if(target_mmap(start, end - start,
                   PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss). This would
   contain the junk from the file that should not be in memory.
static void padzero(unsigned long elf_bss)
    nbyte = elf_bss & (host_page_size-1); /* was TARGET_PAGE_SIZE - JRP */
    nbyte = host_page_size - nbyte;
    fpnt = (char *) elf_bss;
static unsigned int * create_elf_tables(char *p, int argc, int envc,
                                        struct elfhdr * exec,
                                        unsigned long load_addr,
                                        unsigned long load_bias,
                                        unsigned long interp_load_addr, int ibcs,
                                        struct image_info *info)
    target_ulong *argv, *envp, *dlinfo;
     * Force 16 byte alignment here for generality.
    sp = (unsigned int *) (~15UL & (unsigned long) p);
    sp -= DLINFO_ITEMS*2;
    put_user(tswapl((target_ulong)envp),--sp);
    put_user(tswapl((target_ulong)argv),--sp);
#define NEW_AUX_ENT(id, val) \
        put_user (tswapl(id), dlinfo++); \
        put_user (tswapl(val), dlinfo++)
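    /* Note (added): each NEW_AUX_ENT() therefore writes one auxiliary-vector
     * entry as a pair of target-order words, the AT_* tag followed by its
     * value; the entries below are terminated by AT_NULL. */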
    NEW_AUX_ENT (AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT (AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT (AT_PHNUM, (target_ulong)(exec->e_phnum));
    NEW_AUX_ENT (AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT (AT_BASE, (target_ulong)(interp_load_addr));
    NEW_AUX_ENT (AT_FLAGS, (target_ulong)0);
    NEW_AUX_ENT (AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT (AT_UID, (target_ulong) getuid());
    NEW_AUX_ENT (AT_EUID, (target_ulong) geteuid());
    NEW_AUX_ENT (AT_GID, (target_ulong) getgid());
    NEW_AUX_ENT (AT_EGID, (target_ulong) getegid());
    NEW_AUX_ENT (AT_NULL, 0);
    put_user(tswapl(argc),--sp);
    info->arg_start = (unsigned int)((unsigned long)p & 0xffffffff);
    put_user(tswapl((target_ulong)p),argv++);
    while (get_user(p++)) /* nothing */ ;
    info->arg_end = info->env_start = (unsigned int)((unsigned long)p & 0xffffffff);
    put_user(tswapl((target_ulong)p),envp++);
    while (get_user(p++)) /* nothing */ ;
    info->env_end = (unsigned int)((unsigned long)p & 0xffffffff);
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
                                     unsigned long *interp_load_addr)
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    unsigned long load_addr = 0;
    int load_addr_set = 0;
    unsigned long last_bss, elf_bss;
    bswap_ehdr(interp_elf_ex);
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
    /* Now read in all of the header information */
    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    retval = read(interpreter_fd,
                  sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    perror("load_elf_interp");
    for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
    for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            unsigned long vaddr = 0;
            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
            if (error > -1024UL) {
                close(interpreter_fd);
            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
    /* Now use mmap to map the library into memory. */
    close(interpreter_fd);
     * Now fill out the bss section. First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last bss page.
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + host_page_size - 1); /* What we have mapped so far */
    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    *interp_load_addr = load_addr;
    return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
    struct elf_shdr sechdr, symtab, strtab;
    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
        if (sechdr.sh_type == SHT_SYMTAB) {
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
    return; /* Shouldn't happen... */
    /* Now we know where the strtab and symtab are. Snarf them. */
    disas_symtab = malloc(symtab.sh_size);
    disas_strtab = strings = malloc(strtab.sh_size);
    if (!disas_symtab || !disas_strtab)
    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, disas_symtab, symtab.sh_size) != symtab.sh_size)
    for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++)
        bswap_sym(disas_symtab + sizeof(struct elf_sym)*i);
    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
    disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                           struct image_info * info)
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    unsigned long load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    unsigned long mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    unsigned long elf_bss, k, elf_brk;
    char * elf_interpreter;
    unsigned long elf_entry, interp_load_addr = 0;
    unsigned long start_code, end_code, end_data;
    unsigned long elf_stack;
    char passed_fileno[6];
    ibcs2_interpreter = 0;
    elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
    if (elf_ex.e_ident[0] != 0x7f ||
        strncmp(&elf_ex.e_ident[1], "ELF",3) != 0) {
    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {
    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {
    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    retval = read(bprm->fd, (char *) elf_phdata,
                  elf_ex.e_phentsize * elf_ex.e_phnum);
    perror("load_elf_binary");
    elf_ppnt = elf_phdata;
    for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    elf_ppnt = elf_phdata;
    elf_interpreter = NULL;
    for(i=0;i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if ( elf_interpreter != NULL )
                free(elf_interpreter);
            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
            if (elf_interpreter == NULL) {
            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            perror("load_elf_binary2");
            /* If the program interpreter is one of these two,
               then assume an iBCS2 image. Otherwise assume
               a native linux image. */
            /* JRP - Need to add X86 lib dir stuff here... */
            if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            printf("Using ELF interpreter %s\n", elf_interpreter);
            retval = open(path(elf_interpreter), O_RDONLY);
            interpreter_fd = retval;
            perror(elf_interpreter);
            /* retval = -errno; */
            retval = lseek(interpreter_fd, 0, SEEK_SET);
            retval = read(interpreter_fd,bprm->buf,128);
            interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
            interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
            perror("load_elf_binary3");
            free(elf_interpreter);
    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter){
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        if (!interpreter_type) {
            free(elf_interpreter);
    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */
    if (!bprm->sh_bang) {
        if (interpreter_type == INTERPRETER_AOUT) {
            sprintf(passed_fileno, "%d", bprm->fd);
            passed_p = passed_fileno;
        if (elf_interpreter) {
            bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p);
        if (elf_interpreter) {
            free(elf_interpreter);
    /* OK, this is the point of no return */
    info->start_mmap = (unsigned long)ELF_START_MMAP;
    elf_entry = (unsigned long) elf_ex.e_entry;
    /* Do this so that we can load the interpreter, if need be. We will
       change some of these later */
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;
    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory. At this point, we assume that
     * the image should be loaded at a fixed address, not at a variable address.
    for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        if (elf_ppnt->p_type != PT_LOAD)
        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec. This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
        if (!load_addr_set) {
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
        k = elf_ppnt->p_vaddr;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    // start_data += load_bias;
    end_data += load_bias;
    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
        close(interpreter_fd);
        free(elf_interpreter);
        if (elf_entry == ~0UL) {
            printf("Unable to load interpreter\n");
    load_symbols(&elf_ex, bprm->fd);
    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
    bprm->p = (unsigned long)
        create_elf_tables((char *)bprm->p,
                          load_addr, load_bias,
                          (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
    if (interpreter_type == INTERPRETER_AOUT)
        info->arg_start += strlen(passed_fileno) + 1;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->end_data = end_data;
    info->start_stack = bprm->p;
    /* Calling set_brk effectively mmaps the pages that we need for the bss and break
    set_brk(elf_bss, elf_brk);
    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);
    if ( info->personality == PER_SVR4 )
        /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior. Sigh. */
        mapped_addr = target_mmap(0, host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
#ifdef ELF_PLAT_INIT
     * The ABI may specify that certain registers be set up in special
     * ways (on i386 %edx is the address of a DT_FINI function, for
     * example). This macro performs whatever initialization to
     * the regs structure is required.
    ELF_PLAT_INIT(regs);
    info->entry = elf_entry;
int elf_exec(const char * filename, char ** argv, char ** envp,
             struct target_pt_regs * regs, struct image_info *infop)
    struct linux_binprm bprm;
    bprm.p = TARGET_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
    for (i=0 ; i<MAX_ARG_PAGES ; i++) /* clear page-table */
    retval = open(filename, O_RDONLY);
    /* return retval; */
    bprm.filename = (char *)filename;
    bprm.argc = count(argv);
    bprm.envc = count(envp);
    retval = prepare_binprm(&bprm);
    bprm.p = copy_strings(1, &bprm.filename, bprm.page, bprm.p);
    bprm.p = copy_strings(bprm.envc,envp,bprm.page,bprm.p);
    bprm.p = copy_strings(bprm.argc,argv,bprm.page,bprm.p);
    retval = load_elf_binary(&bprm,regs,infop);
    /* success. Initialize important registers */
    init_thread(regs, infop);
    /* Something went wrong, return the inode and free the argument pages */
    for (i=0 ; i<MAX_ARG_PAGES ; i++) {
        free_page((void *)bprm.page[i]);
static int load_aout_interp(void * exptr, int interp_fd)
    printf("a.out interpreter not yet supported\n");