1 /* This is the Linux kernel elf-loading code, ported into user space */
16 /* this flag is ineffective under Linux too, should be deleted */
18 #define MAP_DENYWRITE 0
21 /* should probably go in elf.h */
28 #define ELF_START_MMAP 0x80000000
31 * This is used to ensure we don't load something for the wrong architecture.
33 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
36 * These are used to set parameters in the core dumps.
38 #define ELF_CLASS ELFCLASS32
39 #define ELF_DATA ELFDATA2LSB
40 #define ELF_ARCH EM_386
42 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
43 starts %edx contains a pointer to a function which might be
44 registered using `atexit'. This provides a means for the
45 dynamic linker to call DT_FINI functions for shared libraries
46 that have been loaded before the code runs.
48 A value of 0 tells us we have no such handler. */
49 #define ELF_PLAT_INIT(_r) _r->edx = 0
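/* For illustration only (not part of this loader): the guest's C startup
 * code typically treats that register roughly as
 *
 *     void (*rtld_fini)(void) = <value of %edx at process entry>;
 *     if (rtld_fini)
 *         atexit(rtld_fini);
 *
 * so storing 0 simply means there is no finalizer to register. The exact
 * startup code depends on the guest's C library. */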
51 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
53 regs->esp = infop->start_stack;
54 regs->eip = infop->entry;
57 #define USE_ELF_CORE_DUMP
58 #define ELF_EXEC_PAGESIZE 4096
64 #define ELF_START_MMAP 0x80000000
66 #define elf_check_arch(x) ( (x) == EM_ARM )
68 #define ELF_CLASS ELFCLASS32
69 #ifdef TARGET_WORDS_BIGENDIAN
70 #define ELF_DATA ELFDATA2MSB
72 #define ELF_DATA ELFDATA2LSB
74 #define ELF_ARCH EM_ARM
76 #define ELF_PLAT_INIT(_r) _r->ARM_r0 = 0
78 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
80 target_long *stack = (void *)infop->start_stack;
81 memset(regs, 0, sizeof(*regs));
82 regs->ARM_cpsr = 0x10;
83 regs->ARM_pc = infop->entry;
84 regs->ARM_sp = infop->start_stack;
85 regs->ARM_r2 = tswapl(stack[2]); /* envp */
86 regs->ARM_r1 = tswapl(stack[1]); /* argv */
87 /* XXX: it seems that r0 is zeroed afterwards! */
88 // regs->ARM_r0 = tswapl(stack[0]); /* argc */
91 #define USE_ELF_CORE_DUMP
92 #define ELF_EXEC_PAGESIZE 4096
98 #define ELF_START_MMAP 0x80000000
100 #define elf_check_arch(x) ( (x) == EM_SPARC )
102 #define ELF_CLASS ELFCLASS32
103 #define ELF_DATA ELFDATA2MSB
104 #define ELF_ARCH EM_SPARC
107 #define ELF_PLAT_INIT(_r)
109 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
112 regs->pc = infop->entry;
113 regs->npc = regs->pc + 4;
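/* u_regs[14] is %o6 (%sp): place it 16 words below start_stack, leaving
   room for the register window save area the SPARC ABI expects at %sp. */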
115 regs->u_regs[14] = infop->start_stack - 16 * 4;
122 #define ELF_START_MMAP 0x80000000
124 #define elf_check_arch(x) ( (x) == EM_PPC )
126 #define ELF_CLASS ELFCLASS32
127 #ifdef TARGET_WORDS_BIGENDIAN
128 #define ELF_DATA ELFDATA2MSB
130 #define ELF_DATA ELFDATA2LSB
132 #define ELF_ARCH EM_PPC
134 /* Note that this isn't exactly what the regular kernel does,
135 * but this is what the ABI wants and is needed to allow
136 * execution of PPC BSD programs.
138 #define ELF_PLAT_INIT(_r) \
140 target_ulong *pos = (target_ulong *)bprm->p, tmp = 1; \
141 _r->gpr[3] = bprm->argc; \
142 _r->gpr[4] = (unsigned long)++pos; \
143 for (; tmp != 0; pos++) \
145 _r->gpr[5] = (unsigned long)pos; \
149 * We need to put in some extra aux table entries to tell glibc what
150 * the cache block size is, so it can use the dcbz instruction safely.
152 #define AT_DCACHEBSIZE 19
153 #define AT_ICACHEBSIZE 20
154 #define AT_UCACHEBSIZE 21
155 /* A special ignored type value for PPC, for glibc compatibility. */
156 #define AT_IGNOREPPC 22
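/* (A guest program sees these like any other auxv entry: the vector lives
   just past envp's terminating NULL, and newer guest glibc versions also
   expose it via getauxval(), e.g. getauxval(AT_DCACHEBSIZE).) */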
158 * The requirements here are:
159 * - keep the final alignment of sp (sp & 0xf)
160 * - make sure the 32-bit value at the first 16 byte aligned position of
161 * AUXV is greater than 16 for glibc compatibility.
162 * AT_IGNOREPPC is used for that.
163 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
164 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
166 #define DLINFO_ARCH_ITEMS 3
167 #define ARCH_DLINFO \
169 sp -= DLINFO_ARCH_ITEMS * 2; \
170 NEW_AUX_ENT(0, AT_DCACHEBSIZE, 0x20); \
171 NEW_AUX_ENT(1, AT_ICACHEBSIZE, 0x20); \
172 NEW_AUX_ENT(2, AT_UCACHEBSIZE, 0); \
174 * Now handle glibc compatibility. \
177 NEW_AUX_ENT(0, AT_IGNOREPPC, AT_IGNOREPPC); \
178 NEW_AUX_ENT(1, AT_IGNOREPPC, AT_IGNOREPPC); \
181 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
183 _regs->msr = 1 << MSR_PR; /* Set user mode */
184 _regs->gpr[1] = infop->start_stack;
185 _regs->nip = infop->entry;
188 #define USE_ELF_CORE_DUMP
189 #define ELF_EXEC_PAGESIZE 4096
196 * MAX_ARG_PAGES defines the number of pages allocated for the arguments
197 * and environment of the new program. 32 should suffice; this gives
198 * a maximum env+arg size of 128kB with 4KB pages!
200 #define MAX_ARG_PAGES 32
203 * This structure is used to hold the arguments that are
204 * used when loading binaries.
206 struct linux_binprm {
208 unsigned long page[MAX_ARG_PAGES];
214 char * filename; /* Name of binary */
215 unsigned long loader, exec;
216 int dont_iput; /* binfmt handler has put inode */
221 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
222 unsigned int a_text; /* length of text, in bytes */
223 unsigned int a_data; /* length of data, in bytes */
224 unsigned int a_bss; /* length of uninitialized data area, in bytes */
225 unsigned int a_syms; /* length of symbol table data in file, in bytes */
226 unsigned int a_entry; /* start address */
227 unsigned int a_trsize; /* length of relocation info for text, in bytes */
228 unsigned int a_drsize; /* length of relocation info for data, in bytes */
232 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
238 /* max code+data+bss space allocated to elf interpreter */
239 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
241 /* max code+data+bss+brk space allocated to ET_DYN executables */
242 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
244 /* from personality.h */
246 /* Flags for bug emulation. These occupy the top three bytes. */
247 #define STICKY_TIMEOUTS 0x4000000
248 #define WHOLE_SECONDS 0x2000000
250 /* Personality types. These go in the low byte. Avoid using the top bit,
251 * it will conflict with error returns.
253 #define PER_MASK (0x00ff)
254 #define PER_LINUX (0x0000)
255 #define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
256 #define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
257 #define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
258 #define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
259 #define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
260 #define PER_BSD (0x0006)
261 #define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
263 /* Necessary parameters */
266 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
267 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
268 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
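/* For example, with a 4096-byte target page:
 *     TARGET_ELF_PAGESTART(0x08048123)  == 0x08048000
 *     TARGET_ELF_PAGEOFFSET(0x08048123) == 0x00000123
 * The mmap calls below round the virtual address and file offset down to
 * a page boundary and grow the mapping length by that in-page offset. */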
270 #define INTERPRETER_NONE 0
271 #define INTERPRETER_AOUT 1
272 #define INTERPRETER_ELF 2
274 #define DLINFO_ITEMS 11
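/* DLINFO_ITEMS is the number of AT_* entries written by create_elf_tables()
   below (AT_PHDR through AT_EGID); keep it in sync with those calls. */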
276 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
281 extern unsigned long x86_stack_size;
283 static int load_aout_interp(void * exptr, int interp_fd);
286 static void bswap_ehdr(Elf32_Ehdr *ehdr)
288 bswap16s(&ehdr->e_type); /* Object file type */
289 bswap16s(&ehdr->e_machine); /* Architecture */
290 bswap32s(&ehdr->e_version); /* Object file version */
291 bswap32s(&ehdr->e_entry); /* Entry point virtual address */
292 bswap32s(&ehdr->e_phoff); /* Program header table file offset */
293 bswap32s(&ehdr->e_shoff); /* Section header table file offset */
294 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
295 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
296 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
297 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
298 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
299 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
300 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
303 static void bswap_phdr(Elf32_Phdr *phdr)
305 bswap32s(&phdr->p_type); /* Segment type */
306 bswap32s(&phdr->p_offset); /* Segment file offset */
307 bswap32s(&phdr->p_vaddr); /* Segment virtual address */
308 bswap32s(&phdr->p_paddr); /* Segment physical address */
309 bswap32s(&phdr->p_filesz); /* Segment size in file */
310 bswap32s(&phdr->p_memsz); /* Segment size in memory */
311 bswap32s(&phdr->p_flags); /* Segment flags */
312 bswap32s(&phdr->p_align); /* Segment alignment */
315 static void bswap_shdr(Elf32_Shdr *shdr)
317 bswap32s(&shdr->sh_name);
318 bswap32s(&shdr->sh_type);
319 bswap32s(&shdr->sh_flags);
320 bswap32s(&shdr->sh_addr);
321 bswap32s(&shdr->sh_offset);
322 bswap32s(&shdr->sh_size);
323 bswap32s(&shdr->sh_link);
324 bswap32s(&shdr->sh_info);
325 bswap32s(&shdr->sh_addralign);
326 bswap32s(&shdr->sh_entsize);
329 static void bswap_sym(Elf32_Sym *sym)
331 bswap32s(&sym->st_name);
332 bswap32s(&sym->st_value);
333 bswap32s(&sym->st_size);
334 bswap16s(&sym->st_shndx);
338 static void * get_free_page(void)
342 /* User-space version of kernel get_free_page. Returns a page-aligned
343 * page-sized chunk of memory.
345 retval = (void *)target_mmap(0, qemu_host_page_size, PROT_READ|PROT_WRITE,
346 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
348 if((long)retval == -1) {
349 perror("get_free_page");
357 static void free_page(void * pageaddr)
359 target_munmap((unsigned long)pageaddr, qemu_host_page_size);
363 * 'copy_strings()' copies argument/environment strings from user
364 * memory to free pages in kernel memory. These are in a format ready
365 * to be put directly into the top of new user memory.
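 *
 * The strings are copied working downwards from bprm->p, allocating
 * argument pages on demand, so that when the pages are later moved to
 * the top of the new stack the strings are already laid out the way the
 * target expects.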
368 static unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
371 char *tmp, *tmp1, *pag = NULL;
375 return 0; /* bullet-proofing */
380 fprintf(stderr, "VFS: argc is wrong\n");
386 if (p < len) { /* this shouldn't happen - 128kB */
392 offset = p % TARGET_PAGE_SIZE;
393 pag = (char *) page[p/TARGET_PAGE_SIZE];
395 pag = (char *)get_free_page();
396 page[p/TARGET_PAGE_SIZE] = (unsigned long)pag;
401 if (len == 0 || offset == 0) {
402 *(pag + offset) = *tmp;
405 int bytes_to_copy = (len > offset) ? offset : len;
406 tmp -= bytes_to_copy;
408 offset -= bytes_to_copy;
409 len -= bytes_to_copy;
410 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
417 static int in_group_p(gid_t g)
419 /* return TRUE if we're in the specified group, FALSE otherwise */
422 gid_t grouplist[NGROUPS];
424 ngroup = getgroups(NGROUPS, grouplist);
425 for(i = 0; i < ngroup; i++) {
426 if(grouplist[i] == g) {
433 static int count(char ** vec)
437 for(i = 0; *vec; i++) {
444 static int prepare_binprm(struct linux_binprm *bprm)
448 int retval, id_change;
450 if(fstat(bprm->fd, &st) < 0) {
455 if(!S_ISREG(mode)) { /* Must be regular file */
458 if(!(mode & 0111)) { /* Must have at least one execute bit set */
462 bprm->e_uid = geteuid();
463 bprm->e_gid = getegid();
468 bprm->e_uid = st.st_uid;
469 if(bprm->e_uid != geteuid()) {
476 * If setgid is set but no group execute bit then this
477 * is a candidate for mandatory locking, not a setgid
480 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
481 bprm->e_gid = st.st_gid;
482 if (!in_group_p(bprm->e_gid)) {
487 memset(bprm->buf, 0, sizeof(bprm->buf));
488 retval = lseek(bprm->fd, 0L, SEEK_SET);
490 retval = read(bprm->fd, bprm->buf, 128);
493 perror("prepare_binprm");
495 /* return(-errno); */
502 unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
503 struct image_info * info)
505 unsigned long stack_base, size, error;
508 /* Create enough stack to hold everything. If we don't use
509 * it for args, we'll use it for something else...
511 size = x86_stack_size;
512 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
513 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
514 error = target_mmap(0,
515 size + qemu_host_page_size,
516 PROT_READ | PROT_WRITE,
517 MAP_PRIVATE | MAP_ANONYMOUS,
523 /* we reserve one extra page at the top of the stack as guard */
524 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
526 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
530 bprm->loader += stack_base;
532 bprm->exec += stack_base;
534 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
538 memcpy((void *)stack_base, (void *)bprm->page[i], TARGET_PAGE_SIZE);
539 free_page((void *)bprm->page[i]);
541 stack_base += TARGET_PAGE_SIZE;
546 static void set_brk(unsigned long start, unsigned long end)
548 /* page-align the start and end addresses... */
549 start = HOST_PAGE_ALIGN(start);
550 end = HOST_PAGE_ALIGN(end);
553 if(target_mmap(start, end - start,
554 PROT_READ | PROT_WRITE | PROT_EXEC,
555 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
556 perror("cannot mmap brk");
562 /* We need to explicitly zero any fractional pages after the data
563 section (i.e. the bss). Otherwise they would contain junk from the file
564 that should not be in memory. */
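/* For example, if the data segment ends at 0x0804a123, the bytes from
   there up to the end of that page come from the file image and must be
   cleared so that the bss really starts out zero-filled. */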
565 static void padzero(unsigned long elf_bss)
570 /* XXX: this is really a hack: if the real host page size is
571 smaller than the target page size, some pages after the end
572 of the file may not be mapped. A better fix would be to
573 patch target_mmap(), but it is more complicated as the file
574 size must be known */
575 if (qemu_real_host_page_size < qemu_host_page_size) {
576 unsigned long end_addr, end_addr1;
577 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
578 ~(qemu_real_host_page_size - 1);
579 end_addr = HOST_PAGE_ALIGN(elf_bss);
580 if (end_addr1 < end_addr) {
581 mmap((void *)end_addr1, end_addr - end_addr1,
582 PROT_READ|PROT_WRITE|PROT_EXEC,
583 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
587 nbyte = elf_bss & (qemu_host_page_size-1);
589 nbyte = qemu_host_page_size - nbyte;
590 fpnt = (char *) elf_bss;
597 static unsigned int * create_elf_tables(char *p, int argc, int envc,
598 struct elfhdr * exec,
599 unsigned long load_addr,
600 unsigned long load_bias,
601 unsigned long interp_load_addr, int ibcs,
602 struct image_info *info)
604 target_ulong *argv, *envp;
605 target_ulong *sp, *csp;
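/* Sketch of the block built below, from the 16-byte aligned top of the
 * string area downwards (addresses decreasing):
 *
 *     argument/environment strings (already copied, at and above p)
 *     AT_NULL terminator
 *     AT_PHDR ... AT_EGID auxv entries (DLINFO_ITEMS of them)
 *     arch-specific auxv entries, when ARCH_DLINFO is defined
 *     envp[0 .. envc-1], NULL
 *     argv[0 .. argc-1], NULL
 *     pointer to envp[], pointer to argv[] (only when !ibcs)
 *     argc <- final stack pointer handed to the guest
 */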
609 * Force 16 byte _final_ alignment here for generality.
611 sp = (unsigned int *) (~15UL & (unsigned long) p);
613 csp -= (DLINFO_ITEMS + 1) * 2;
614 #ifdef DLINFO_ARCH_ITEMS
615 csp -= DLINFO_ARCH_ITEMS*2;
619 csp -= (!ibcs ? 3 : 1); /* argc itself */
620 if ((unsigned long)csp & 15UL)
621 sp -= ((unsigned long)csp & 15UL) / sizeof(*sp);
623 #define NEW_AUX_ENT(nr, id, val) \
624 put_user (id, sp + (nr * 2)); \
625 put_user (val, sp + (nr * 2 + 1))
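/* Each auxv entry is a (type, value) pair of target words, which is why
   the size bookkeeping above multiplies entry counts by two. */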
627 NEW_AUX_ENT (0, AT_NULL, 0);
629 sp -= DLINFO_ITEMS*2;
630 NEW_AUX_ENT( 0, AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
631 NEW_AUX_ENT( 1, AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
632 NEW_AUX_ENT( 2, AT_PHNUM, (target_ulong)(exec->e_phnum));
633 NEW_AUX_ENT( 3, AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
634 NEW_AUX_ENT( 4, AT_BASE, (target_ulong)(interp_load_addr));
635 NEW_AUX_ENT( 5, AT_FLAGS, (target_ulong)0);
636 NEW_AUX_ENT( 6, AT_ENTRY, load_bias + exec->e_entry);
637 NEW_AUX_ENT( 7, AT_UID, (target_ulong) getuid());
638 NEW_AUX_ENT( 8, AT_EUID, (target_ulong) geteuid());
639 NEW_AUX_ENT( 9, AT_GID, (target_ulong) getgid());
640 NEW_AUX_ENT(10, AT_EGID, (target_ulong) getegid());
643 * ARCH_DLINFO must come last so platform-specific code can enforce
644 * special alignment requirements on the AUXV if necessary (e.g. PPC).
655 put_user((target_ulong)envp,--sp);
656 put_user((target_ulong)argv,--sp);
659 info->arg_start = (unsigned int)((unsigned long)p & 0xffffffff);
661 put_user((target_ulong)p,argv++);
668 info->arg_end = info->env_start = (unsigned int)((unsigned long)p & 0xffffffff);
670 put_user((target_ulong)p,envp++);
677 info->env_end = (unsigned int)((unsigned long)p & 0xffffffff);
683 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
685 unsigned long *interp_load_addr)
687 struct elf_phdr *elf_phdata = NULL;
688 struct elf_phdr *eppnt;
689 unsigned long load_addr = 0;
690 int load_addr_set = 0;
692 unsigned long last_bss, elf_bss;
701 bswap_ehdr(interp_elf_ex);
703 /* First of all, some simple consistency checks */
704 if ((interp_elf_ex->e_type != ET_EXEC &&
705 interp_elf_ex->e_type != ET_DYN) ||
706 !elf_check_arch(interp_elf_ex->e_machine)) {
711 /* Now read in all of the header information */
713 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
716 elf_phdata = (struct elf_phdr *)
717 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
723 * If the size of this structure has changed, then punt, since
724 * we will be doing the wrong thing.
726 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
731 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
733 retval = read(interpreter_fd,
735 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
738 perror("load_elf_interp");
745 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
750 if (interp_elf_ex->e_type == ET_DYN) {
751 /* in order to avoid hardcoding the interpreter load
752 address in qemu, we allocate a big enough memory zone */
753 error = target_mmap(0, INTERP_MAP_SIZE,
754 PROT_NONE, MAP_PRIVATE | MAP_ANON,
765 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
766 if (eppnt->p_type == PT_LOAD) {
767 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
769 unsigned long vaddr = 0;
772 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
773 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
774 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
775 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
776 elf_type |= MAP_FIXED;
777 vaddr = eppnt->p_vaddr;
779 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
780 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
784 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
786 if (error > -1024UL) {
788 close(interpreter_fd);
793 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
799 * Find the end of the file mapping for this phdr, and keep
800 * track of the largest address we see for this.
802 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
803 if (k > elf_bss) elf_bss = k;
806 * Do the same thing for the memory mapping - between
807 * elf_bss and last_bss is the bss section.
809 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
810 if (k > last_bss) last_bss = k;
813 /* Now use mmap to map the library into memory. */
815 close(interpreter_fd);
818 * Now fill out the bss section. First pad the last page up
819 * to the page boundary, and then perform an mmap to make sure
820 * that there are zero-mapped pages up to and including the last
824 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
826 /* Map the last of the bss segment */
827 if (last_bss > elf_bss) {
828 target_mmap(elf_bss, last_bss-elf_bss,
829 PROT_READ|PROT_WRITE|PROT_EXEC,
830 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
834 *interp_load_addr = load_addr;
835 return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
838 /* Best attempt to load symbols from this ELF object. */
839 static void load_symbols(struct elfhdr *hdr, int fd)
842 struct elf_shdr sechdr, symtab, strtab;
845 lseek(fd, hdr->e_shoff, SEEK_SET);
846 for (i = 0; i < hdr->e_shnum; i++) {
847 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
852 if (sechdr.sh_type == SHT_SYMTAB) {
854 lseek(fd, hdr->e_shoff
855 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
856 if (read(fd, &strtab, sizeof(strtab))
865 return; /* Shouldn't happen... */
868 /* Now we know where the strtab and symtab are. Snarf them. */
869 disas_symtab = malloc(symtab.sh_size);
870 disas_strtab = strings = malloc(strtab.sh_size);
871 if (!disas_symtab || !disas_strtab)
874 lseek(fd, symtab.sh_offset, SEEK_SET);
875 if (read(fd, disas_symtab, symtab.sh_size) != symtab.sh_size)
879 for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++)
880 bswap_sym(disas_symtab + sizeof(struct elf_sym)*i);
883 lseek(fd, strtab.sh_offset, SEEK_SET);
884 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
886 disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
889 static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
890 struct image_info * info)
892 struct elfhdr elf_ex;
893 struct elfhdr interp_elf_ex;
894 struct exec interp_ex;
895 int interpreter_fd = -1; /* avoid warning */
896 unsigned long load_addr, load_bias;
897 int load_addr_set = 0;
898 unsigned int interpreter_type = INTERPRETER_NONE;
899 unsigned char ibcs2_interpreter;
901 unsigned long mapped_addr;
902 struct elf_phdr * elf_ppnt;
903 struct elf_phdr *elf_phdata;
904 unsigned long elf_bss, k, elf_brk;
906 char * elf_interpreter;
907 unsigned long elf_entry, interp_load_addr = 0;
909 unsigned long start_code, end_code, end_data;
910 unsigned long elf_stack;
911 char passed_fileno[6];
913 ibcs2_interpreter = 0;
917 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
922 if (elf_ex.e_ident[0] != 0x7f ||
923 strncmp(&elf_ex.e_ident[1], "ELF",3) != 0) {
927 /* First of all, some simple consistency checks */
928 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
929 (! elf_check_arch(elf_ex.e_machine))) {
933 /* Now read in all of the header information */
934 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
935 if (elf_phdata == NULL) {
939 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
941 retval = read(bprm->fd, (char *) elf_phdata,
942 elf_ex.e_phentsize * elf_ex.e_phnum);
946 perror("load_elf_binary");
953 elf_ppnt = elf_phdata;
954 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
955 bswap_phdr(elf_ppnt);
958 elf_ppnt = elf_phdata;
965 elf_interpreter = NULL;
970 for(i=0;i < elf_ex.e_phnum; i++) {
971 if (elf_ppnt->p_type == PT_INTERP) {
972 if ( elf_interpreter != NULL )
975 free(elf_interpreter);
980 /* This is the program interpreter used for
981 * shared libraries - for now assume that this
982 * is an a.out format binary
985 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
987 if (elf_interpreter == NULL) {
993 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
995 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
998 perror("load_elf_binary2");
1002 /* If the program interpreter is one of these two,
1003 then assume an iBCS2 image. Otherwise assume
1004 a native Linux image. */
1006 /* JRP - Need to add X86 lib dir stuff here... */
1008 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1009 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1010 ibcs2_interpreter = 1;
1014 printf("Using ELF interpreter %s\n", elf_interpreter);
1017 retval = open(path(elf_interpreter), O_RDONLY);
1019 interpreter_fd = retval;
1022 perror(elf_interpreter);
1024 /* retval = -errno; */
1029 retval = lseek(interpreter_fd, 0, SEEK_SET);
1031 retval = read(interpreter_fd,bprm->buf,128);
1035 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1036 interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
1039 perror("load_elf_binary3");
1042 free(elf_interpreter);
1050 /* Some simple consistency checks for the interpreter */
1051 if (elf_interpreter){
1052 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1054 /* Now figure out which format our binary is */
1055 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1056 (N_MAGIC(interp_ex) != QMAGIC)) {
1057 interpreter_type = INTERPRETER_ELF;
1060 if (interp_elf_ex.e_ident[0] != 0x7f ||
1061 strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1062 interpreter_type &= ~INTERPRETER_ELF;
1065 if (!interpreter_type) {
1066 free(elf_interpreter);
1073 /* OK, we are done with that, now set up the arg stuff,
1074 and then start this sucker up */
1076 if (!bprm->sh_bang) {
1079 if (interpreter_type == INTERPRETER_AOUT) {
1080 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1081 passed_p = passed_fileno;
1083 if (elf_interpreter) {
1084 bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p);
1089 if (elf_interpreter) {
1090 free(elf_interpreter);
1098 /* OK, this is the point of no return */
1101 info->start_mmap = (unsigned long)ELF_START_MMAP;
1103 elf_entry = (unsigned long) elf_ex.e_entry;
1105 /* Do this so that we can load the interpreter, if need be. We will
1106 change some of these later */
1108 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1109 info->start_stack = bprm->p;
1111 /* Now we do a little grungy work by mmapping the ELF image into
1112 * the correct location in memory. At this point, we assume that
1113 * the image should be loaded at a fixed address, not at a variable
1117 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1120 unsigned long error;
1122 if (elf_ppnt->p_type != PT_LOAD)
1125 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1126 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1127 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1128 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1129 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1130 elf_flags |= MAP_FIXED;
1131 } else if (elf_ex.e_type == ET_DYN) {
1132 /* Try and get dynamic programs out of the way of the default mmap
1133 base, as well as whatever program they might try to exec. This
1134 is because the brk will follow the loader, and is not movable. */
1135 /* NOTE: for qemu, we do a big mmap to get enough space
1136 without hardcoding any address */
1137 error = target_mmap(0, ET_DYN_MAP_SIZE,
1138 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1144 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1147 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1148 (elf_ppnt->p_filesz +
1149 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1151 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1153 (elf_ppnt->p_offset -
1154 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1160 #ifdef LOW_ELF_STACK
1161 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1162 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1165 if (!load_addr_set) {
1167 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1168 if (elf_ex.e_type == ET_DYN) {
1169 load_bias += error -
1170 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1171 load_addr += load_bias;
1174 k = elf_ppnt->p_vaddr;
1177 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1180 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1184 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1185 if (k > elf_brk) elf_brk = k;
1188 elf_entry += load_bias;
1189 elf_bss += load_bias;
1190 elf_brk += load_bias;
1191 start_code += load_bias;
1192 end_code += load_bias;
1193 // start_data += load_bias;
1194 end_data += load_bias;
1196 if (elf_interpreter) {
1197 if (interpreter_type & 1) {
1198 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1200 else if (interpreter_type & 2) {
1201 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1205 close(interpreter_fd);
1206 free(elf_interpreter);
1208 if (elf_entry == ~0UL) {
1209 printf("Unable to load interpreter\n");
1219 load_symbols(&elf_ex, bprm->fd);
1221 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1222 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1224 #ifdef LOW_ELF_STACK
1225 info->start_stack = bprm->p = elf_stack - 4;
1227 bprm->p = (unsigned long)
1228 create_elf_tables((char *)bprm->p,
1232 load_addr, load_bias,
1234 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1236 if (interpreter_type == INTERPRETER_AOUT)
1237 info->arg_start += strlen(passed_fileno) + 1;
1238 info->start_brk = info->brk = elf_brk;
1239 info->end_code = end_code;
1240 info->start_code = start_code;
1241 info->end_data = end_data;
1242 info->start_stack = bprm->p;
1244 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1246 set_brk(elf_bss, elf_brk);
1251 printf("(start_brk) %x\n" , info->start_brk);
1252 printf("(end_code) %x\n" , info->end_code);
1253 printf("(start_code) %x\n" , info->start_code);
1254 printf("(end_data) %x\n" , info->end_data);
1255 printf("(start_stack) %x\n" , info->start_stack);
1256 printf("(brk) %x\n" , info->brk);
1259 if ( info->personality == PER_SVR4 )
1261 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1262 and some applications "depend" upon this behavior.
1263 Since we do not have the power to recompile these, we
1264 emulate the SVr4 behavior. Sigh. */
1265 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1266 MAP_FIXED | MAP_PRIVATE, -1, 0);
1269 #ifdef ELF_PLAT_INIT
1271 * The ABI may specify that certain registers be set up in special
1272 * ways (on i386 %edx is the address of a DT_FINI function, for
1273 * example). This macro performs whatever initialization of
1274 * the regs structure is required.
1276 ELF_PLAT_INIT(regs);
1280 info->entry = elf_entry;
1287 int elf_exec(const char * filename, char ** argv, char ** envp,
1288 struct target_pt_regs * regs, struct image_info *infop)
1290 struct linux_binprm bprm;
1294 bprm.p = TARGET_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
1295 for (i=0 ; i<MAX_ARG_PAGES ; i++) /* clear page-table */
1297 retval = open(filename, O_RDONLY);
1301 bprm.filename = (char *)filename;
1306 bprm.argc = count(argv);
1307 bprm.envc = count(envp);
1309 retval = prepare_binprm(&bprm);
1312 bprm.p = copy_strings(1, &bprm.filename, bprm.page, bprm.p);
1314 bprm.p = copy_strings(bprm.envc,envp,bprm.page,bprm.p);
1315 bprm.p = copy_strings(bprm.argc,argv,bprm.page,bprm.p);
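/* Note the ordering: copy_strings() packs the strings downwards from the
   top of the argument pages, so the filename ends up highest, followed by
   the environment and finally the argument strings. */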
1322 retval = load_elf_binary(&bprm,regs,infop);
1325 /* success. Initialize important registers */
1326 init_thread(regs, infop);
1330 /* Something went wrong, return the inode and free the argument pages */
1331 for (i=0 ; i<MAX_ARG_PAGES ; i++) {
1332 free_page((void *)bprm.page[i]);
1338 static int load_aout_interp(void * exptr, int interp_fd)
1340 printf("a.out interpreter not yet supported\n");