1 /* This is the Linux kernel elf-loading code, ported into user space */
16 /* this flag is ineffective under linux too; it should be deleted */
18 #define MAP_DENYWRITE 0
21 /* should probably go in elf.h */
28 #define ELF_START_MMAP 0x80000000
31 * This is used to ensure we don't load something for the wrong architecture.
33 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
36 * These are used to set parameters in the core dumps.
38 #define ELF_CLASS ELFCLASS32
39 #define ELF_DATA ELFDATA2LSB
40 #define ELF_ARCH EM_386
42 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
43 starts %edx contains a pointer to a function which might be
44 registered using `atexit'. This provides a means for the
45 dynamic linker to call DT_FINI functions for shared libraries
46 that have been loaded before the code runs.
48 A value of 0 tells us we have no such handler. */
49 #define ELF_PLAT_INIT(_r) _r->edx = 0
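/* Illustration (not part of the loader): guest startup code commonly does
   something like
       if (rtld_fini)          // value received in %edx
           atexit(rtld_fini);
   before calling main(), so zeroing %edx here tells the C runtime that no
   DT_FINI handler was registered by a dynamic linker. */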
51 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
53 regs->esp = infop->start_stack;
54 regs->eip = infop->entry;
57 #define USE_ELF_CORE_DUMP
58 #define ELF_EXEC_PAGESIZE 4096
64 #define ELF_START_MMAP 0x80000000
66 #define elf_check_arch(x) ( (x) == EM_ARM )
68 #define ELF_CLASS ELFCLASS32
69 #ifdef TARGET_WORDS_BIGENDIAN
70 #define ELF_DATA ELFDATA2MSB
72 #define ELF_DATA ELFDATA2LSB
74 #define ELF_ARCH EM_ARM
76 #define ELF_PLAT_INIT(_r) _r->ARM_r0 = 0
78 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
80 target_long *stack = (void *)infop->start_stack;
81 memset(regs, 0, sizeof(*regs));
82 regs->ARM_cpsr = 0x10;
83 regs->ARM_pc = infop->entry;
84 regs->ARM_sp = infop->start_stack;
85 regs->ARM_r2 = tswapl(stack[2]); /* envp */
86 regs->ARM_r1 = tswapl(stack[1]); /* argv */
87 /* XXX: it seems that r0 is zeroed afterwards! */
88 // regs->ARM_r0 = tswapl(stack[0]); /* argc */
91 #define USE_ELF_CORE_DUMP
92 #define ELF_EXEC_PAGESIZE 4096
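/* Note: by the time init_thread() above runs, create_elf_tables() has already
   built the new stack image (argc, argv pointers, NULL, envp pointers, NULL,
   auxv), which is where the words read from start_stack come from. */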
99 #define ELF_START_MMAP 0x80000000
101 #define elf_check_arch(x) ( (x) == EM_SPARC )
103 #define ELF_CLASS ELFCLASS64
104 #define ELF_DATA ELFDATA2MSB
105 #define ELF_ARCH EM_SPARC
108 #define ELF_PLAT_INIT(_r)
110 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
113 regs->pc = infop->entry;
114 regs->npc = regs->pc + 4;
116 regs->u_regs[14] = infop->start_stack - 16 * 4;
120 #define ELF_START_MMAP 0x80000000
122 #define elf_check_arch(x) ( (x) == EM_SPARC )
124 #define ELF_CLASS ELFCLASS32
125 #define ELF_DATA ELFDATA2MSB
126 #define ELF_ARCH EM_SPARC
129 #define ELF_PLAT_INIT(_r)
131 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
134 regs->pc = infop->entry;
135 regs->npc = regs->pc + 4;
137 regs->u_regs[14] = infop->start_stack - 16 * 4;
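/* The "- 16 * 4" leaves room below the initial stack pointer for the 16-word
   register window save area that the 32-bit SPARC ABI expects at %sp. */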
145 #define ELF_START_MMAP 0x80000000
147 #define elf_check_arch(x) ( (x) == EM_PPC )
149 #define ELF_CLASS ELFCLASS32
150 #ifdef TARGET_WORDS_BIGENDIAN
151 #define ELF_DATA ELFDATA2MSB
153 #define ELF_DATA ELFDATA2LSB
155 #define ELF_ARCH EM_PPC
157 /* Note that this isn't exactly what the regular kernel does,
158  * but it is what the ABI wants and is needed to allow
159 * execution of PPC BSD programs.
161 #define ELF_PLAT_INIT(_r) \
163 target_ulong *pos = (target_ulong *)bprm->p, tmp = 1; \
164 _r->gpr[3] = bprm->argc; \
165 _r->gpr[4] = (unsigned long)++pos; \
166 for (; tmp != 0; pos++) \
168 _r->gpr[5] = (unsigned long)pos; \
172 * We need to put in some extra aux table entries to tell glibc what
173 * the cache block size is, so it can use the dcbz instruction safely.
175 #define AT_DCACHEBSIZE 19
176 #define AT_ICACHEBSIZE 20
177 #define AT_UCACHEBSIZE 21
178 /* A special ignored type value for PPC, for glibc compatibility. */
179 #define AT_IGNOREPPC 22
181 * The requirements here are:
182 * - keep the final alignment of sp (sp & 0xf)
183  *  - make sure the 32-bit value at the first 16-byte-aligned position of
184 * AUXV is greater than 16 for glibc compatibility.
185 * AT_IGNOREPPC is used for that.
186 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
187 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
189 #define DLINFO_ARCH_ITEMS 5
190 #define ARCH_DLINFO \
192 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
193 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
194 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
196 * Now handle glibc compatibility. \
198 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
199 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
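/* glibc reads AT_DCACHEBSIZE to learn how many bytes a dcbz instruction
   clears, so its optimized memset/bzero remain safe; the two AT_IGNOREPPC
   entries are padding whose value (22) keeps the first 16-byte-aligned auxv
   word above 16, as the comment above requires. */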
202 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
204 _regs->msr = 1 << MSR_PR; /* Set user mode */
205 _regs->gpr[1] = infop->start_stack;
206 _regs->nip = infop->entry;
209 #define USE_ELF_CORE_DUMP
210 #define ELF_EXEC_PAGESIZE 4096
217 * MAX_ARG_PAGES defines the number of pages allocated for arguments
218  * and environment for the new program. 32 should suffice; this gives
219 * a maximum env+arg of 128kB w/4KB pages!
221 #define MAX_ARG_PAGES 32
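/* With 4kB target pages this is 32 * 4096 = 131072 bytes (128kB) of room for
   the combined argument and environment strings. */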
224 * This structure is used to hold the arguments that are
225 * used when loading binaries.
227 struct linux_binprm {
229 unsigned long page[MAX_ARG_PAGES];
235 char * filename; /* Name of binary */
236 unsigned long loader, exec;
237 int dont_iput; /* binfmt handler has put inode */
242 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
243 unsigned int a_text; /* length of text, in bytes */
244 unsigned int a_data; /* length of data, in bytes */
245 unsigned int a_bss; /* length of uninitialized data area, in bytes */
246 unsigned int a_syms; /* length of symbol table data in file, in bytes */
247 unsigned int a_entry; /* start address */
248 unsigned int a_trsize; /* length of relocation info for text, in bytes */
249 unsigned int a_drsize; /* length of relocation info for data, in bytes */
253 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
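/* N_MAGIC() extracts the a.out magic number from the low 16 bits of a_info;
   load_elf_binary() below compares it against OMAGIC/ZMAGIC/QMAGIC to decide
   whether an ELF binary's interpreter is itself an a.out image. */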
259 /* max code+data+bss space allocated to elf interpreter */
260 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
262 /* max code+data+bss+brk space allocated to ET_DYN executables */
263 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
265 /* from personality.h */
267 /* Flags for bug emulation. These occupy the top three bytes. */
268 #define STICKY_TIMEOUTS 0x4000000
269 #define WHOLE_SECONDS 0x2000000
271 /* Personality types. These go in the low byte. Avoid using the top bit,
272 * it will conflict with error returns.
274 #define PER_MASK (0x00ff)
275 #define PER_LINUX (0x0000)
276 #define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
277 #define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
278 #define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
279 #define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
280 #define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
281 #define PER_BSD (0x0006)
282 #define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
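/* Example: PER_SVR4 is personality 0x0001 with the STICKY_TIMEOUTS
   bug-emulation flag set; load_elf_binary() selects PER_SVR4 for iBCS2
   binaries and PER_LINUX otherwise. */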
284 /* Necessary parameters */
287 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
288 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
289 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
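/* Worked example with 4kB target pages: for _v = 0x0804a123,
       TARGET_ELF_PAGESTART(_v)  = 0x0804a000  (rounded down to a page boundary)
       TARGET_ELF_PAGEOFFSET(_v) = 0x123       (offset within that page)
   The mapping code below maps at PAGESTART(p_vaddr), adds PAGEOFFSET(p_vaddr)
   to the length and subtracts it from the file offset. */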
291 #define INTERPRETER_NONE 0
292 #define INTERPRETER_AOUT 1
293 #define INTERPRETER_ELF 2
295 #define DLINFO_ITEMS 11
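/* These are the eleven AT_* entries written unconditionally by
   create_elf_tables(): AT_PHDR, AT_PHENT, AT_PHNUM, AT_PAGESZ, AT_BASE,
   AT_FLAGS, AT_ENTRY, AT_UID, AT_EUID, AT_GID and AT_EGID. */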
297 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
302 extern unsigned long x86_stack_size;
304 static int load_aout_interp(void * exptr, int interp_fd);
307 static void bswap_ehdr(struct elfhdr *ehdr)
309 bswap16s(&ehdr->e_type); /* Object file type */
310 bswap16s(&ehdr->e_machine); /* Architecture */
311 bswap32s(&ehdr->e_version); /* Object file version */
312 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
313 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
314 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
315 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
316 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
317 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
318 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
319 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
320 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
321 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
324 static void bswap_phdr(struct elf_phdr *phdr)
326 bswap32s(&phdr->p_type); /* Segment type */
327 bswaptls(&phdr->p_offset); /* Segment file offset */
328 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
329 bswaptls(&phdr->p_paddr); /* Segment physical address */
330 bswaptls(&phdr->p_filesz); /* Segment size in file */
331 bswaptls(&phdr->p_memsz); /* Segment size in memory */
332 bswap32s(&phdr->p_flags); /* Segment flags */
333 bswaptls(&phdr->p_align); /* Segment alignment */
336 static void bswap_shdr(struct elf_shdr *shdr)
338 bswap32s(&shdr->sh_name);
339 bswap32s(&shdr->sh_type);
340 bswaptls(&shdr->sh_flags);
341 bswaptls(&shdr->sh_addr);
342 bswaptls(&shdr->sh_offset);
343 bswaptls(&shdr->sh_size);
344 bswap32s(&shdr->sh_link);
345 bswap32s(&shdr->sh_info);
346 bswaptls(&shdr->sh_addralign);
347 bswaptls(&shdr->sh_entsize);
350 static void bswap_sym(Elf32_Sym *sym)
352 bswap32s(&sym->st_name);
353 bswap32s(&sym->st_value);
354 bswap32s(&sym->st_size);
355 bswap16s(&sym->st_shndx);
359 static void * get_free_page(void)
363 /* User-space version of kernel get_free_page. Returns a page-aligned
364 * page-sized chunk of memory.
366 retval = (void *)target_mmap(0, qemu_host_page_size, PROT_READ|PROT_WRITE,
367 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
369 if((long)retval == -1) {
370 perror("get_free_page");
378 static void free_page(void * pageaddr)
380 target_munmap((unsigned long)pageaddr, qemu_host_page_size);
384  * 'copy_strings()' copies argument/environment strings from user
385  * memory to free pages in kernel memory. These are in a format ready
386 * to be put directly into the top of new user memory.
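/* Sketch of what this produces (an assumption based on the call order in
   elf_exec() below): p is a byte offset from the bottom of the MAX_ARG_PAGES
   area that starts near the top and decreases as each NUL-terminated string
   is copied in, so the argument/environment block ends up packed against the
   top of the new stack, with p left pointing just below the lowest string. */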
389 static unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
392 char *tmp, *tmp1, *pag = NULL;
396 return 0; /* bullet-proofing */
401        fprintf(stderr, "VFS: argc is wrong\n");
407 if (p < len) { /* this shouldn't happen - 128kB */
413 offset = p % TARGET_PAGE_SIZE;
414 pag = (char *) page[p/TARGET_PAGE_SIZE];
416 pag = (char *)get_free_page();
417 page[p/TARGET_PAGE_SIZE] = (unsigned long)pag;
422 if (len == 0 || offset == 0) {
423 *(pag + offset) = *tmp;
426 int bytes_to_copy = (len > offset) ? offset : len;
427 tmp -= bytes_to_copy;
429 offset -= bytes_to_copy;
430 len -= bytes_to_copy;
431 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
438 static int in_group_p(gid_t g)
440 /* return TRUE if we're in the specified group, FALSE otherwise */
443 gid_t grouplist[NGROUPS];
445 ngroup = getgroups(NGROUPS, grouplist);
446 for(i = 0; i < ngroup; i++) {
447 if(grouplist[i] == g) {
454 static int count(char ** vec)
458 for(i = 0; *vec; i++) {
465 static int prepare_binprm(struct linux_binprm *bprm)
469 int retval, id_change;
471 if(fstat(bprm->fd, &st) < 0) {
476 if(!S_ISREG(mode)) { /* Must be regular file */
479 if(!(mode & 0111)) { /* Must have at least one execute bit set */
483 bprm->e_uid = geteuid();
484 bprm->e_gid = getegid();
489 bprm->e_uid = st.st_uid;
490 if(bprm->e_uid != geteuid()) {
497 * If setgid is set but no group execute bit then this
498 * is a candidate for mandatory locking, not a setgid
501 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
502 bprm->e_gid = st.st_gid;
503 if (!in_group_p(bprm->e_gid)) {
508 memset(bprm->buf, 0, sizeof(bprm->buf));
509 retval = lseek(bprm->fd, 0L, SEEK_SET);
511 retval = read(bprm->fd, bprm->buf, 128);
514 perror("prepare_binprm");
516 /* return(-errno); */
523 unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
524 struct image_info * info)
526 unsigned long stack_base, size, error;
529 /* Create enough stack to hold everything. If we don't use
530 * it for args, we'll use it for something else...
532 size = x86_stack_size;
533 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
534 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
535 error = target_mmap(0,
536 size + qemu_host_page_size,
537 PROT_READ | PROT_WRITE,
538 MAP_PRIVATE | MAP_ANONYMOUS,
544 /* we reserve one extra page at the top of the stack as guard */
545 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
547 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
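/* Resulting layout of this mapping, low to high addresses (a sketch):
       [ stack growth area | MAX_ARG_PAGES argument/environment pages | guard page (PROT_NONE) ]
   stack_base is the address of the first argument page; the guest stack
   pointer starts inside the argument area and grows down into the space
   below it. */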
551 bprm->loader += stack_base;
553 bprm->exec += stack_base;
555 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
559 memcpy((void *)stack_base, (void *)bprm->page[i], TARGET_PAGE_SIZE);
560 free_page((void *)bprm->page[i]);
562 stack_base += TARGET_PAGE_SIZE;
567 static void set_brk(unsigned long start, unsigned long end)
569 /* page-align the start and end addresses... */
570 start = HOST_PAGE_ALIGN(start);
571 end = HOST_PAGE_ALIGN(end);
574 if(target_mmap(start, end - start,
575 PROT_READ | PROT_WRITE | PROT_EXEC,
576 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
577 perror("cannot mmap brk");
583 /* We need to explicitly zero any fractional pages after the data
584    section (i.e. the bss).  These would contain junk from the file that
585 should not be in memory. */
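/* Worked example: with 4kB pages and elf_bss = 0x0804a123, the partial page
   runs from 0x0804a123 to 0x0804afff; nbyte below becomes
   0x1000 - 0x123 = 0xedd (3805) bytes, and exactly that range is cleared. */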
586 static void padzero(unsigned long elf_bss)
591    /* XXX: this is really a hack: if the real host page size is
592 smaller than the target page size, some pages after the end
593 of the file may not be mapped. A better fix would be to
594 patch target_mmap(), but it is more complicated as the file
595 size must be known */
596 if (qemu_real_host_page_size < qemu_host_page_size) {
597 unsigned long end_addr, end_addr1;
598 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
599 ~(qemu_real_host_page_size - 1);
600 end_addr = HOST_PAGE_ALIGN(elf_bss);
601 if (end_addr1 < end_addr) {
602 mmap((void *)end_addr1, end_addr - end_addr1,
603 PROT_READ|PROT_WRITE|PROT_EXEC,
604 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
608 nbyte = elf_bss & (qemu_host_page_size-1);
610 nbyte = qemu_host_page_size - nbyte;
611 fpnt = (char *) elf_bss;
618 static unsigned int * create_elf_tables(char *p, int argc, int envc,
619 struct elfhdr * exec,
620 unsigned long load_addr,
621 unsigned long load_bias,
622 unsigned long interp_load_addr, int ibcs,
623 struct image_info *info)
625 target_ulong *argv, *envp;
626 target_ulong *sp, *csp;
630 * Force 16 byte _final_ alignment here for generality.
632    sp = (target_ulong *) (~15UL & (unsigned long) p);
634 csp -= (DLINFO_ITEMS + 1) * 2;
635 #ifdef DLINFO_ARCH_ITEMS
636 csp -= DLINFO_ARCH_ITEMS*2;
640 csp -= (!ibcs ? 3 : 1); /* argc itself */
641 if ((unsigned long)csp & 15UL)
642 sp -= ((unsigned long)csp & 15UL) / sizeof(*sp);
644 #define NEW_AUX_ENT(id, val) \
647 put_user (val, sp + 1)
648 NEW_AUX_ENT (AT_NULL, 0);
650 /* There must be exactly DLINFO_ITEMS entries here. */
651 NEW_AUX_ENT(AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
652 NEW_AUX_ENT(AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
653 NEW_AUX_ENT(AT_PHNUM, (target_ulong)(exec->e_phnum));
654 NEW_AUX_ENT(AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
655 NEW_AUX_ENT(AT_BASE, (target_ulong)(interp_load_addr));
656 NEW_AUX_ENT(AT_FLAGS, (target_ulong)0);
657 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
658 NEW_AUX_ENT(AT_UID, (target_ulong) getuid());
659 NEW_AUX_ENT(AT_EUID, (target_ulong) geteuid());
660 NEW_AUX_ENT(AT_GID, (target_ulong) getgid());
661 NEW_AUX_ENT(AT_EGID, (target_ulong) getegid());
664         * ARCH_DLINFO must come last so platform-specific code can enforce
665         * special alignment requirements on the AUXV if necessary (e.g. PPC).
676 put_user((target_ulong)envp,--sp);
677 put_user((target_ulong)argv,--sp);
680 info->arg_start = (unsigned int)((unsigned long)p & 0xffffffff);
682 put_user((target_ulong)p,argv++);
689 info->arg_end = info->env_start = (unsigned int)((unsigned long)p & 0xffffffff);
691 put_user((target_ulong)p,envp++);
698 info->env_end = (unsigned int)((unsigned long)p & 0xffffffff);
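/* The resulting guest stack image, from the final sp upwards, is the usual
   SysV ELF layout:
       argc
       argv[0..argc-1] pointers, NULL
       envp pointers, NULL
       auxv id/value pairs, ending with AT_NULL
       argument and environment strings (placed earlier by copy_strings())
   info->arg_start/arg_end and env_start/env_end bracket the two string areas. */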
704 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
706 unsigned long *interp_load_addr)
708 struct elf_phdr *elf_phdata = NULL;
709 struct elf_phdr *eppnt;
710 unsigned long load_addr = 0;
711 int load_addr_set = 0;
713 unsigned long last_bss, elf_bss;
722 bswap_ehdr(interp_elf_ex);
724 /* First of all, some simple consistency checks */
725 if ((interp_elf_ex->e_type != ET_EXEC &&
726 interp_elf_ex->e_type != ET_DYN) ||
727 !elf_check_arch(interp_elf_ex->e_machine)) {
732 /* Now read in all of the header information */
734 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
737 elf_phdata = (struct elf_phdr *)
738 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
744 * If the size of this structure has changed, then punt, since
745 * we will be doing the wrong thing.
747 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
752 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
754 retval = read(interpreter_fd,
756 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
759 perror("load_elf_interp");
766 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
771 if (interp_elf_ex->e_type == ET_DYN) {
772        /* in order to avoid hardcoding the interpreter load
773 address in qemu, we allocate a big enough memory zone */
774 error = target_mmap(0, INTERP_MAP_SIZE,
775 PROT_NONE, MAP_PRIVATE | MAP_ANON,
786 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
787 if (eppnt->p_type == PT_LOAD) {
788 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
790 unsigned long vaddr = 0;
793 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
794 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
795 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
796 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
797 elf_type |= MAP_FIXED;
798 vaddr = eppnt->p_vaddr;
800 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
801 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
805 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
807 if (error > -1024UL) {
809 close(interpreter_fd);
814 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
820 * Find the end of the file mapping for this phdr, and keep
821 * track of the largest address we see for this.
823 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
824 if (k > elf_bss) elf_bss = k;
827 * Do the same thing for the memory mapping - between
828 * elf_bss and last_bss is the bss section.
830 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
831 if (k > last_bss) last_bss = k;
834 /* Now use mmap to map the library into memory. */
836 close(interpreter_fd);
839 * Now fill out the bss section. First pad the last page up
840     * to the page boundary, and then perform an mmap to make sure
841     * that there are zero-mapped pages up to and including the last
845 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
847 /* Map the last of the bss segment */
848 if (last_bss > elf_bss) {
849 target_mmap(elf_bss, last_bss-elf_bss,
850 PROT_READ|PROT_WRITE|PROT_EXEC,
851 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
855 *interp_load_addr = load_addr;
856 return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
859 /* Best attempt to load symbols from this ELF object. */
860 static void load_symbols(struct elfhdr *hdr, int fd)
863 struct elf_shdr sechdr, symtab, strtab;
867 lseek(fd, hdr->e_shoff, SEEK_SET);
868 for (i = 0; i < hdr->e_shnum; i++) {
869 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
874 if (sechdr.sh_type == SHT_SYMTAB) {
876 lseek(fd, hdr->e_shoff
877 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
878 if (read(fd, &strtab, sizeof(strtab))
887 return; /* Shouldn't happen... */
890    /* Now we know where the strtab and symtab are.  Snarf them. */
891 s = malloc(sizeof(*s));
892 s->disas_symtab = malloc(symtab.sh_size);
893 s->disas_strtab = strings = malloc(strtab.sh_size);
894 if (!s->disas_symtab || !s->disas_strtab)
897 lseek(fd, symtab.sh_offset, SEEK_SET);
898 if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)
902 for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++)
903 bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i);
906 lseek(fd, strtab.sh_offset, SEEK_SET);
907 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
909 s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
914 static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
915 struct image_info * info)
917 struct elfhdr elf_ex;
918 struct elfhdr interp_elf_ex;
919 struct exec interp_ex;
920 int interpreter_fd = -1; /* avoid warning */
921 unsigned long load_addr, load_bias;
922 int load_addr_set = 0;
923 unsigned int interpreter_type = INTERPRETER_NONE;
924 unsigned char ibcs2_interpreter;
926 unsigned long mapped_addr;
927 struct elf_phdr * elf_ppnt;
928 struct elf_phdr *elf_phdata;
929 unsigned long elf_bss, k, elf_brk;
931 char * elf_interpreter;
932 unsigned long elf_entry, interp_load_addr = 0;
934 unsigned long start_code, end_code, end_data;
935 unsigned long elf_stack;
936 char passed_fileno[6];
938 ibcs2_interpreter = 0;
942 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
947 if (elf_ex.e_ident[0] != 0x7f ||
948 strncmp(&elf_ex.e_ident[1], "ELF",3) != 0) {
952 /* First of all, some simple consistency checks */
953 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
954 (! elf_check_arch(elf_ex.e_machine))) {
958 /* Now read in all of the header information */
959 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
960 if (elf_phdata == NULL) {
964 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
966 retval = read(bprm->fd, (char *) elf_phdata,
967 elf_ex.e_phentsize * elf_ex.e_phnum);
971 perror("load_elf_binary");
978 elf_ppnt = elf_phdata;
979 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
980 bswap_phdr(elf_ppnt);
983 elf_ppnt = elf_phdata;
990 elf_interpreter = NULL;
995 for(i=0;i < elf_ex.e_phnum; i++) {
996 if (elf_ppnt->p_type == PT_INTERP) {
997 if ( elf_interpreter != NULL )
1000 free(elf_interpreter);
1005 /* This is the program interpreter used for
1006 * shared libraries - for now assume that this
1007 * is an a.out format binary
1010 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1012 if (elf_interpreter == NULL) {
1018 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1020 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1023 perror("load_elf_binary2");
1027 /* If the program interpreter is one of these two,
1028 then assume an iBCS2 image. Otherwise assume
1029 a native linux image. */
1031 /* JRP - Need to add X86 lib dir stuff here... */
1033 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1034 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1035 ibcs2_interpreter = 1;
1039 printf("Using ELF interpreter %s\n", elf_interpreter);
1042 retval = open(path(elf_interpreter), O_RDONLY);
1044 interpreter_fd = retval;
1047 perror(elf_interpreter);
1049 /* retval = -errno; */
1054 retval = lseek(interpreter_fd, 0, SEEK_SET);
1056 retval = read(interpreter_fd,bprm->buf,128);
1060 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1061 interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
1064 perror("load_elf_binary3");
1067 free(elf_interpreter);
1075 /* Some simple consistency checks for the interpreter */
1076 if (elf_interpreter){
1077 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1079 /* Now figure out which format our binary is */
1080 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1081 (N_MAGIC(interp_ex) != QMAGIC)) {
1082 interpreter_type = INTERPRETER_ELF;
1085 if (interp_elf_ex.e_ident[0] != 0x7f ||
1086 strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1087 interpreter_type &= ~INTERPRETER_ELF;
1090 if (!interpreter_type) {
1091 free(elf_interpreter);
1098 /* OK, we are done with that, now set up the arg stuff,
1099 and then start this sucker up */
1101 if (!bprm->sh_bang) {
1104 if (interpreter_type == INTERPRETER_AOUT) {
1105 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1106 passed_p = passed_fileno;
1108 if (elf_interpreter) {
1109 bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p);
1114 if (elf_interpreter) {
1115 free(elf_interpreter);
1123 /* OK, This is the point of no return */
1126 info->start_mmap = (unsigned long)ELF_START_MMAP;
1128 elf_entry = (unsigned long) elf_ex.e_entry;
1130 /* Do this so that we can load the interpreter, if need be. We will
1131 change some of these later */
1133 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1134 info->start_stack = bprm->p;
1136   /* Now we do a little grungy work by mmapping the ELF image into
1137 * the correct location in memory. At this point, we assume that
1138    * the image should be loaded at a fixed address, not at a variable
1142 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1145 unsigned long error;
1147 if (elf_ppnt->p_type != PT_LOAD)
1150 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1151 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1152 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1153 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1154 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1155 elf_flags |= MAP_FIXED;
1156 } else if (elf_ex.e_type == ET_DYN) {
1157 /* Try and get dynamic programs out of the way of the default mmap
1158 base, as well as whatever program they might try to exec. This
1159 is because the brk will follow the loader, and is not movable. */
1160 /* NOTE: for qemu, we do a big mmap to get enough space
1161              without hardcoding any address */
1162 error = target_mmap(0, ET_DYN_MAP_SIZE,
1163 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1169 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1172 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1173 (elf_ppnt->p_filesz +
1174 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1176 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1178 (elf_ppnt->p_offset -
1179 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1185 #ifdef LOW_ELF_STACK
1186 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1187 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1190 if (!load_addr_set) {
1192 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1193 if (elf_ex.e_type == ET_DYN) {
1194 load_bias += error -
1195 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1196 load_addr += load_bias;
1199 k = elf_ppnt->p_vaddr;
1202 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1205 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1209 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1210 if (k > elf_brk) elf_brk = k;
1213 elf_entry += load_bias;
1214 elf_bss += load_bias;
1215 elf_brk += load_bias;
1216 start_code += load_bias;
1217 end_code += load_bias;
1218 // start_data += load_bias;
1219 end_data += load_bias;
1221 if (elf_interpreter) {
1222 if (interpreter_type & 1) {
1223 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1225 else if (interpreter_type & 2) {
1226 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1230 close(interpreter_fd);
1231 free(elf_interpreter);
1233 if (elf_entry == ~0UL) {
1234 printf("Unable to load interpreter\n");
1244 load_symbols(&elf_ex, bprm->fd);
1246 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1247 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1249 #ifdef LOW_ELF_STACK
1250 info->start_stack = bprm->p = elf_stack - 4;
1252 bprm->p = (unsigned long)
1253 create_elf_tables((char *)bprm->p,
1257 load_addr, load_bias,
1259 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1261 if (interpreter_type == INTERPRETER_AOUT)
1262 info->arg_start += strlen(passed_fileno) + 1;
1263 info->start_brk = info->brk = elf_brk;
1264 info->end_code = end_code;
1265 info->start_code = start_code;
1266 info->end_data = end_data;
1267 info->start_stack = bprm->p;
1269 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1271 set_brk(elf_bss, elf_brk);
1276 printf("(start_brk) %x\n" , info->start_brk);
1277 printf("(end_code) %x\n" , info->end_code);
1278 printf("(start_code) %x\n" , info->start_code);
1279 printf("(end_data) %x\n" , info->end_data);
1280 printf("(start_stack) %x\n" , info->start_stack);
1281 printf("(brk) %x\n" , info->brk);
1284 if ( info->personality == PER_SVR4 )
1286 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1287 and some applications "depend" upon this behavior.
1288 Since we do not have the power to recompile these, we
1289 emulate the SVr4 behavior. Sigh. */
1290 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1291 MAP_FIXED | MAP_PRIVATE, -1, 0);
1294 #ifdef ELF_PLAT_INIT
1296 * The ABI may specify that certain registers be set up in special
1297 * ways (on i386 %edx is the address of a DT_FINI function, for
1298    * example).  This macro performs whatever initialization of
1299 * the regs structure is required.
1301 ELF_PLAT_INIT(regs);
1305 info->entry = elf_entry;
1312 int elf_exec(const char * filename, char ** argv, char ** envp,
1313 struct target_pt_regs * regs, struct image_info *infop)
1315 struct linux_binprm bprm;
1319 bprm.p = TARGET_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
1320 for (i=0 ; i<MAX_ARG_PAGES ; i++) /* clear page-table */
1322 retval = open(filename, O_RDONLY);
1326 bprm.filename = (char *)filename;
1331 bprm.argc = count(argv);
1332 bprm.envc = count(envp);
1334 retval = prepare_binprm(&bprm);
1337 bprm.p = copy_strings(1, &bprm.filename, bprm.page, bprm.p);
1339 bprm.p = copy_strings(bprm.envc,envp,bprm.page,bprm.p);
1340 bprm.p = copy_strings(bprm.argc,argv,bprm.page,bprm.p);
1347 retval = load_elf_binary(&bprm,regs,infop);
1350 /* success. Initialize important registers */
1351 init_thread(regs, infop);
1355   /* Something went wrong; free the argument pages */
1356 for (i=0 ; i<MAX_ARG_PAGES ; i++) {
1357 free_page((void *)bprm.page[i]);
1363 static int load_aout_interp(void * exptr, int interp_fd)
1365 printf("a.out interpreter not yet supported\n");