diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 5fc130cc20ca4f15057fbc1b4e587477d9c9a13d..055f6a95ab1b4e53f77b0ca6c560a870406a4fa3 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -78,6 +78,11 @@ enum {
  */
 #define personality(pers)       (pers & PER_MASK)
 
+int info_is_fdpic(struct image_info *info)
+{
+    return info->personality == PER_LINUX_FDPIC;
+}
+
 /* this flag is ineffective under Linux too, should be deleted */
 #ifndef MAP_DENYWRITE
 #define MAP_DENYWRITE 0
@@ -287,6 +292,25 @@ static inline void init_thread(struct target_pt_regs *regs,
     /* For uClinux PIC binaries.  */
     /* XXX: Linux does this only on ARM with no MMU (do we care?) */
     regs->uregs[10] = infop->start_data;
+
+    /* Support ARM FDPIC.  */
+    if (info_is_fdpic(infop)) {
+        /* As described in the ABI document, r7 points to the loadmap info
+         * prepared by the kernel. If an interpreter is needed, r8 points
+         * to the interpreter loadmap and r9 points to the interpreter
+         * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and
+         * r9 points to the main program PT_DYNAMIC info.
+         */
+        regs->uregs[7] = infop->loadmap_addr;
+        if (infop->interpreter_loadmap_addr) {
+            /* Executable is dynamically loaded.  */
+            regs->uregs[8] = infop->interpreter_loadmap_addr;
+            regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
+        } else {
+            regs->uregs[8] = 0;
+            regs->uregs[9] = infop->pt_dynamic_addr;
+        }
+    }
 }
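
(For reference, a minimal sketch of the loadmap structures that r7 ends up
pointing at, with the layout taken from the kernel's uapi elf-fdpic.h; the
fdpic_reloc() helper is hypothetical and only illustrates how FDPIC startup
code walks the map:)

    #include <elf.h>
    #include <stdint.h>

    /* Layout as in the kernel's <uapi/linux/elf-fdpic.h>. */
    struct elf32_fdpic_loadseg {
        Elf32_Addr addr;      /* address the segment is mapped at */
        Elf32_Addr p_vaddr;   /* p_vaddr from the PT_LOAD phdr */
        Elf32_Word p_memsz;   /* p_memsz from the PT_LOAD phdr */
    };

    struct elf32_fdpic_loadmap {
        Elf32_Half version;   /* protocol version, currently 0 */
        Elf32_Half nsegs;     /* number of load segments */
        struct elf32_fdpic_loadseg segs[];
    };

    /* Hypothetical helper: translate a link-time address into its load
     * address by scanning the loadmap, which is what FDPIC crt0/ld.so
     * effectively does with the pointer received in r7. */
    static uintptr_t fdpic_reloc(const struct elf32_fdpic_loadmap *m,
                                 Elf32_Addr vaddr)
    {
        for (unsigned i = 0; i < m->nsegs; i++) {
            const struct elf32_fdpic_loadseg *s = &m->segs[i];
            if (vaddr >= s->p_vaddr && vaddr - s->p_vaddr < s->p_memsz) {
                return s->addr + (vaddr - s->p_vaddr);
            }
        }
        return 0;  /* not covered by any PT_LOAD segment */
    }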
 
 #define ELF_NREG    18
@@ -354,7 +378,6 @@ enum {
 
 /* The commpage only exists for 32 bit kernels */
 
-#define TARGET_HAS_VALIDATE_GUEST_SPACE
 /* Return 1 if the proposed guest space is suitable for the guest.
  * Return 0 if the proposed guest space isn't suitable, but another
  * address space should be tried.
@@ -363,8 +386,8 @@ enum {
  * The guest code may leave a page mapped and populate it if the
  * address is suitable.
  */
-static int validate_guest_space(unsigned long guest_base,
-                                unsigned long guest_size)
+static int init_guest_commpage(unsigned long guest_base,
+                               unsigned long guest_size)
 {
     unsigned long real_start, test_page_addr;
 
@@ -375,6 +398,11 @@ static int validate_guest_space(unsigned long guest_base,
 
     /* If the commpage lies within the already allocated guest space,
      * then there is no way we can allocate it.
+     *
+     * You may be thinking that this check is redundant because
+     * we already validated the guest size against MAX_RESERVED_VA;
+     * but if qemu_host_page_mask is unusually large, then
+     * test_page_addr may be lower.
      */
     if (test_page_addr >= guest_base
         && test_page_addr < (guest_base + guest_size)) {
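
(The page probed here is the 32-bit ARM commpage, which carries the
kernel's kuser helpers in the 0xffff0f00..0xffff0fff range.  A sketch of
why a guest must be able to reach it, using the entry point documented in
the kernel's Documentation/arm/kernel_user_helpers.txt; real callers are
usually hidden inside the guest libc:)

    /* __kuser_get_tls lives at the architecturally fixed address
     * 0xffff0fe0; calling it jumps into code on the commpage, so QEMU
     * has to be able to map a page up there. */
    typedef void *(*kuser_get_tls_fn)(void);
    #define __kuser_get_tls ((kuser_get_tls_fn)0xffff0fe0)

    void *guest_tls_pointer(void)
    {
        return __kuser_get_tls();
    }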
@@ -430,6 +458,10 @@ static uint32_t get_elf_hwcap(void)
     /* probe for the extra features */
 #define GET_FEATURE(feat, hwcap) \
     do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
+
+#define GET_FEATURE_ID(feat, hwcap) \
+    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
     /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
     GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
     GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
@@ -439,8 +471,8 @@ static uint32_t get_elf_hwcap(void)
     GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
     GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
     GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
-    GET_FEATURE(ARM_FEATURE_ARM_DIV, ARM_HWCAP_ARM_IDIVA);
-    GET_FEATURE(ARM_FEATURE_THUMB_DIV, ARM_HWCAP_ARM_IDIVT);
+    GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
+    GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
     /* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
      * Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
      * ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
@@ -457,15 +489,16 @@ static uint32_t get_elf_hwcap2(void)
     ARMCPU *cpu = ARM_CPU(thread_cpu);
     uint32_t hwcaps = 0;
 
-    GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP2_ARM_AES);
-    GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP2_ARM_PMULL);
-    GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP2_ARM_SHA1);
-    GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP2_ARM_SHA2);
-    GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP2_ARM_CRC32);
+    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
+    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
+    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
+    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
+    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
     return hwcaps;
 }
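
(The bits assembled here are exported to the guest through the auxiliary
vector, so a guest program can probe them with getauxval(3).  A minimal
sketch, with the HWCAP2 constant matching ARM_HWCAP2_ARM_AES above:)

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP2_AES
    #define HWCAP2_AES (1 << 0)   /* same bit as ARM_HWCAP2_ARM_AES */
    #endif

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("AES instructions %savailable\n",
               (hwcap2 & HWCAP2_AES) ? "" : "not ");
        return 0;
    }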
 
 #undef GET_FEATURE
+#undef GET_FEATURE_ID
 
 #else
 /* 64 bit ARM definitions */
@@ -540,22 +573,26 @@ static uint32_t get_elf_hwcap(void)
     hwcaps |= ARM_HWCAP_A64_ASIMD;
 
     /* probe for the extra features */
-#define GET_FEATURE(feat, hwcap) \
-    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
-    GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP_A64_AES);
-    GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP_A64_PMULL);
-    GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1);
-    GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2);
-    GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32);
-    GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3);
-    GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3);
-    GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4);
-    GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512);
-    GET_FEATURE(ARM_FEATURE_V8_FP16,
-                ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
-    GET_FEATURE(ARM_FEATURE_V8_RDM, ARM_HWCAP_A64_ASIMDRDM);
-    GET_FEATURE(ARM_FEATURE_V8_FCMA, ARM_HWCAP_A64_FCMA);
-#undef GET_FEATURE
+#define GET_FEATURE_ID(feat, hwcap) \
+    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
+    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
+    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
+    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
+    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
+    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
+    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
+    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
+    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
+    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
+    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
+    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
+    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
+    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
+    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
+    GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
+
+#undef GET_FEATURE_ID
 
     return hwcaps;
 }
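
(The cpu_isar_feature() tests used above read 4-bit fields out of the ID
registers rather than consulting ad-hoc ARM_FEATURE_* flags.  A simplified
sketch of that pattern; the field positions are ID_AA64ISAR0_EL1 as
specified in the ARM ARM, while the helper names are illustrative, not
QEMU's exact internal API:)

    #include <stdbool.h>
    #include <stdint.h>

    /* Extract a 4-bit ID register field, in the spirit of QEMU's
     * FIELD_EX64() machinery. */
    static inline uint64_t id_field(uint64_t reg, int shift)
    {
        return (reg >> shift) & 0xf;
    }

    /* ID_AA64ISAR0_EL1.AES, bits [7:4]: 1 => AES, 2 => AES + PMULL. */
    static bool sketch_aa64_aes(uint64_t id_aa64isar0)
    {
        return id_field(id_aa64isar0, 4) != 0;
    }

    static bool sketch_aa64_pmull(uint64_t id_aa64isar0)
    {
        return id_field(id_aa64isar0, 4) >= 2;
    }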
@@ -563,78 +600,6 @@ static uint32_t get_elf_hwcap(void)
 #endif /* not TARGET_AARCH64 */
 #endif /* TARGET_ARM */
 
-#ifdef TARGET_UNICORE32
-
-#define ELF_START_MMAP          0x80000000
-
-#define ELF_CLASS               ELFCLASS32
-#define ELF_DATA                ELFDATA2LSB
-#define ELF_ARCH                EM_UNICORE32
-
-static inline void init_thread(struct target_pt_regs *regs,
-        struct image_info *infop)
-{
-    abi_long stack = infop->start_stack;
-    memset(regs, 0, sizeof(*regs));
-    regs->UC32_REG_asr = 0x10;
-    regs->UC32_REG_pc = infop->entry & 0xfffffffe;
-    regs->UC32_REG_sp = infop->start_stack;
-    /* FIXME - what to for failure of get_user()? */
-    get_user_ual(regs->UC32_REG_02, stack + 8); /* envp */
-    get_user_ual(regs->UC32_REG_01, stack + 4); /* envp */
-    /* XXX: it seems that r0 is zeroed after ! */
-    regs->UC32_REG_00 = 0;
-}
-
-#define ELF_NREG    34
-typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
-
-static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUUniCore32State *env)
-{
-    (*regs)[0] = env->regs[0];
-    (*regs)[1] = env->regs[1];
-    (*regs)[2] = env->regs[2];
-    (*regs)[3] = env->regs[3];
-    (*regs)[4] = env->regs[4];
-    (*regs)[5] = env->regs[5];
-    (*regs)[6] = env->regs[6];
-    (*regs)[7] = env->regs[7];
-    (*regs)[8] = env->regs[8];
-    (*regs)[9] = env->regs[9];
-    (*regs)[10] = env->regs[10];
-    (*regs)[11] = env->regs[11];
-    (*regs)[12] = env->regs[12];
-    (*regs)[13] = env->regs[13];
-    (*regs)[14] = env->regs[14];
-    (*regs)[15] = env->regs[15];
-    (*regs)[16] = env->regs[16];
-    (*regs)[17] = env->regs[17];
-    (*regs)[18] = env->regs[18];
-    (*regs)[19] = env->regs[19];
-    (*regs)[20] = env->regs[20];
-    (*regs)[21] = env->regs[21];
-    (*regs)[22] = env->regs[22];
-    (*regs)[23] = env->regs[23];
-    (*regs)[24] = env->regs[24];
-    (*regs)[25] = env->regs[25];
-    (*regs)[26] = env->regs[26];
-    (*regs)[27] = env->regs[27];
-    (*regs)[28] = env->regs[28];
-    (*regs)[29] = env->regs[29];
-    (*regs)[30] = env->regs[30];
-    (*regs)[31] = env->regs[31];
-
-    (*regs)[32] = cpu_asr_read((CPUUniCore32State *)env);
-    (*regs)[33] = env->regs[0]; /* XXX */
-}
-
-#define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE               4096
-
-#define ELF_HWCAP                       (UC32_HWCAP_CMOV | UC32_HWCAP_UCF64)
-
-#endif
-
 #ifdef TARGET_SPARC
 #ifdef TARGET_SPARC64
 
@@ -751,6 +716,7 @@ enum {
     QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
     QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
     QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
+    QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
 };
 
 #define ELF_HWCAP get_elf_hwcap()
@@ -805,6 +771,7 @@ static uint32_t get_elf_hwcap2(void)
     GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
     GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
                   PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07);
+    GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00);
 
 #undef GET_FEATURE
 #undef GET_FEATURE2
@@ -894,6 +861,8 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en
 #endif
 #define ELF_ARCH    EM_MIPS
 
+#define elf_check_arch(x) ((x) == EM_MIPS || (x) == EM_NANOMIPS)
+
 static inline void init_thread(struct target_pt_regs *regs,
                                struct image_info *infop)
 {
@@ -950,6 +919,30 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *e
 #define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE        4096
 
+/* See arch/mips/include/uapi/asm/hwcap.h.  */
+enum {
+    HWCAP_MIPS_R6           = (1 << 0),
+    HWCAP_MIPS_MSA          = (1 << 1),
+};
+
+#define ELF_HWCAP get_elf_hwcap()
+
+static uint32_t get_elf_hwcap(void)
+{
+    MIPSCPU *cpu = MIPS_CPU(thread_cpu);
+    uint32_t hwcaps = 0;
+
+#define GET_FEATURE(flag, hwcap) \
+    do { if (cpu->env.insn_flags & (flag)) { hwcaps |= hwcap; } } while (0)
+
+    GET_FEATURE(ISA_MIPS32R6 | ISA_MIPS64R6, HWCAP_MIPS_R6);
+    GET_FEATURE(ASE_MSA, HWCAP_MIPS_MSA);
+
+#undef GET_FEATURE
+
+    return hwcaps;
+}
+
 #endif /* TARGET_MIPS */
 
 #ifdef TARGET_MICROBLAZE
@@ -1341,6 +1334,64 @@ static inline void init_thread(struct target_pt_regs *regs,
 
 #endif /* TARGET_HPPA */
 
+#ifdef TARGET_XTENSA
+
+#define ELF_START_MMAP 0x20000000
+
+#define ELF_CLASS       ELFCLASS32
+#define ELF_ARCH        EM_XTENSA
+
+static inline void init_thread(struct target_pt_regs *regs,
+                               struct image_info *infop)
+{
+    regs->windowbase = 0;
+    regs->windowstart = 1;
+    regs->areg[1] = infop->start_stack;
+    regs->pc = infop->entry;
+}
+
+/* See linux kernel: arch/xtensa/include/asm/elf.h.  */
+#define ELF_NREG 128
+typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
+
+enum {
+    TARGET_REG_PC,
+    TARGET_REG_PS,
+    TARGET_REG_LBEG,
+    TARGET_REG_LEND,
+    TARGET_REG_LCOUNT,
+    TARGET_REG_SAR,
+    TARGET_REG_WINDOWSTART,
+    TARGET_REG_WINDOWBASE,
+    TARGET_REG_THREADPTR,
+    TARGET_REG_AR0 = 64,
+};
+
+static void elf_core_copy_regs(target_elf_gregset_t *regs,
+                               const CPUXtensaState *env)
+{
+    unsigned i;
+
+    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
+    (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM);
+    (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]);
+    (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]);
+    (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]);
+    (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]);
+    (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]);
+    (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]);
+    (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]);
+    xtensa_sync_phys_from_window((CPUXtensaState *)env);
+    for (i = 0; i < env->config->nareg; ++i) {
+        (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]);
+    }
+}
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE       4096
+
+#endif /* TARGET_XTENSA */
+
 #ifndef ELF_PLATFORM
 #define ELF_PLATFORM (NULL)
 #endif
@@ -1394,7 +1445,10 @@ struct exec
 #define QMAGIC 0314
 
 /* Necessary parameters */
-#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
+#define TARGET_ELF_EXEC_PAGESIZE \
+        (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \
+         TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE))
+#define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE)
 #define TARGET_ELF_PAGESTART(_v) ((_v) & \
                                  ~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1))
 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
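
(A worked example of the new macro: on a 64KiB-page host emulating a
4KiB-page target, a PT_LOAD segment with p_align = 4KiB has
(p_align & ~qemu_host_page_mask) != 0, so TARGET_ELF_EXEC_PAGESIZE falls
back to TARGET_PAGE_SIZE; with p_align = 64KiB it stays at
MAX(qemu_host_page_size, TARGET_PAGE_SIZE).  The same arithmetic as a
standalone sketch:)

    #include <stdio.h>

    int main(void)
    {
        unsigned long host_page   = 64 * 1024;          /* 64KiB host */
        unsigned long host_mask   = ~(host_page - 1);
        unsigned long target_page = 4 * 1024;           /* 4KiB target */
        unsigned long p_align     = 4 * 1024;           /* from the phdr */

        unsigned long elf_pagesize =
            (p_align & ~host_mask)
                ? target_page
                : (host_page > target_page ? host_page : target_page);

        printf("ELF page size: %lu\n", elf_pagesize);   /* prints 4096 */
        return 0;
    }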
@@ -1667,7 +1721,19 @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
     }
 }
 
-#ifdef CONFIG_USE_FDPIC
+#ifdef TARGET_ARM
+static int elf_is_fdpic(struct elfhdr *exec)
+{
+    return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
+}
+#else
+/* Default implementation, always false.  */
+static int elf_is_fdpic(struct elfhdr *exec)
+{
+    return 0;
+}
+#endif
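
(The same OSABI test, as a standalone sketch against an ELF file on disk;
ELFOSABI_ARM_FDPIC is defined locally in case the host <elf.h> does not
carry it, with the value 65 matching QEMU's include/elf.h:)

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    #ifndef ELFOSABI_ARM_FDPIC
    #define ELFOSABI_ARM_FDPIC 65
    #endif

    /* Return 1 if the file starts with an ARM FDPIC ELF header. */
    static int file_is_arm_fdpic(const char *path)
    {
        unsigned char ident[EI_NIDENT];
        FILE *f = fopen(path, "rb");
        int ret = 0;

        if (f && fread(ident, 1, EI_NIDENT, f) == EI_NIDENT) {
            ret = memcmp(ident, ELFMAG, SELFMAG) == 0
                  && ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
        }
        if (f) {
            fclose(f);
        }
        return ret;
    }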
+
 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
 {
     uint16_t n;
@@ -1692,7 +1758,6 @@ static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong s
 
     return sp;
 }
-#endif
 
 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                    struct elfhdr *exec,
@@ -1711,7 +1776,6 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
 
     sp = p;
 
-#ifdef CONFIG_USE_FDPIC
     /* Needs to be before we load the env/argc/... */
     if (elf_is_fdpic(exec)) {
         /* Need 4 byte alignment for these structs */
@@ -1721,9 +1785,13 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
         if (interp_info) {
             interp_info->other_info = info;
             sp = loader_build_fdpic_loadmap(interp_info, sp);
+            info->interpreter_loadmap_addr = interp_info->loadmap_addr;
+            info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr;
+        } else {
+            info->interpreter_loadmap_addr = 0;
+            info->interpreter_pt_dynamic_addr = 0;
         }
     }
-#endif
 
     u_platform = 0;
     k_platform = ELF_PLATFORM;
@@ -1820,7 +1888,13 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
     NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
     NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
     NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
-    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE, getpagesize())));
+    if ((info->alignment & ~qemu_host_page_mask) != 0) {
+        /* Target doesn't support host page size alignment */
+        NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
+    } else {
+        NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE,
+                                               qemu_host_page_size)));
+    }
     NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
     NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
     NEW_AUX_ENT(AT_ENTRY, info->entry);
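
(On the guest side, AT_PAGESZ is the value that ends up behind
getpagesize() and sysconf(_SC_PAGESIZE), so the choice made here is what
guest allocators will assume.  A quick probe, as a sketch:)

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <unistd.h>

    int main(void)
    {
        /* Both report the page size QEMU put in the auxiliary vector. */
        printf("AT_PAGESZ: %lu\n", getauxval(AT_PAGESZ));
        printf("sysconf:   %ld\n", sysconf(_SC_PAGESIZE));
        return 0;
    }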
@@ -1869,21 +1943,12 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
     return sp;
 }
 
-#ifndef TARGET_HAS_VALIDATE_GUEST_SPACE
-/* If the guest doesn't have a validation function just agree */
-static int validate_guest_space(unsigned long guest_base,
-                                unsigned long guest_size)
-{
-    return 1;
-}
-#endif
-
 unsigned long init_guest_space(unsigned long host_start,
                                unsigned long host_size,
                                unsigned long guest_start,
                                bool fixed)
 {
-    unsigned long current_start, real_start;
+    unsigned long current_start, aligned_start;
     int flags;
 
     assert(host_start || host_size);
@@ -1891,11 +1956,12 @@ unsigned long init_guest_space(unsigned long host_start,
     /* If just a starting address is given, then just verify that
      * address.  */
     if (host_start && !host_size) {
-        if (validate_guest_space(host_start, host_size) == 1) {
-            return host_start;
-        } else {
+#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
+        if (init_guest_commpage(host_start, host_size) != 1) {
             return (unsigned long)-1;
         }
+#endif
+        return host_start;
     }
 
     /* Setup the initial flags and start address.  */
@@ -1907,8 +1973,58 @@ unsigned long init_guest_space(unsigned long host_start,
 
     /* Otherwise, a non-zero size region of memory needs to be mapped
      * and validated.  */
+
+#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
+    /* On 32-bit ARM, we need to map not just the usable memory, but
+     * also the commpage.  Try to find a suitable place by allocating
+     * a big chunk for all of it.  If host_start is specified, the
+     * naive strategy below probably does well enough.
+     */
+    if (!host_start) {
+        unsigned long guest_full_size, host_full_size, real_start;
+
+        guest_full_size =
+            (0xffff0f00 & qemu_host_page_mask) + qemu_host_page_size;
+        host_full_size = guest_full_size - guest_start;
+        real_start = (unsigned long)
+            mmap(NULL, host_full_size, PROT_NONE, flags, -1, 0);
+        if (real_start == (unsigned long)-1) {
+            if (host_size < host_full_size - qemu_host_page_size) {
+                /* We failed to map a contiguous segment, but we're
+                 * allowed to have a gap between the usable memory and
+                 * the commpage where other things can be mapped.
+                 * This sparseness gives us more flexibility to find
+                 * an address range.
+                 */
+                goto naive;
+            }
+            return (unsigned long)-1;
+        }
+        munmap((void *)real_start, host_full_size);
+        if (real_start & ~qemu_host_page_mask) {
+            /* The same thing again, but with an extra qemu_host_page_size
+             * so that we can shift around alignment.
+             */
+            unsigned long real_size = host_full_size + qemu_host_page_size;
+            real_start = (unsigned long)
+                mmap(NULL, real_size, PROT_NONE, flags, -1, 0);
+            if (real_start == (unsigned long)-1) {
+                if (host_size < host_full_size - qemu_host_page_size) {
+                    goto naive;
+                }
+                return (unsigned long)-1;
+            }
+            munmap((void *)real_start, real_size);
+            real_start = HOST_PAGE_ALIGN(real_start);
+        }
+        current_start = real_start;
+    }
+ naive:
+#endif
+
     while (1) {
-        unsigned long real_size = host_size;
+        unsigned long real_start, real_size, aligned_size;
+        aligned_size = real_size = host_size;
 
         /* Do not use mmap_find_vma here because that is limited to the
          * guest address space.  We are going to make the
@@ -1920,30 +2036,63 @@ unsigned long init_guest_space(unsigned long host_start,
             return (unsigned long)-1;
         }
 
+        /* Check to see if the address is valid.  */
+        if (host_start && real_start != current_start) {
+            goto try_again;
+        }
+
         /* Ensure the address is properly aligned.  */
         if (real_start & ~qemu_host_page_mask) {
+            /* Ideally, we adjust like
+             *
+             *    pages: [  ][  ][  ][  ][  ]
+             *      old:   [   real   ]
+             *             [ aligned  ]
+             *      new:   [     real     ]
+             *               [ aligned  ]
+             *
+             * But if there is something else mapped right after it,
+             * then obviously it won't have room to grow, and the
+             * kernel will put the new larger real someplace else with
+             * unknown alignment (if we made it to here, then
+             * fixed=false).  Which is why we grow real by a full page
+             * size, instead of by part of one; so that even if we get
+             * moved, we can still guarantee alignment.  But this does
+             * mean that there is a padding of < 1 page both before
+             * and after the aligned range; the "after" could
+             * cause problems for ARM emulation where it could butt in
+             * to where we need to put the commpage.
+             */
             munmap((void *)real_start, host_size);
-            real_size = host_size + qemu_host_page_size;
+            real_size = aligned_size + qemu_host_page_size;
             real_start = (unsigned long)
                 mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0);
             if (real_start == (unsigned long)-1) {
                 return (unsigned long)-1;
             }
-            real_start = HOST_PAGE_ALIGN(real_start);
+            aligned_start = HOST_PAGE_ALIGN(real_start);
+        } else {
+            aligned_start = real_start;
         }
 
-        /* Check to see if the address is valid.  */
-        if (!host_start || real_start == current_start) {
-            int valid = validate_guest_space(real_start - guest_start,
-                                             real_size);
-            if (valid == 1) {
-                break;
-            } else if (valid == -1) {
-                return (unsigned long)-1;
-            }
-            /* valid == 0, so try again. */
+#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
+        /* On 32-bit ARM, we need to also be able to map the commpage.  */
+        int valid = init_guest_commpage(aligned_start - guest_start,
+                                        aligned_size + guest_start);
+        if (valid == -1) {
+            munmap((void *)real_start, real_size);
+            return (unsigned long)-1;
+        } else if (valid == 0) {
+            goto try_again;
         }
+#endif
+
+        /* If nothing has said `return -1` or `goto try_again` yet,
+         * then the address we have is good.
+         */
+        break;
 
+    try_again:
         /* That address didn't work.  Unmap and try a different one.
          * The address the host picked is typically right at
          * the top of the host address space and leaves the guest with
@@ -1952,8 +2101,12 @@ unsigned long init_guest_space(unsigned long host_start,
          * happen often.  Probably means we got unlucky and host
          * address space randomization put a shared library somewhere
          * inconvenient.
+         *
+         * This is probably a good strategy if host_start, but is
+         * probably a bad strategy if not, which means we got here
+         * because of trouble with ARM commpage setup.
          */
-        munmap((void *)real_start, host_size);
+        munmap((void *)real_start, real_size);
         current_start += qemu_host_page_size;
         if (host_start == current_start) {
             /* Theoretically possible if host doesn't have any suitably
@@ -1965,7 +2118,7 @@ unsigned long init_guest_space(unsigned long host_start,
 
     qemu_log_mask(CPU_LOG_PAGE, "Reserved 0x%lx bytes of guest address space\n", host_size);
 
-    return real_start;
+    return aligned_start;
 }
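
(The alignment handling above is a variant of the classic
over-allocate-then-trim reservation trick.  A self-contained sketch of the
core idea, simplified relative to init_guest_space, which must also retry
on bad addresses and leave room for the ARM commpage:)

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* Reserve `size` bytes aligned to `align` (a power of two no smaller
     * than the real page size) by mapping one extra alignment unit and
     * unmapping the slack at both ends. */
    static void *reserve_aligned(size_t size, size_t align)
    {
        size_t real_size = size + align;
        uint8_t *p = mmap(NULL, real_size, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                          -1, 0);
        if (p == MAP_FAILED) {
            return NULL;
        }

        uint8_t *aligned =
            (uint8_t *)(((uintptr_t)p + align - 1) & ~(uintptr_t)(align - 1));
        size_t head = aligned - p;
        size_t tail = (p + real_size) - (aligned + size);

        if (head) {
            munmap(p, head);               /* leading slack */
        }
        if (tail) {
            munmap(aligned + size, tail);  /* trailing slack */
        }
        return aligned;
    }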
 
 static void probe_guest_base(const char *image_name,
@@ -2060,16 +2213,15 @@ static void load_elf_image(const char *image_name, int image_fd,
     }
     bswap_phdr(phdr, ehdr->e_phnum);
 
-#ifdef CONFIG_USE_FDPIC
     info->nsegs = 0;
     info->pt_dynamic_addr = 0;
-#endif
 
     mmap_lock();
 
     /* Find the maximum size of the image and allocate an appropriate
        amount of memory to handle that.  */
     loaddr = -1, hiaddr = 0;
+    info->alignment = 0;
     for (i = 0; i < ehdr->e_phnum; ++i) {
         if (phdr[i].p_type == PT_LOAD) {
             abi_ulong a = phdr[i].p_vaddr - phdr[i].p_offset;
@@ -2080,9 +2232,8 @@ static void load_elf_image(const char *image_name, int image_fd,
             if (a > hiaddr) {
                 hiaddr = a;
             }
-#ifdef CONFIG_USE_FDPIC
             ++info->nsegs;
-#endif
+            info->alignment |= phdr[i].p_align;
         }
     }
 
@@ -2107,8 +2258,7 @@ static void load_elf_image(const char *image_name, int image_fd,
     }
     load_bias = load_addr - loaddr;
 
-#ifdef CONFIG_USE_FDPIC
-    {
+    if (elf_is_fdpic(ehdr)) {
         struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
             g_malloc(sizeof(*loadsegs) * info->nsegs);
 
@@ -2126,7 +2276,6 @@ static void load_elf_image(const char *image_name, int image_fd,
             }
         }
     }
-#endif
 
     info->load_bias = load_bias;
     info->load_addr = load_addr;
@@ -2141,7 +2290,7 @@ static void load_elf_image(const char *image_name, int image_fd,
     for (i = 0; i < ehdr->e_phnum; i++) {
         struct elf_phdr *eppnt = phdr + i;
         if (eppnt->p_type == PT_LOAD) {
-            abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
+            abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len;
             int elf_prot = 0;
 
             if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
@@ -2151,8 +2300,9 @@ static void load_elf_image(const char *image_name, int image_fd,
             vaddr = load_bias + eppnt->p_vaddr;
             vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
             vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
+            vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po);
 
-            error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
+            error = target_mmap(vaddr_ps, vaddr_len,
                                 elf_prot, MAP_PRIVATE | MAP_FIXED,
                                 image_fd, eppnt->p_offset - vaddr_po);
             if (error == -1) {