*/
#define personality(pers) (pers & PER_MASK)
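+/* Return non-zero if the image was loaded with the ELF FDPIC personality. */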
+int info_is_fdpic(struct image_info *info)
+{
+ return info->personality == PER_LINUX_FDPIC;
+}
+
/* this flag is ineffective under linux too, should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
/* For uClinux PIC binaries. */
/* XXX: Linux does this only on ARM with no MMU (do we care ?) */
regs->uregs[10] = infop->start_data;
+
+ /* Support ARM FDPIC. */
+ if (info_is_fdpic(infop)) {
+ /* As described in the ABI document, r7 points to the loadmap info
+ * prepared by the kernel. If an interpreter is needed, r8 points
+ * to the interpreter loadmap and r9 points to the interpreter
+ * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and
+ * r9 points to the main program PT_DYNAMIC info.
+ */
+ regs->uregs[7] = infop->loadmap_addr;
+ if (infop->interpreter_loadmap_addr) {
+ /* Executable is dynamically loaded. */
+ regs->uregs[8] = infop->interpreter_loadmap_addr;
+ regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
+ } else {
+ regs->uregs[8] = 0;
+ regs->uregs[9] = infop->pt_dynamic_addr;
+ }
+ }
}
#define ELF_NREG 18
/* The commpage only exists for 32 bit kernels */
-#define TARGET_HAS_VALIDATE_GUEST_SPACE
/* Return 1 if the proposed guest space is suitable for the guest.
* Return 0 if the proposed guest space isn't suitable, but another
* address space should be tried.
* The guest code may leave a page mapped and populate it if the
* address is suitable.
*/
-static int validate_guest_space(unsigned long guest_base,
- unsigned long guest_size)
+static int init_guest_commpage(unsigned long guest_base,
+ unsigned long guest_size)
{
unsigned long real_start, test_page_addr;
/* If the commpage lies within the already allocated guest space,
* then there is no way we can allocate it.
+ *
+ * You may be thinking that this check is redundant because
+ * we already validated the guest size against MAX_RESERVED_VA;
+ * but if the host page size is unusually large, then
+ * test_page_addr may be lower.
*/
if (test_page_addr >= guest_base
&& test_page_addr < (guest_base + guest_size)) {
/* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
+
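+/* Some hwcaps are keyed off ID register fields rather than ARM_FEATURE_* bits. */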
+#define GET_FEATURE_ID(feat, hwcap) \
+ do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
/* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
- GET_FEATURE(ARM_FEATURE_ARM_DIV, ARM_HWCAP_ARM_IDIVA);
- GET_FEATURE(ARM_FEATURE_THUMB_DIV, ARM_HWCAP_ARM_IDIVT);
+ GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
+ GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
/* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
* Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
* ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
ARMCPU *cpu = ARM_CPU(thread_cpu);
uint32_t hwcaps = 0;
- GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP2_ARM_AES);
- GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP2_ARM_PMULL);
- GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP2_ARM_SHA1);
- GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP2_ARM_SHA2);
- GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP2_ARM_CRC32);
+ GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
+ GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
+ GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
+ GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
+ GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
return hwcaps;
}
#undef GET_FEATURE
+#undef GET_FEATURE_ID
#else
/* 64 bit ARM definitions */
hwcaps |= ARM_HWCAP_A64_ASIMD;
/* probe for the extra features */
-#define GET_FEATURE(feat, hwcap) \
- do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
- GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP_A64_AES);
- GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP_A64_PMULL);
- GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1);
- GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2);
- GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32);
- GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3);
- GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3);
- GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4);
- GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512);
- GET_FEATURE(ARM_FEATURE_V8_FP16,
- ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
- GET_FEATURE(ARM_FEATURE_V8_RDM, ARM_HWCAP_A64_ASIMDRDM);
- GET_FEATURE(ARM_FEATURE_V8_FCMA, ARM_HWCAP_A64_FCMA);
-#undef GET_FEATURE
+#define GET_FEATURE_ID(feat, hwcap) \
+ do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
+ GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
+ GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
+ GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
+ GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
+ GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
+ GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
+ GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
+ GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
+ GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
+ GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
+ GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
+ GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
+ GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
+ GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
+ GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
+
+#undef GET_FEATURE_ID
return hwcaps;
}
#endif /* not TARGET_AARCH64 */
#endif /* TARGET_ARM */
-#ifdef TARGET_UNICORE32
-
-#define ELF_START_MMAP 0x80000000
-
-#define ELF_CLASS ELFCLASS32
-#define ELF_DATA ELFDATA2LSB
-#define ELF_ARCH EM_UNICORE32
-
-static inline void init_thread(struct target_pt_regs *regs,
- struct image_info *infop)
-{
- abi_long stack = infop->start_stack;
- memset(regs, 0, sizeof(*regs));
- regs->UC32_REG_asr = 0x10;
- regs->UC32_REG_pc = infop->entry & 0xfffffffe;
- regs->UC32_REG_sp = infop->start_stack;
- /* FIXME - what to for failure of get_user()? */
- get_user_ual(regs->UC32_REG_02, stack + 8); /* envp */
- get_user_ual(regs->UC32_REG_01, stack + 4); /* envp */
- /* XXX: it seems that r0 is zeroed after ! */
- regs->UC32_REG_00 = 0;
-}
-
-#define ELF_NREG 34
-typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
-
-static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUUniCore32State *env)
-{
- (*regs)[0] = env->regs[0];
- (*regs)[1] = env->regs[1];
- (*regs)[2] = env->regs[2];
- (*regs)[3] = env->regs[3];
- (*regs)[4] = env->regs[4];
- (*regs)[5] = env->regs[5];
- (*regs)[6] = env->regs[6];
- (*regs)[7] = env->regs[7];
- (*regs)[8] = env->regs[8];
- (*regs)[9] = env->regs[9];
- (*regs)[10] = env->regs[10];
- (*regs)[11] = env->regs[11];
- (*regs)[12] = env->regs[12];
- (*regs)[13] = env->regs[13];
- (*regs)[14] = env->regs[14];
- (*regs)[15] = env->regs[15];
- (*regs)[16] = env->regs[16];
- (*regs)[17] = env->regs[17];
- (*regs)[18] = env->regs[18];
- (*regs)[19] = env->regs[19];
- (*regs)[20] = env->regs[20];
- (*regs)[21] = env->regs[21];
- (*regs)[22] = env->regs[22];
- (*regs)[23] = env->regs[23];
- (*regs)[24] = env->regs[24];
- (*regs)[25] = env->regs[25];
- (*regs)[26] = env->regs[26];
- (*regs)[27] = env->regs[27];
- (*regs)[28] = env->regs[28];
- (*regs)[29] = env->regs[29];
- (*regs)[30] = env->regs[30];
- (*regs)[31] = env->regs[31];
-
- (*regs)[32] = cpu_asr_read((CPUUniCore32State *)env);
- (*regs)[33] = env->regs[0]; /* XXX */
-}
-
-#define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE 4096
-
-#define ELF_HWCAP (UC32_HWCAP_CMOV | UC32_HWCAP_UCF64)
-
-#endif
-
#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64
QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
+ QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
};
#define ELF_HWCAP get_elf_hwcap()
GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07);
+ GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00);
#undef GET_FEATURE
#undef GET_FEATURE2
#endif
#define ELF_ARCH EM_MIPS
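+/* nanoMIPS binaries carry their own e_machine value, so accept it as well. */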
+#define elf_check_arch(x) ((x) == EM_MIPS || (x) == EM_NANOMIPS)
+
static inline void init_thread(struct target_pt_regs *regs,
struct image_info *infop)
{
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
+/* See arch/mips/include/uapi/asm/hwcap.h. */
+enum {
+ HWCAP_MIPS_R6 = (1 << 0),
+ HWCAP_MIPS_MSA = (1 << 1),
+};
+
+#define ELF_HWCAP get_elf_hwcap()
+
+static uint32_t get_elf_hwcap(void)
+{
+ MIPSCPU *cpu = MIPS_CPU(thread_cpu);
+ uint32_t hwcaps = 0;
+
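+/* insn_flags carries both the ISA level and the ASE bits for this CPU model. */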
+#define GET_FEATURE(flag, hwcap) \
+ do { if (cpu->env.insn_flags & (flag)) { hwcaps |= hwcap; } } while (0)
+
+ GET_FEATURE(ISA_MIPS32R6 | ISA_MIPS64R6, HWCAP_MIPS_R6);
+ GET_FEATURE(ASE_MSA, HWCAP_MIPS_MSA);
+
+#undef GET_FEATURE
+
+ return hwcaps;
+}
+
#endif /* TARGET_MIPS */
#ifdef TARGET_MICROBLAZE
#endif /* TARGET_HPPA */
+#ifdef TARGET_XTENSA
+
+#define ELF_START_MMAP 0x20000000
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_ARCH EM_XTENSA
+
+static inline void init_thread(struct target_pt_regs *regs,
+ struct image_info *infop)
+{
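+ /* Select register window 0 (windowstart bit 0 set, windowbase 0);
+ * in the windowed ABI a1 holds the stack pointer.
+ */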
+ regs->windowbase = 0;
+ regs->windowstart = 1;
+ regs->areg[1] = infop->start_stack;
+ regs->pc = infop->entry;
+}
+
+/* See linux kernel: arch/xtensa/include/asm/elf.h. */
+#define ELF_NREG 128
+typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
+
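+/* Indices into the core dump register block; the address registers
+ * start at slot 64, matching the kernel layout referenced above.
+ */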
+enum {
+ TARGET_REG_PC,
+ TARGET_REG_PS,
+ TARGET_REG_LBEG,
+ TARGET_REG_LEND,
+ TARGET_REG_LCOUNT,
+ TARGET_REG_SAR,
+ TARGET_REG_WINDOWSTART,
+ TARGET_REG_WINDOWBASE,
+ TARGET_REG_THREADPTR,
+ TARGET_REG_AR0 = 64,
+};
+
+static void elf_core_copy_regs(target_elf_gregset_t *regs,
+ const CPUXtensaState *env)
+{
+ unsigned i;
+
+ (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
+ (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM);
+ (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]);
+ (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]);
+ (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]);
+ (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]);
+ (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]);
+ (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]);
+ (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]);
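+ /* Flush the live register window back into phys_regs before copying. */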
+ xtensa_sync_phys_from_window((CPUXtensaState *)env);
+ for (i = 0; i < env->config->nareg; ++i) {
+ (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]);
+ }
+}
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+#endif /* TARGET_XTENSA */
+
#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif
#define QMAGIC 0314
/* Necessary parameters */
-#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
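+/* If a PT_LOAD segment's alignment is not a multiple of the host page
+ * size, fall back to the target page size; otherwise use the larger of
+ * the host and target page sizes. E.g. with 64KiB host pages, a segment
+ * aligned to 4KiB has (p_align & ~qemu_host_page_mask) != 0 and so is
+ * mapped with TARGET_PAGE_SIZE granularity.
+ */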
+#define TARGET_ELF_EXEC_PAGESIZE \
+ (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \
+ TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE))
+#define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE)
#define TARGET_ELF_PAGESTART(_v) ((_v) & \
~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
}
}
-#ifdef CONFIG_USE_FDPIC
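+/* Whether this ELF header describes an FDPIC binary. Only ARM supports
+ * FDPIC here; it is flagged via a dedicated OSABI value in e_ident.
+ */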
+#ifdef TARGET_ARM
+static int elf_is_fdpic(struct elfhdr *exec)
+{
+ return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
+}
+#else
+/* Default implementation, always false. */
+static int elf_is_fdpic(struct elfhdr *exec)
+{
+ return 0;
+}
+#endif
+
static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
{
uint16_t n;
return sp;
}
-#endif
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
struct elfhdr *exec,
sp = p;
-#ifdef CONFIG_USE_FDPIC
/* Needs to be before we load the env/argc/... */
if (elf_is_fdpic(exec)) {
/* Need 4 byte alignment for these structs */
if (interp_info) {
interp_info->other_info = info;
sp = loader_build_fdpic_loadmap(interp_info, sp);
+ info->interpreter_loadmap_addr = interp_info->loadmap_addr;
+ info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr;
+ } else {
+ info->interpreter_loadmap_addr = 0;
+ info->interpreter_pt_dynamic_addr = 0;
}
}
-#endif
u_platform = 0;
k_platform = ELF_PLATFORM;
NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
- NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE, getpagesize())));
+ if ((info->alignment & ~qemu_host_page_mask) != 0) {
+ /* Target doesn't support host page size alignment */
+ NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
+ } else {
+ NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE,
+ qemu_host_page_size)));
+ }
NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
NEW_AUX_ENT(AT_ENTRY, info->entry);
return sp;
}
-#ifndef TARGET_HAS_VALIDATE_GUEST_SPACE
-/* If the guest doesn't have a validation function just agree */
-static int validate_guest_space(unsigned long guest_base,
- unsigned long guest_size)
-{
- return 1;
-}
-#endif
-
unsigned long init_guest_space(unsigned long host_start,
unsigned long host_size,
unsigned long guest_start,
bool fixed)
{
- unsigned long current_start, real_start;
+ unsigned long current_start, aligned_start;
int flags;
assert(host_start || host_size);
/* If just a starting address is given, then just verify that
* address. */
if (host_start && !host_size) {
- if (validate_guest_space(host_start, host_size) == 1) {
- return host_start;
- } else {
+#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
+ if (init_guest_commpage(host_start, host_size) != 1) {
return (unsigned long)-1;
}
+#endif
+ return host_start;
}
/* Setup the initial flags and start address. */
/* Otherwise, a non-zero size region of memory needs to be mapped
* and validated. */
+
+#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
+ /* On 32-bit ARM, we need to map not just the usable memory, but
+ * also the commpage. Try to find a suitable place by allocating
+ * a big chunk for all of it. If host_start, then the naive
+ * strategy is probably good enough.
+ */
+ if (!host_start) {
+ unsigned long guest_full_size, host_full_size, real_start;
+
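+ /* 0xffff0f00 lies within the page holding the commpage kuser
+ * helpers, so this covers everything up to and including that page.
+ */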
+ guest_full_size =
+ (0xffff0f00 & qemu_host_page_mask) + qemu_host_page_size;
+ host_full_size = guest_full_size - guest_start;
+ real_start = (unsigned long)
+ mmap(NULL, host_full_size, PROT_NONE, flags, -1, 0);
+ if (real_start == (unsigned long)-1) {
+ if (host_size < host_full_size - qemu_host_page_size) {
+ /* We failed to map a contiguous segment, but we're
+ * allowed to have a gap between the usable memory and
+ * the commpage where other things can be mapped.
+ * This sparseness gives us more flexibility to find
+ * an address range.
+ */
+ goto naive;
+ }
+ return (unsigned long)-1;
+ }
+ munmap((void *)real_start, host_full_size);
+ if (real_start & ~qemu_host_page_mask) {
+ /* The same thing again, but with an extra qemu_host_page_size
+ * so that we can shift around alignment.
+ */
+ unsigned long real_size = host_full_size + qemu_host_page_size;
+ real_start = (unsigned long)
+ mmap(NULL, real_size, PROT_NONE, flags, -1, 0);
+ if (real_start == (unsigned long)-1) {
+ if (host_size < host_full_size - qemu_host_page_size) {
+ goto naive;
+ }
+ return (unsigned long)-1;
+ }
+ munmap((void *)real_start, real_size);
+ real_start = HOST_PAGE_ALIGN(real_start);
+ }
+ current_start = real_start;
+ }
+ naive:
+#endif
+
while (1) {
- unsigned long real_size = host_size;
+ unsigned long real_start, real_size, aligned_size;
+ aligned_size = real_size = host_size;
/* Do not use mmap_find_vma here because that is limited to the
* guest address space. We are going to make the
return (unsigned long)-1;
}
+ /* Check to see if the address is valid. */
+ if (host_start && real_start != current_start) {
+ goto try_again;
+ }
+
/* Ensure the address is properly aligned. */
if (real_start & ~qemu_host_page_mask) {
+ /* Ideally, we adjust like
+ *
+ * pages: [ ][ ][ ][ ][ ]
+ * old: [ real ]
+ * [ aligned ]
+ * new: [ real ]
+ * [ aligned ]
+ *
+ * But if there is something else mapped right after it,
+ * then obviously it won't have room to grow, and the
+ * kernel will put the new larger real someplace else with
+ * unknown alignment (if we made it to here, then
+ * fixed=false). Which is why we grow real by a full page
+ * size, instead of by part of one; so that even if we get
+ * moved, we can still guarantee alignment. But this does
+ * mean that there is a padding of < 1 page both before
+ * and after the aligned range; the "after" could
+ * cause problems for ARM emulation where it could butt in
+ * to where we need to put the commpage.
+ */
munmap((void *)real_start, host_size);
- real_size = host_size + qemu_host_page_size;
+ real_size = aligned_size + qemu_host_page_size;
real_start = (unsigned long)
mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0);
if (real_start == (unsigned long)-1) {
return (unsigned long)-1;
}
- real_start = HOST_PAGE_ALIGN(real_start);
+ aligned_start = HOST_PAGE_ALIGN(real_start);
+ } else {
+ aligned_start = real_start;
}
- /* Check to see if the address is valid. */
- if (!host_start || real_start == current_start) {
- int valid = validate_guest_space(real_start - guest_start,
- real_size);
- if (valid == 1) {
- break;
- } else if (valid == -1) {
- return (unsigned long)-1;
- }
- /* valid == 0, so try again. */
+#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
+ /* On 32-bit ARM, we need to also be able to map the commpage. */
+ int valid = init_guest_commpage(aligned_start - guest_start,
+ aligned_size + guest_start);
+ if (valid == -1) {
+ munmap((void *)real_start, real_size);
+ return (unsigned long)-1;
+ } else if (valid == 0) {
+ goto try_again;
}
+#endif
+
+ /* If we have neither returned -1 nor jumped to try_again by this
+ * point, the address we have is good.
+ */
+ break;
+ try_again:
/* That address didn't work. Unmap and try a different one.
* The address the host picked is typically right at
* the top of the host address space and leaves the guest with
* happen often. Probably means we got unlucky and host
* address space randomization put a shared library somewhere
* inconvenient.
+ *
+ * This is probably a good strategy if host_start, but is
+ * probably a bad strategy if not, which means we got here
+ * because of trouble with ARM commpage setup.
*/
- munmap((void *)real_start, host_size);
+ munmap((void *)real_start, real_size);
current_start += qemu_host_page_size;
if (host_start == current_start) {
/* Theoretically possible if host doesn't have any suitably
qemu_log_mask(CPU_LOG_PAGE, "Reserved 0x%lx bytes of guest address space\n", host_size);
- return real_start;
+ return aligned_start;
}
static void probe_guest_base(const char *image_name,
}
bswap_phdr(phdr, ehdr->e_phnum);
-#ifdef CONFIG_USE_FDPIC
info->nsegs = 0;
info->pt_dynamic_addr = 0;
-#endif
mmap_lock();
/* Find the maximum size of the image and allocate an appropriate
amount of memory to handle that. */
loaddr = -1, hiaddr = 0;
+ info->alignment = 0;
for (i = 0; i < ehdr->e_phnum; ++i) {
if (phdr[i].p_type == PT_LOAD) {
abi_ulong a = phdr[i].p_vaddr - phdr[i].p_offset;
if (a > hiaddr) {
hiaddr = a;
}
-#ifdef CONFIG_USE_FDPIC
++info->nsegs;
-#endif
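+ /* Accumulate the alignment requirements of every PT_LOAD segment;
+ * this is consulted later when deciding what AT_PAGESZ to report.
+ */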
+ info->alignment |= phdr[i].p_align;
}
}
}
load_bias = load_addr - loaddr;
-#ifdef CONFIG_USE_FDPIC
- {
+ if (elf_is_fdpic(ehdr)) {
struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
g_malloc(sizeof(*loadsegs) * info->nsegs);
}
}
}
-#endif
info->load_bias = load_bias;
info->load_addr = load_addr;
for (i = 0; i < ehdr->e_phnum; i++) {
struct elf_phdr *eppnt = phdr + i;
if (eppnt->p_type == PT_LOAD) {
- abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
+ abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len;
int elf_prot = 0;
if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
vaddr = load_bias + eppnt->p_vaddr;
vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
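+ /* Length of the file mapping, rounded up to a whole ELF page. */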
+ vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po);
- error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
+ error = target_mmap(vaddr_ps, vaddr_len,
elf_prot, MAP_PRIVATE | MAP_FIXED,
image_fd, eppnt->p_offset - vaddr_po);
if (error == -1) {