/* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
+
+#define GET_FEATURE_ID(feat, hwcap) \
+ do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
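
The new GET_FEATURE_ID macro defers to cpu_isar_feature(), which tests a
field in the guest's ID registers rather than a bit in the ARM_FEATURE_*
bitmap. For orientation, the plumbing in target/arm/cpu.h from the same
series looks roughly like this; a sketch quoted from memory, not the
verbatim header:

/* Each per-feature test reads the relevant ID register field... */
static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
{
    /* AES instructions are advertised in ID_ISAR5.AES */
    return FIELD_EX32(id->isar5, ID_ISAR5, AES) != 0;
}

/* ...and cpu_isar_feature() forwards an ARMCPU to those tests. */
#define cpu_isar_feature(name, cpu) \
    ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); })
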
/* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
- GET_FEATURE(ARM_FEATURE_ARM_DIV, ARM_HWCAP_ARM_IDIVA);
- GET_FEATURE(ARM_FEATURE_THUMB_DIV, ARM_HWCAP_ARM_IDIVT);
+ GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
+ GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
/* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
 * Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
 * ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
 * to our VFP_FP16 feature bit.
 */
static uint32_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;
- GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP2_ARM_AES);
- GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP2_ARM_PMULL);
- GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP2_ARM_SHA1);
- GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP2_ARM_SHA2);
- GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP2_ARM_CRC32);
+ GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
+ GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
+ GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
+ GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
+ GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
return hwcaps;
}
#undef GET_FEATURE
+#undef GET_FEATURE_ID
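
These bits reach the guest through the ELF auxiliary vector, so a 32-bit
guest binary can confirm what the patch advertises with getauxval(). A
minimal probe, assuming the HWCAP2_* constants from Linux's ARM
<asm/hwcap.h> (they match the ARM_HWCAP2_ARM_* values used above):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
    unsigned long hwcap2 = getauxval(AT_HWCAP2);

    /* Each flag corresponds to one GET_FEATURE_ID line above. */
    printf("aes:   %s\n", (hwcap2 & HWCAP2_AES) ? "yes" : "no");
    printf("crc32: %s\n", (hwcap2 & HWCAP2_CRC32) ? "yes" : "no");
    return 0;
}
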
#else
/* 64 bit ARM definitions */
hwcaps |= ARM_HWCAP_A64_ASIMD;
/* probe for the extra features */
-#define GET_FEATURE(feat, hwcap) \
- do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
- GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP_A64_AES);
- GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP_A64_PMULL);
- GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1);
- GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2);
- GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32);
- GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3);
- GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3);
- GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4);
- GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512);
- GET_FEATURE(ARM_FEATURE_V8_FP16,
- ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
- GET_FEATURE(ARM_FEATURE_V8_ATOMICS, ARM_HWCAP_A64_ATOMICS);
- GET_FEATURE(ARM_FEATURE_V8_RDM, ARM_HWCAP_A64_ASIMDRDM);
- GET_FEATURE(ARM_FEATURE_V8_DOTPROD, ARM_HWCAP_A64_ASIMDDP);
- GET_FEATURE(ARM_FEATURE_V8_FCMA, ARM_HWCAP_A64_FCMA);
- GET_FEATURE(ARM_FEATURE_SVE, ARM_HWCAP_A64_SVE);
-#undef GET_FEATURE
+#define GET_FEATURE_ID(feat, hwcap) \
+ do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
+ GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
+ GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
+ GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
+ GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
+ GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
+ GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
+ GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
+ GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
+ GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
+ GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
+ GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
+ GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
+ GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
+ GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
+ GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
+
+#undef GET_FEATURE_ID
return hwcaps;
}
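
The AArch64 tests follow the same pattern against the 64-bit ID
registers; for example, the AES and SVE checks look roughly like this
(again a sketch from the same series, quoted from memory):

static inline bool isar_feature_aa64_aes(const ARMISARegisters *id)
{
    /* ID_AA64ISAR0.AES != 0 means AESE/AESD/AESMC/AESIMC exist. */
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0;
}

static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
}
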
#define QMAGIC 0314
/* Necessary parameters */
-#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
+#define TARGET_ELF_EXEC_PAGESIZE \
+ (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \
+ TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE))
+#define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE)
#define TARGET_ELF_PAGESTART(_v) ((_v) & \
~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
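
A worked example of the new rounding, with hypothetical values: a 4 KiB
target page, a 64 KiB host page, and a segment with p_vaddr = 0x12345,
p_filesz = 0x3000, p_align = 0x10000. Since p_align is a multiple of the
host page size, TARGET_ELF_EXEC_PAGESIZE resolves to 0x10000:

vaddr     = 0x12345;
vaddr_po  = TARGET_ELF_PAGEOFFSET(vaddr);              /* 0x2345  */
vaddr_ps  = TARGET_ELF_PAGESTART(vaddr);               /* 0x10000 */
vaddr_len = TARGET_ELF_PAGELENGTH(0x3000 + vaddr_po);  /* 0x5345 rounds
                                                          up to 0x10000 */

The old code passed the unrounded 0x5345 straight to target_mmap().
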
for (i = 0; i < ehdr->e_phnum; i++) {
struct elf_phdr *eppnt = phdr + i;
if (eppnt->p_type == PT_LOAD) {
- abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
+ abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len;
int elf_prot = 0;
if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
vaddr = load_bias + eppnt->p_vaddr;
vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
- error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
+ vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po);
+ error = target_mmap(vaddr_ps, vaddr_len,
elf_prot, MAP_PRIVATE | MAP_FIXED,
image_fd, eppnt->p_offset - vaddr_po);
if (error == -1) {
    goto exit_perror;
}
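
Note the offset math stays self-consistent: the start address was rounded
down by vaddr_po bytes, so the file offset is rounded down by the same
amount, preserving the offset == address (mod page size) congruence that
mmap() requires. A hypothetical standalone illustration of that invariant
with plain mmap(), not code from the patch:

#include <assert.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map len bytes starting at an arbitrary (unaligned) file offset. */
static void *map_region(int fd, off_t file_off, size_t len)
{
    long page = sysconf(_SC_PAGESIZE);
    off_t po = file_off & (page - 1);       /* offset within its page */
    char *p = mmap(NULL, len + po, PROT_READ, MAP_PRIVATE,
                   fd, file_off - po);      /* page-aligned offset    */
    assert(p != MAP_FAILED);
    return p + po;                          /* caller's unaligned view */
}
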