#include <sys/param.h>
#include <sys/resource.h>
+#include <sys/shm.h>
#include "qemu.h"
#include "disas/disas.h"
#include "qemu/path.h"
+#include "qemu/queue.h"
+#include "qemu/guest-random.h"
+#include "qemu/units.h"
#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
- GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
- GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
+ GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
+ GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
/* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
* Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
* ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
ARM_HWCAP_A64_SB = 1 << 29,
ARM_HWCAP_A64_PACA = 1 << 30,
ARM_HWCAP_A64_PACG = 1UL << 31,
+
+ ARM_HWCAP2_A64_DCPODP = 1 << 0,
+ ARM_HWCAP2_A64_SVE2 = 1 << 1,
+ ARM_HWCAP2_A64_SVEAES = 1 << 2,
+ ARM_HWCAP2_A64_SVEPMULL = 1 << 3,
+ ARM_HWCAP2_A64_SVEBITPERM = 1 << 4,
+ ARM_HWCAP2_A64_SVESHA3 = 1 << 5,
+ ARM_HWCAP2_A64_SVESM4 = 1 << 6,
+ ARM_HWCAP2_A64_FLAGM2 = 1 << 7,
+ ARM_HWCAP2_A64_FRINT = 1 << 8,
};
#define ELF_HWCAP get_elf_hwcap()
+#define ELF_HWCAP2 get_elf_hwcap2()
+
+#define GET_FEATURE_ID(feat, hwcap) \
+ do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
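(Hoisting GET_FEATURE_ID to file scope lets get_elf_hwcap2() below
share it.  For illustration only, GET_FEATURE_ID(aa64_aes,
ARM_HWCAP_A64_AES) expands to

    do {
        if (cpu_isar_feature(aa64_aes, cpu)) {
            hwcaps |= ARM_HWCAP_A64_AES;
        }
    } while (0);

so every caller needs local "cpu" and "hwcaps" variables in scope.)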
static uint32_t get_elf_hwcap(void)
{
hwcaps |= ARM_HWCAP_A64_CPUID;
/* probe for the extra features */
-#define GET_FEATURE_ID(feat, hwcap) \
- do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
+ GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
-#undef GET_FEATURE_ID
+ return hwcaps;
+}
+
+static uint32_t get_elf_hwcap2(void)
+{
+ ARMCPU *cpu = ARM_CPU(thread_cpu);
+ uint32_t hwcaps = 0;
+
+ GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
+ GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
+ GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
return hwcaps;
}
+#undef GET_FEATURE_ID
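A minimal guest-side sketch, not part of the patch: these bits reach
the guest through the AT_HWCAP2 auxv entry, which glibc exposes via
getauxval():

    #include <sys/auxv.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        /* Bit 1 is ARM_HWCAP2_A64_SVE2 in the table above. */
        if (hwcap2 & (1UL << 1)) {
            printf("SVE2 reported\n");
        }
        return 0;
    }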
+
#endif /* not TARGET_AARCH64 */
#endif /* TARGET_ARM */
QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
+ QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000, /* Vector crypto instructions */
+ QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000, /* Transactions aborted on syscall */
QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
+ QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */
+ QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
+ QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
+ QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
};
#define ELF_HWCAP get_elf_hwcap()
GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
- PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07);
- GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00);
+ PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 |
+ QEMU_PPC_FEATURE2_VEC_CRYPTO);
+ GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
+ QEMU_PPC_FEATURE2_DARN);
#undef GET_FEATURE
#undef GET_FEATURE2
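Note that the second argument of GET_FEATURE2 is a plain mask, so a
single feature test can set several hwcap2 bits at once; that is how
ARCH_2_07 is paired with VEC_CRYPTO and ARCH_3_00 with DARN above.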
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
+#include "elf.h"
+
+#define ELF_HWCAP get_elf_hwcap()
+
+#define GET_FEATURE(_feat, _hwcap) \
+ do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)
+
+static uint32_t get_elf_hwcap(void)
+{
+ /*
+ * Let's assume we always have esan3 and zarch.
+ * 31-bit processes can use 64-bit registers (high gprs).
+ */
+ uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS;
+
+ GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE);
+ GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA);
+ GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP);
+ GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM);
+ if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) &&
+ s390_has_feat(S390_FEAT_ETF3_ENH)) {
+ hwcap |= HWCAP_S390_ETF3EH;
+ }
+ GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS);
+
+ return hwcap;
+}
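The ETF3EH case is open-coded because GET_FEATURE can only test a
single facility, while HWCAP_S390_ETF3EH requires both
S390_FEAT_EXTENDED_TRANSLATION_3 and S390_FEAT_ETF3_ENH.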
+
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
regs->psw.addr = infop->entry;
}
/*
- * Generate 16 random bytes for userspace PRNG seeding (not
- * cryptically secure but it's not the aim of QEMU).
+ * Generate 16 random bytes for userspace PRNG seeding.
*/
- for (i = 0; i < 16; i++) {
- k_rand_bytes[i] = rand();
- }
+ qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes));
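For context, not part of the patch: the guest's libc consumes these
bytes through the AT_RANDOM auxv entry (glibc, for example, derives
its stack-protector canary from them):

    #include <sys/auxv.h>

    /* AT_RANDOM points at the 16 bytes written above. */
    unsigned char *seed = (unsigned char *)getauxval(AT_RANDOM);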
if (STACK_GROWS_DOWN) {
sp -= 16;
u_rand_bytes = sp;
unsigned long guest_start,
bool fixed)
{
+ /* In order to use host shmat, we must be able to honor SHMLBA. */
+ unsigned long align = MAX(SHMLBA, qemu_host_page_size);
unsigned long current_start, aligned_start;
int flags;
}
/* Setup the initial flags and start address. */
- current_start = host_start & qemu_host_page_mask;
+ current_start = host_start & -align;
flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
if (fixed) {
flags |= MAP_FIXED;
return (unsigned long)-1;
}
munmap((void *)real_start, host_full_size);
- if (real_start & ~qemu_host_page_mask) {
- /* The same thing again, but with an extra qemu_host_page_size
+ if (real_start & (align - 1)) {
+ /* The same thing again, but with extra room
* so that we can shift around alignment.
*/
- unsigned long real_size = host_full_size + qemu_host_page_size;
+ unsigned long real_size = host_full_size + align;
return (unsigned long)-1;
}
munmap((void *)real_start, real_size);
- real_start = HOST_PAGE_ALIGN(real_start);
+ real_start = ROUND_UP(real_start, align);
}
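ROUND_UP is QEMU's power-of-two rounding helper from osdep.h, the
round-up counterpart of the "host_start & -align" round-down above.
For illustration, with the 16KB SHMLBA of an Arm host (align ==
0x4000) and a trial mapping at 0x2aaaab112000:

    ROUND_UP(0x2aaaab112000, 0x4000) == 0x2aaaab114000

The extra "align" bytes in real_size are what guarantee that the
rounded-up start still leaves host_full_size usable bytes inside the
trial reservation.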
current_start = real_start;
}
}
/* Ensure the address is properly aligned. */
- if (real_start & ~qemu_host_page_mask) {
+ if (real_start & (align - 1)) {
/* Ideally, we adjust like
*
* pages: [ ][ ][ ][ ][ ]
* to where we need to put the commpage.
*/
munmap((void *)real_start, host_size);
- real_size = aligned_size + qemu_host_page_size;
+ real_size = aligned_size + align;
real_start = (unsigned long)
mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0);
if (real_start == (unsigned long)-1) {
return (unsigned long)-1;
}
- aligned_start = HOST_PAGE_ALIGN(real_start);
+ aligned_start = ROUND_UP(real_start, align);
} else {
aligned_start = real_start;
}
* because of trouble with ARM commpage setup.
*/
munmap((void *)real_start, real_size);
- current_start += qemu_host_page_size;
+ current_start += align;
if (host_start == current_start) {
/* Theoretically possible if host doesn't have any suitably
* aligned areas. Normally the first mmap will fail.
}
}
- load_addr = loaddr;
- if (ehdr->e_type == ET_DYN) {
- /* The image indicates that it can be loaded anywhere. Find a
- location that can hold the memory space required. If the
- image is pre-linked, LOADDR will be non-zero. Since we do
- not supply MAP_FIXED here we'll use that address if and
- only if it remains available. */
- load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- -1, 0);
- if (load_addr == -1) {
- goto exit_perror;
+ if (pinterp_name != NULL) {
+ /*
+ * This is the main executable.
+ *
+ * Reserve extra space for brk.
+ * We hold on to this space while placing the interpreter
+ * and the stack, lest they be placed immediately after
+ * the data segment and block allocation from the brk.
+ *
+ * 16MB is chosen as "large enough" without being so large
+ * that the reservation no longer fits a 32-bit guest on
+ * a 32-bit host.
+ */
+ info->reserve_brk = 16 * MiB;
+ hiaddr += info->reserve_brk;
+
+ if (ehdr->e_type == ET_EXEC) {
+ /*
+ * Make sure that the low address does not conflict with
+ * MMAP_MIN_ADDR or the QEMU application itself.
+ */
+ probe_guest_base(image_name, loaddr, hiaddr);
}
- } else if (pinterp_name != NULL) {
- /* This is the main executable. Make sure that the low
- address does not conflict with MMAP_MIN_ADDR or the
- QEMU application itself. */
- probe_guest_base(image_name, loaddr, hiaddr);
+ }
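Illustrative layout, with invented addresses: for an ET_EXEC image
with loaddr 0x10000000 and an original hiaddr of 0x10200000, the
reservation made below now covers

    0x10000000              0x10200000                0x11200000
        |----- ELF segments -----|--- brk reserve (16MB) ---|

so neither the interpreter nor the stack can land in the 16MB that
do_brk will later want to grow into.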
+
+ /*
+ * Reserve address space for all of this.
+ *
+ * In the case of ET_EXEC, we supply MAP_FIXED so that we get
+ * exactly the address range that is required.
+ *
+ * Otherwise this is ET_DYN, and we are searching for a location
+ * that can hold the memory space required. If the image is
+ * pre-linked, LOADDR will be non-zero, and the kernel should
+ * honor that address if it happens to be free.
+ *
+ * In both cases, we will overwrite pages in this range with mappings
+ * from the executable.
+ */
+ load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
+ (ehdr->e_type == ET_EXEC ? MAP_FIXED : 0),
+ -1, 0);
+ if (load_addr == -1) {
+ goto exit_perror;
}
load_bias = load_addr - loaddr;
}
info->load_bias = load_bias;
+ info->code_offset = load_bias;
+ info->data_offset = load_bias;
info->load_addr = load_addr;
info->entry = ehdr->e_entry + load_bias;
info->start_code = -1;
char *elf_interpreter = NULL;
char *scratch;
+ memset(&interp_info, 0, sizeof(interp_info));
+#ifdef TARGET_MIPS
+ interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN;
+#endif
+
info->start_mmap = (abi_ulong)ELF_START_MMAP;
load_elf_image(bprm->filename, bprm->fd, info,
bprm->core_dump = &elf_core_dump;
#endif
+ /*
+ * If we reserved extra space for brk, release it now.
+ * The implementation of do_brk in syscalls.c expects to be able
+ * to mmap pages in this space.
+ */
+ if (info->reserve_brk) {
+ abi_ulong start_brk = HOST_PAGE_ALIGN(info->brk);
+ abi_ulong end_brk = HOST_PAGE_ALIGN(info->brk + info->reserve_brk);
+ target_munmap(start_brk, end_brk - start_brk);
+ }
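Note the two round-ups: with 4KB host pages and info->brk ==
0x11001234, start_brk is 0x11002000, so the partial page at the end
of the data segment stays mapped and only whole reserved pages are
returned.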
+
return 0;
}
static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
{
- CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
+ CPUState *cpu = env_cpu((CPUArchState *)env);
TaskState *ts = (TaskState *)cpu->opaque;
struct elf_thread_status *ets;
long signr, const CPUArchState *env)
{
#define NUMNOTES 3
- CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
+ CPUState *cpu = env_cpu((CPUArchState *)env);
TaskState *ts = (TaskState *)cpu->opaque;
int i;
*/
static int elf_core_dump(int signr, const CPUArchState *env)
{
- const CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
+ const CPUState *cpu = env_cpu((CPUArchState *)env);
const TaskState *ts = (const TaskState *)cpu->opaque;
struct vm_area_struct *vma = NULL;
char corefile[PATH_MAX];