#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
+#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
gen_intermediate_code(env, tb);
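+ /* trace the new TB, keyed by its guest PC and host code pointer */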
+ trace_translate_block(tb, tb->pc, tb->tc_ptr);
+
/* generate machine code */
gen_code_buf = tb->tc_ptr;
tb->tb_next_offset[0] = 0xffff;
tb = tb_find_pc(retaddr);
if (tb) {
cpu_restore_state_from_tb(cpu, tb, retaddr);
+ if (tb->cflags & CF_NOCACHE) {
+ /* one-shot translation, invalidate it immediately */
+ cpu->current_tb = NULL;
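+ /* a page address of -1 unlinks the TB from all of its page lists */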
+ tb_phys_invalidate(tb, -1);
+ tb_free(tb);
+ }
return true;
}
return false;
{
/* NOTE: we can always suppose that qemu_host_page_size >=
TARGET_PAGE_SIZE */
-#ifdef _WIN32
- SYSTEM_INFO system_info;
-
- GetSystemInfo(&system_info);
- qemu_real_host_page_size = system_info.dwPageSize;
-#else
qemu_real_host_page_size = getpagesize();
-#endif
if (qemu_host_page_size == 0) {
qemu_host_page_size = qemu_real_host_page_size;
}
#elif defined(__s390x__)
/* We have a +- 4GB range on the branches; leave some slop. */
# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
+#elif defined(__mips__)
+ /* We have a 256MB branch region, but leave room to make sure the
+ main executable is also within that region. */
+# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#endif
return tb_size;
}
+#ifdef __mips__
+/* In order to use J and JAL within the code_gen_buffer, we require
+ that the buffer not cross a 256MB boundary. */
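+/* (J and JAL take a 26-bit word offset, so they can only reach targets
+ inside the naturally aligned 256MB region containing the PC. For
+ example, addr = 0x0fff0000 with size = 0x20000 gives
+ 0x0fff0000 ^ 0x10010000 = 0x1ffe0000; masking off the low 28 bits
+ leaves 0x10000000, so the span crosses a boundary.) */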
+static inline bool cross_256mb(void *addr, size_t size)
+{
+ return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
+}
+
+/* We weren't able to allocate a buffer without crossing that boundary,
+ so make do with the larger portion of the buffer that doesn't cross.
+ Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
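+/* (For example, buf1 = 0x0ff00000 with size1 = 0x02000000 splits at
+ buf2 = 0x10000000 into a 0x00100000-byte lower piece and a
+ 0x01f00000-byte upper piece; the larger upper piece is kept.) */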
+static inline void *split_cross_256mb(void *buf1, size_t size1)
+{
+ void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
+ size_t size2 = buf1 + size1 - buf2;
+
+ size1 = buf2 - buf1;
+ if (size1 < size2) {
+ size1 = size2;
+ buf1 = buf2;
+ }
+
+ tcg_ctx.code_gen_buffer_size = size1;
+ return buf1;
+}
+#endif
+
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
__attribute__((aligned(CODE_GEN_ALIGN)));
static inline void *alloc_code_gen_buffer(void)
{
- map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
- return static_code_gen_buffer;
+ void *buf = static_code_gen_buffer;
+#ifdef __mips__
+ if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
+ buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
+ }
+#endif
+ map_exec(buf, tcg_ctx.code_gen_buffer_size);
+ return buf;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
start = 0x40000000ul;
# elif defined(__s390x__)
start = 0x90000000ul;
+# elif defined(__mips__)
+ /* ??? We ought to more explicitly manage layout for softmmu too. */
+# ifdef CONFIG_USER_ONLY
+ start = 0x68000000ul;
+# elif _MIPS_SIM == _ABI64
+ start = 0x128000000ul;
+# else
+ start = 0x08000000ul;
+# endif
# endif
buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
- return buf == MAP_FAILED ? NULL : buf;
+ if (buf == MAP_FAILED) {
+ return NULL;
+ }
+
+#ifdef __mips__
+ if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
+ /* Try again, with the original still mapped, to avoid re-acquiring
+ that 256mb crossing. This time don't specify an address. */
+ size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
+ void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
+ flags, -1, 0);
+ if (buf2 != MAP_FAILED) {
+ if (!cross_256mb(buf2, size1)) {
+ /* Success! Use the new buffer. */
+ munmap(buf, size1);
+ return buf2;
+ }
+ /* Failure. Work with what we had. */
+ munmap(buf2, size1);
+ }
+
+ /* Split the original buffer. Free the smaller half. */
+ buf2 = split_cross_256mb(buf, size1);
+ size2 = tcg_ctx.code_gen_buffer_size;
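+ /* If buf == buf2, the lower piece was kept: unmap the tail at
+ buf + size2. Otherwise the upper piece was kept: unmap the head. */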
+ munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
+ return buf2;
+ }
+#endif
+
+ return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);
- if (buf) {
- map_exec(buf, tcg_ctx.code_gen_buffer_size);
+ if (buf == NULL) {
+ return NULL;
}
+
+#ifdef __mips__
+ if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
+ void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
+ if (buf2 != NULL && !cross_256mb(buf2, tcg_ctx.code_gen_buffer_size)) {
+ /* Success! Use the new buffer. */
+ g_free(buf);
+ buf = buf2;
+ } else {
+ /* Failure. Work with what we had. Since this is malloc
+ and not mmap, we can't free the other half. */
+ g_free(buf2);
+ buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
+ }
+ }
+#endif
+
+ map_exec(buf, tcg_ctx.code_gen_buffer_size);
return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
branch. */
#if defined(TARGET_MIPS)
if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
- env->active_tc.PC -= 4;
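+ /* a 16-bit (microMIPS/MIPS16) branch backs the PC out by 2 bytes,
+ a standard branch by 4 */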
+ env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
cpu->icount_decr.u16.low++;
env->hflags &= ~MIPS_HFLAG_BMASK;
}
tcg_dump_info(f, cpu_fprintf);
}
+void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
+{
+ tcg_dump_op_count(f, cpu_fprintf);
+}
+
#else /* CONFIG_USER_ONLY */
void cpu_interrupt(CPUState *cpu, int mask)
struct walk_memory_regions_data {
walk_memory_regions_fn fn;
void *priv;
- uintptr_t start;
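+ /* start of the region being accumulated; -1u while none is open */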
+ target_ulong start;
int prot;
};
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
- abi_ulong end, int new_prot)
+ target_ulong end, int new_prot)
{
- if (data->start != -1ul) {
+ if (data->start != -1u) {
int rc = data->fn(data->priv, data->start, end, data->prot);
if (rc != 0) {
return rc;
}
}
- data->start = (new_prot ? end : -1ul);
+ data->start = (new_prot ? end : -1u);
data->prot = new_prot;
return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
- abi_ulong base, int level, void **lp)
+ target_ulong base, int level, void **lp)
{
- abi_ulong pa;
+ target_ulong pa;
int i, rc;
if (*lp == NULL) {
void **pp = *lp;
for (i = 0; i < V_L2_SIZE; ++i) {
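+ /* pa is the guest address covered by child i at this level */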
- pa = base | ((abi_ulong)i <<
+ pa = base | ((target_ulong)i <<
(TARGET_PAGE_BITS + V_L2_BITS * level));
rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
if (rc != 0) {
data.fn = fn;
data.priv = priv;
- data.start = -1ul;
+ data.start = -1u;
data.prot = 0;
for (i = 0; i < V_L1_SIZE; i++) {
- int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
+ int rc = walk_memory_regions_1(&data,
+ (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
-
if (rc != 0) {
return rc;
}
return walk_memory_regions_end(&data, 0, 0);
}
-static int dump_region(void *priv, abi_ulong start,
- abi_ulong end, unsigned long prot)
+static int dump_region(void *priv, target_ulong start,
+ target_ulong end, unsigned long prot)
{
FILE *f = (FILE *)priv;
- (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
- " "TARGET_ABI_FMT_lx" %c%c%c\n",
+ (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
+ " "TARGET_FMT_lx" %c%c%c\n",
start, end, end - start,
((prot & PAGE_READ) ? 'r' : '-'),
((prot & PAGE_WRITE) ? 'w' : '-'),
/* dump memory mappings */
void page_dump(FILE *f)
{
- const int length = sizeof(abi_ulong) * 2;
+ const int length = sizeof(target_ulong) * 2;
(void) fprintf(f, "%-*s %-*s %-*s %s\n",
length, "start", length, "end", length, "size", "prot");
walk_memory_regions(f, dump_region);
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
- assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
+ assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
assert(start < end);
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
- assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
+ assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
if (len == 0) {