#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
-#undef GETPC_ADJ
-#define GETPC_ADJ 0
-#undef GETRA
-#define GETRA() ((uintptr_t)0)
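+/* For code accesses there is no valid generated-code return address to
+   unwind from, so GETPC() is stubbed out to 0 here. */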
+#undef GETPC
+#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS
#define SHIFT 0
tb_next->jmp_list_first = (uintptr_t)tb | n;
}
-/* GETRA is the true target of the return instruction that we'll execute,
- defined here for simplicity of defining the follow-up macros. */
+/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
-# define GETRA() tci_tb_ptr
+# define GETPC() tci_tb_ptr
#else
-# define GETRA() \
+# define GETPC() \
((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif
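+
+/* GETPC() reads the return address of the current frame, so it is only
+ * meaningful in the outermost helper called directly from generated code;
+ * deeper callees must receive the value as an explicit retaddr argument.
+ * A minimal sketch (helper_foo is a hypothetical example helper):
+ *
+ *     void helper_foo(CPUArchState *env, target_ulong addr)
+ *     {
+ *         cpu_restore_state(ENV_GET_CPU(env), GETPC());
+ *     }
+ */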
smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
-#define GETPC() (GETRA() - GETPC_ADJ)
-
#if !defined(CONFIG_USER_ONLY)
struct MemoryRegion *iotlb_to_region(CPUState *cpu,
uintptr_t haddr;
DATA_TYPE res;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
do_unaligned_access:
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
- res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
- res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
+ res1 = helper_le_ld_name(env, addr1, oi, retaddr);
+ res2 = helper_le_ld_name(env, addr2, oi, retaddr);
shift = (addr & (DATA_SIZE - 1)) * 8;
/* Little-endian combine. */
uintptr_t haddr;
DATA_TYPE res;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
do_unaligned_access:
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
- res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
- res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
+ res1 = helper_be_ld_name(env, addr1, oi, retaddr);
+ res2 = helper_be_ld_name(env, addr2, oi, retaddr);
shift = (addr & (DATA_SIZE - 1)) * 8;
/* Big-endian combine. */
unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
for (i = 0; i < DATA_SIZE; ++i) {
/* Little-endian extract. */
uint8_t val8 = val >> (i * 8);
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- oi, retaddr + GETPC_ADJ);
+ oi, retaddr);
}
return;
}
unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
for (i = 0; i < DATA_SIZE; ++i) {
/* Big-endian extract. */
uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- oi, retaddr + GETPC_ADJ);
+ oi, retaddr);
}
return;
}
* this purpose use the actual register value passed to us
* so that we get the fault address right.
*/
- helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETRA());
+ helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
/* Now we can populate the other TLB entries, if any */
for (i = 0; i < maxidx; i++) {
uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
if (va != (vaddr_in & TARGET_PAGE_MASK)) {
- helper_ret_stb_mmu(env, va, 0, oi, GETRA());
+ helper_ret_stb_mmu(env, va, 0, oi, GETPC());
}
}
}
* bounce buffer was in use
*/
for (i = 0; i < blocklen; i++) {
- helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETRA());
+ helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
}
}
#else
}
#if !defined(CONFIG_USER_ONLY)
-MSA_LD_DF(DF_BYTE, b, helper_ret_ldub_mmu, oi, GETRA())
-MSA_LD_DF(DF_HALF, h, helper_ret_lduw_mmu, oi, GETRA())
-MSA_LD_DF(DF_WORD, w, helper_ret_ldul_mmu, oi, GETRA())
-MSA_LD_DF(DF_DOUBLE, d, helper_ret_ldq_mmu, oi, GETRA())
+MSA_LD_DF(DF_BYTE, b, helper_ret_ldub_mmu, oi, GETPC())
+MSA_LD_DF(DF_HALF, h, helper_ret_lduw_mmu, oi, GETPC())
+MSA_LD_DF(DF_WORD, w, helper_ret_ldul_mmu, oi, GETPC())
+MSA_LD_DF(DF_DOUBLE, d, helper_ret_ldq_mmu, oi, GETPC())
#else
MSA_LD_DF(DF_BYTE, b, cpu_ldub_data)
MSA_LD_DF(DF_HALF, h, cpu_lduw_data)
int mmu_idx = cpu_mmu_index(env, false); \
int i; \
MEMOP_IDX(DF) \
- ensure_writable_pages(env, addr, mmu_idx, GETRA()); \
+ ensure_writable_pages(env, addr, mmu_idx, GETPC()); \
for (i = 0; i < DF_ELEMENTS(DF); i++) { \
ST_INSN(env, addr + (i << DF), pwd->TYPE[i], ##__VA_ARGS__); \
} \
}
#if !defined(CONFIG_USER_ONLY)
-MSA_ST_DF(DF_BYTE, b, helper_ret_stb_mmu, oi, GETRA())
-MSA_ST_DF(DF_HALF, h, helper_ret_stw_mmu, oi, GETRA())
-MSA_ST_DF(DF_WORD, w, helper_ret_stl_mmu, oi, GETRA())
-MSA_ST_DF(DF_DOUBLE, d, helper_ret_stq_mmu, oi, GETRA())
+MSA_ST_DF(DF_BYTE, b, helper_ret_stb_mmu, oi, GETPC())
+MSA_ST_DF(DF_HALF, h, helper_ret_stw_mmu, oi, GETPC())
+MSA_ST_DF(DF_WORD, w, helper_ret_stl_mmu, oi, GETPC())
+MSA_ST_DF(DF_DOUBLE, d, helper_ret_stq_mmu, oi, GETPC())
#else
MSA_ST_DF(DF_BYTE, b, cpu_stb_data)
MSA_ST_DF(DF_HALF, h, cpu_stw_data)
int64_t ti = profile_getclock();
#endif
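+ /* GETPC() now returns the raw host return address, which points just past
+  * the call insn; apply the fixed GETPC_ADJ offset once here so the
+  * searched pc falls within the code generated for the guest insn that
+  * made the call. */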
+ searched_pc -= GETPC_ADJ;
+
if (searched_pc < host_pc) {
return -1;
}
if (ret == 0) {
return 1; /* the MMU fault was handled without causing a real CPU fault */
}
- /* now we have a real cpu fault */
- cpu_restore_state(cpu, pc);
+
+ /* Now we have a real cpu fault. Since this is the exact location of
+ * the exception, we must undo the adjustment done by cpu_restore_state
+ * for handling call return addresses. */
+ cpu_restore_state(cpu, pc + GETPC_ADJ);
sigprocmask(SIG_SETMASK, old_set, NULL);
cpu_loop_exit(cpu);