The return address argument to the softmmu template helpers was
confused. In the legacy case, we wanted to indicate that there
is no return address, and so passed in NULL. However, we then
immediately subtracted GETPC_ADJ from NULL, resulting in a non-zero
value, indicating the presence of an (invalid) return address.

Push the GETPC_ADJ subtraction down to the only point it's required:
immediately before use within cpu_restore_state_from_tb, after all
NULL pointer checks have been completed.

This makes GETPC and GETRA identical. Remove GETRA as the lesser
used macro, replacing all uses with GETPC.

Signed-off-by: Richard Henderson <[email protected]>
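As a quick orientation before the diff: the sketch below is illustrative
only (plain C, not QEMU code; restore_from_retaddr is a made-up stand-in
for cpu_restore_state_from_tb, and only GETPC/GETPC_ADJ follow the patch).
It shows where the single remaining GETPC_ADJ subtraction now lives, and
why doing it inside every helper broke the NULL-retaddr case.

    /* Minimal sketch of the new retaddr flow, for orientation only. */
    #include <stdint.h>

    #define GETPC_ADJ 2
    #define GETPC() \
        ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))

    static void restore_from_retaddr(uintptr_t retaddr)
    {
        if (retaddr == 0) {
            return;                 /* legacy caller: no return address */
        }
        /* The one remaining GETPC_ADJ use: step back into the calling
         * insn only after the zero/NULL check above.  Previously the
         * softmmu helpers subtracted this on entry, which turned a
         * legacy NULL into a bogus non-zero "return address". */
        uintptr_t searched_pc = retaddr - GETPC_ADJ;
        (void)searched_pc;          /* ... map back to guest state ... */
    }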
#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
-#undef GETPC_ADJ
-#define GETPC_ADJ 0
-#undef GETRA
-#define GETRA() ((uintptr_t)0)
+#undef GETPC
+#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS
#define SHIFT 0
tb_next->jmp_list_first = (uintptr_t)tb | n;
}
-/* GETRA is the true target of the return instruction that we'll execute,
- defined here for simplicity of defining the follow-up macros. */
+/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
-# define GETRA() tci_tb_ptr
+# define GETPC() tci_tb_ptr
((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif
smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
-#define GETPC() (GETRA() - GETPC_ADJ)
-
#if !defined(CONFIG_USER_ONLY)
struct MemoryRegion *iotlb_to_region(CPUState *cpu,
uintptr_t haddr;
DATA_TYPE res;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
do_unaligned_access:
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
- res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
- res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
+ res1 = helper_le_ld_name(env, addr1, oi, retaddr);
+ res2 = helper_le_ld_name(env, addr2, oi, retaddr);
shift = (addr & (DATA_SIZE - 1)) * 8;
/* Little-endian combine. */
uintptr_t haddr;
DATA_TYPE res;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
do_unaligned_access:
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
- res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
- res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
+ res1 = helper_be_ld_name(env, addr1, oi, retaddr);
+ res2 = helper_be_ld_name(env, addr2, oi, retaddr);
shift = (addr & (DATA_SIZE - 1)) * 8;
/* Big-endian combine. */
unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
for (i = 0; i < DATA_SIZE; ++i) {
/* Little-endian extract. */
uint8_t val8 = val >> (i * 8);
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- oi, retaddr + GETPC_ADJ);
+ oi, retaddr);
unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
for (i = 0; i < DATA_SIZE; ++i) {
/* Big-endian extract. */
uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- oi, retaddr + GETPC_ADJ);
+ oi, retaddr);
* this purpose use the actual register value passed to us
* so that we get the fault address right.
*/
- helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETRA());
+ helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
/* Now we can populate the other TLB entries, if any */
for (i = 0; i < maxidx; i++) {
uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
if (va != (vaddr_in & TARGET_PAGE_MASK)) {
- helper_ret_stb_mmu(env, va, 0, oi, GETRA());
+ helper_ret_stb_mmu(env, va, 0, oi, GETPC());
* bounce buffer was in use
*/
for (i = 0; i < blocklen; i++) {
- helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETRA());
+ helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
}
#if !defined(CONFIG_USER_ONLY)
-MSA_LD_DF(DF_BYTE, b, helper_ret_ldub_mmu, oi, GETRA())
-MSA_LD_DF(DF_HALF, h, helper_ret_lduw_mmu, oi, GETRA())
-MSA_LD_DF(DF_WORD, w, helper_ret_ldul_mmu, oi, GETRA())
-MSA_LD_DF(DF_DOUBLE, d, helper_ret_ldq_mmu, oi, GETRA())
+MSA_LD_DF(DF_BYTE, b, helper_ret_ldub_mmu, oi, GETPC())
+MSA_LD_DF(DF_HALF, h, helper_ret_lduw_mmu, oi, GETPC())
+MSA_LD_DF(DF_WORD, w, helper_ret_ldul_mmu, oi, GETPC())
+MSA_LD_DF(DF_DOUBLE, d, helper_ret_ldq_mmu, oi, GETPC())
#else
MSA_LD_DF(DF_BYTE, b, cpu_ldub_data)
MSA_LD_DF(DF_HALF, h, cpu_lduw_data)
int mmu_idx = cpu_mmu_index(env, false); \
int i; \
MEMOP_IDX(DF) \
- ensure_writable_pages(env, addr, mmu_idx, GETRA()); \
+ ensure_writable_pages(env, addr, mmu_idx, GETPC()); \
for (i = 0; i < DF_ELEMENTS(DF); i++) { \
ST_INSN(env, addr + (i << DF), pwd->TYPE[i], ##__VA_ARGS__); \
} \
}
#if !defined(CONFIG_USER_ONLY)
-MSA_ST_DF(DF_BYTE, b, helper_ret_stb_mmu, oi, GETRA())
-MSA_ST_DF(DF_HALF, h, helper_ret_stw_mmu, oi, GETRA())
-MSA_ST_DF(DF_WORD, w, helper_ret_stl_mmu, oi, GETRA())
-MSA_ST_DF(DF_DOUBLE, d, helper_ret_stq_mmu, oi, GETRA())
+MSA_ST_DF(DF_BYTE, b, helper_ret_stb_mmu, oi, GETPC())
+MSA_ST_DF(DF_HALF, h, helper_ret_stw_mmu, oi, GETPC())
+MSA_ST_DF(DF_WORD, w, helper_ret_stl_mmu, oi, GETPC())
+MSA_ST_DF(DF_DOUBLE, d, helper_ret_stq_mmu, oi, GETPC())
#else
MSA_ST_DF(DF_BYTE, b, cpu_stb_data)
MSA_ST_DF(DF_HALF, h, cpu_stw_data)
int64_t ti = profile_getclock();
#endif
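+ /* Host return addresses point just past the call insn; step back by
+    GETPC_ADJ so searched_pc falls within the calling insn. */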
+ searched_pc -= GETPC_ADJ;
+
if (searched_pc < host_pc) {
return -1;
}
if (ret == 0) {
return 1; /* the MMU fault was handled without causing real CPU fault */
}
- /* now we have a real cpu fault */
- cpu_restore_state(cpu, pc);
+
+ /* Now we have a real cpu fault. Since this is the exact location of
+ * the exception, we must undo the adjustment done by cpu_restore_state
+ * for handling call return addresses. */
+ cpu_restore_state(cpu, pc + GETPC_ADJ);
sigprocmask(SIG_SETMASK, old_set, NULL);
cpu_loop_exit(cpu);