* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "qemu/timer.h"
-#include "exec/address-spaces.h"
-#include "exec/memory.h"
-
-#define DATA_SIZE (1 << SHIFT)
-
#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
# define BSWAP(X) bswap64(X)
#endif
-#ifdef TARGET_WORDS_BIGENDIAN
-# define TGT_BE(X) (X)
-# define TGT_LE(X) BSWAP(X)
-#else
-# define TGT_BE(X) BSWAP(X)
-# define TGT_LE(X) (X)
-#endif
-
#if DATA_SIZE == 1
# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name helper_le_st_name
#endif
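+/* For example, with DATA_SIZE == 1 and MMUSUFFIX == _mmu, helper_le_ld_name
+   expands to helper_ret_ldub_mmu, and the BE names simply alias the LE ones,
+   since a single byte has no endianness. */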
-#ifdef TARGET_WORDS_BIGENDIAN
-# define helper_te_ld_name helper_be_ld_name
-# define helper_te_st_name helper_be_st_name
-#else
-# define helper_te_ld_name helper_le_ld_name
-# define helper_te_st_name helper_le_st_name
-#endif
-
-/* macro to check the victim tlb */
-#define VICTIM_TLB_HIT(ty) \
-({ \
- /* we are about to do a page table walk. our last hope is the \
- * victim tlb. try to refill from the victim tlb before walking the \
- * page table. */ \
- int vidx; \
- hwaddr tmpiotlb; \
- CPUTLBEntry tmptlb; \
- for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \
- if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
- /* found entry in victim tlb, swap tlb and iotlb */ \
- tmptlb = env->tlb_table[mmu_idx][index]; \
- env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
- env->tlb_v_table[mmu_idx][vidx] = tmptlb; \
- tmpiotlb = env->iotlb[mmu_idx][index]; \
- env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \
- env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \
- break; \
- } \
- } \
- /* return true when there is a vtlb hit, i.e. vidx >=0 */ \
- vidx >= 0; \
-})
-
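+/* The victim TLB probe now lives out of line: cputlb.c provides
+   victim_tlb_hit() and a two-argument VICTIM_TLB_HIT(TY, ADDR) macro that
+   calls it with the local env, mmu_idx and index. */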
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
- hwaddr physaddr,
+ size_t mmu_idx, size_t index,
target_ulong addr,
uintptr_t retaddr)
{
- uint64_t val;
- CPUState *cpu = ENV_GET_CPU(env);
- MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
-
- physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- cpu->mem_io_pc = retaddr;
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
- cpu_io_recompile(cpu, retaddr);
- }
-
- cpu->mem_io_vaddr = addr;
- memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
- MEMTXATTRS_UNSPECIFIED);
- return val;
+ CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
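+    /* MMIO loads are now handled out of line by io_readx() in cputlb.c,
+       which resolves the memory region from the IOTLB entry and dispatches
+       a read of DATA_SIZE bytes. */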
+ return io_readx(env, iotlbentry, addr, retaddr, DATA_SIZE);
}
#endif
-#ifdef SOFTMMU_CODE_ACCESS
-static __attribute__((unused))
-#endif
-WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
- uintptr_t retaddr)
+WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
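+    /* The TCGMemOpIdx argument packs the TCGMemOp (size, endianness and
+       alignment requirement) together with the mmu index, so the helpers
+       need only one extra parameter. */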
+ unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
DATA_TYPE res;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
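+    /* Enforce guest alignment up front: a_bits == 0 (byte alignment)
+       yields a zero mask, so such accesses can never trap here. */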
+ if (addr & ((1 << a_bits) - 1)) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-#endif
- if (!VICTIM_TLB_HIT(ADDR_READ)) {
+ if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

/* Handle an IO access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- hwaddr ioaddr;
if ((addr & (DATA_SIZE - 1)) != 0) {
goto do_unaligned_access;
}
- ioaddr = env->iotlb[mmu_idx][index];
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
- res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+ res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
res = TGT_LE(res);
return res;
}

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
-#ifdef ALIGNED_ONLY
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
-#endif
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
- res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
- res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+ res1 = helper_le_ld_name(env, addr1, oi, retaddr);
+ res2 = helper_le_ld_name(env, addr2, oi, retaddr);
shift = (addr & (DATA_SIZE - 1)) * 8;
        /* Little-endian combine. */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
}
- /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-#endif
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}
#if DATA_SIZE > 1
-#ifdef SOFTMMU_CODE_ACCESS
-static __attribute__((unused))
-#endif
-WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
- uintptr_t retaddr)
+WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
+ unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
DATA_TYPE res;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
+ if (addr & ((1 << a_bits) - 1)) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-#endif
- if (!VICTIM_TLB_HIT(ADDR_READ)) {
+ if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

/* Handle an IO access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- hwaddr ioaddr;
if ((addr & (DATA_SIZE - 1)) != 0) {
goto do_unaligned_access;
}
- ioaddr = env->iotlb[mmu_idx][index];
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
- res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+ res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
res = TGT_BE(res);
return res;
}

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
-#ifdef ALIGNED_ONLY
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
-#endif
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
- res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
- res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+ res1 = helper_be_ld_name(env, addr1, oi, retaddr);
+ res2 = helper_be_ld_name(env, addr2, oi, retaddr);
shift = (addr & (DATA_SIZE - 1)) * 8;
        /* Big-endian combine. */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
}
- /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-#endif
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
return res;
}
#endif /* DATA_SIZE > 1 */
-DATA_TYPE
-glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
- int mmu_idx)
-{
- return helper_te_ld_name (env, addr, mmu_idx, GETRA());
-}
-
#ifndef SOFTMMU_CODE_ACCESS
/* Provide signed versions of the load routines as well. We can of course
avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr)
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
+ return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}
# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr)
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
+ return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
- hwaddr physaddr,
+ size_t mmu_idx, size_t index,
DATA_TYPE val,
target_ulong addr,
uintptr_t retaddr)
{
- CPUState *cpu = ENV_GET_CPU(env);
- MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
-
- physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
- cpu_io_recompile(cpu, retaddr);
- }
-
- cpu->mem_io_vaddr = addr;
- cpu->mem_io_pc = retaddr;
- memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
- MEMTXATTRS_UNSPECIFIED);
+ CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
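+    /* MMIO stores likewise defer to the out-of-line io_writex() in
+       cputlb.c. */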
+ return io_writex(env, iotlbentry, val, addr, retaddr, DATA_SIZE);
}
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
- int mmu_idx, uintptr_t retaddr)
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
+ unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
+ if (addr & ((1 << a_bits) - 1)) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-#endif
- if (!VICTIM_TLB_HIT(addr_write)) {
+ if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

/* Handle an IO access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- hwaddr ioaddr;
if ((addr & (DATA_SIZE - 1)) != 0) {
goto do_unaligned_access;
}
- ioaddr = env->iotlb[mmu_idx][index];
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
val = TGT_LE(val);
- glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+ glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
return;
}
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
- int i;
+ int i, index2;
+ target_ulong page2, tlb_addr2;
do_unaligned_access:
-#ifdef ALIGNED_ONLY
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
-#endif
- /* XXX: not efficient, but simple */
- /* Note: relies on the fact that tlb_fill() does not remove the
- * previous page from the TLB cache. */
- for (i = DATA_SIZE - 1; i >= 0; i--) {
+ /* Ensure the second page is in the TLB. Note that the first page
+ is already guaranteed to be filled, and that the second page
+ cannot evict the first. */
+ page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+ index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+ if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+ && !VICTIM_TLB_HIT(addr_write, page2)) {
+ tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
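+        /* With both pages resident, a TLB-related guest fault can only be
+           raised before the first byte below is stored, never
+           mid-sequence. */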
+ /* XXX: not efficient, but simple. */
+ /* This loop must go in the forward direction to avoid issues
+ with self-modifying code in Windows 64-bit. */
+ for (i = 0; i < DATA_SIZE; ++i) {
/* Little-endian extract. */
uint8_t val8 = val >> (i * 8);
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- mmu_idx, retaddr + GETPC_ADJ);
+ oi, retaddr);
}
return;
}
- /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-#endif
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
- int mmu_idx, uintptr_t retaddr)
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
+ unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
+ if (addr & ((1 << a_bits) - 1)) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-#endif
- if (!VICTIM_TLB_HIT(addr_write)) {
+ if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

/* Handle an IO access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- hwaddr ioaddr;
if ((addr & (DATA_SIZE - 1)) != 0) {
goto do_unaligned_access;
}
- ioaddr = env->iotlb[mmu_idx][index];
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
val = TGT_BE(val);
- glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+ glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
return;
}
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
- int i;
+ int i, index2;
+ target_ulong page2, tlb_addr2;
do_unaligned_access:
-#ifdef ALIGNED_ONLY
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
-#endif
+ /* Ensure the second page is in the TLB. Note that the first page
+ is already guaranteed to be filled, and that the second page
+ cannot evict the first. */
+ page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+ index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+ if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+ && !VICTIM_TLB_HIT(addr_write, page2)) {
+ tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
/* XXX: not efficient, but simple */
- /* Note: relies on the fact that tlb_fill() does not remove the
- * previous page from the TLB cache. */
- for (i = DATA_SIZE - 1; i >= 0; i--) {
+ /* This loop must go in the forward direction to avoid issues
+ with self-modifying code. */
+ for (i = 0; i < DATA_SIZE; ++i) {
/* Big-endian extract. */
uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- mmu_idx, retaddr + GETPC_ADJ);
+ oi, retaddr);
}
return;
}
- /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-#endif
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
-
-void
-glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
- DATA_TYPE val, int mmu_idx)
-{
- helper_te_st_name(env, addr, val, mmu_idx, GETRA());
-}
-
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
#undef READ_ACCESS_TYPE
-#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
-#undef TGT_BE
-#undef TGT_LE
-#undef CPU_BE
-#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
-#undef helper_te_ld_name
-#undef helper_te_st_name