# define helper_te_st_name helper_le_st_name
#endif
-/* macro to check the victim tlb */
-#define VICTIM_TLB_HIT(ty) \
-({ \
- /* we are about to do a page table walk. our last hope is the \
- * victim tlb. try to refill from the victim tlb before walking the \
- * page table. */ \
- int vidx; \
- CPUIOTLBEntry tmpiotlb; \
- CPUTLBEntry tmptlb; \
- for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \
- if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
- /* found entry in victim tlb, swap tlb and iotlb */ \
- tmptlb = env->tlb_table[mmu_idx][index]; \
- env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
- env->tlb_v_table[mmu_idx][vidx] = tmptlb; \
- tmpiotlb = env->iotlb[mmu_idx][index]; \
- env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \
- env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \
- break; \
- } \
- } \
- /* return true when there is a vtlb hit, i.e. vidx >=0 */ \
- vidx >= 0; \
-})
-
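The removed single-argument macro corresponds to the two-argument form used by the calls later in this patch (VICTIM_TLB_HIT(ADDR_READ, addr), VICTIM_TLB_HIT(addr_write, page2)); the replacement is presumably defined out of line elsewhere (e.g. in cputlb.c) and is not part of this hunk. As a rough, self-contained sketch of the algorithm the removed macro implemented: a backwards linear scan of the victim TLB that swaps a matching entry back into the direct-mapped slot so the fast path finds it next time. The names, sizes, and types below (victim_tlb_hit_sketch, SketchTLBEntry, etc.) are stand-ins invented for the example, and the parallel iotlb swap from the real code is omitted.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Stand-ins for the real CPUTLBEntry tables; sizes are illustrative only. */
    #define SKETCH_TLB_SIZE   256
    #define SKETCH_VTLB_SIZE  8
    #define SKETCH_PAGE_MASK  (~(uint64_t)0xfff)

    typedef struct {
        uint64_t addr_read;
        uint64_t addr_write;
        uintptr_t addend;
    } SketchTLBEntry;

    static SketchTLBEntry tlb_table[SKETCH_TLB_SIZE];    /* direct-mapped TLB  */
    static SketchTLBEntry tlb_v_table[SKETCH_VTLB_SIZE]; /* victim TLB entries */

    /* Scan the victim TLB for a page match on the comparator selected by
     * cmp_offset (offsetof addr_read or addr_write).  On a hit, swap the
     * victim entry with direct-mapped slot 'index' and report the hit so
     * the caller can skip the page table walk. */
    static bool victim_tlb_hit_sketch(size_t cmp_offset, int index, uint64_t addr)
    {
        for (int vidx = SKETCH_VTLB_SIZE - 1; vidx >= 0; --vidx) {
            uint64_t cmp;
            memcpy(&cmp, (char *)&tlb_v_table[vidx] + cmp_offset, sizeof(cmp));
            if (cmp == (addr & SKETCH_PAGE_MASK)) {
                SketchTLBEntry tmp = tlb_table[index];
                tlb_table[index] = tlb_v_table[vidx];
                tlb_v_table[vidx] = tmp;
                return true;
            }
        }
        return false;
    }

    int main(void)
    {
        /* Seed one victim entry for page 0x4000, then look it up. */
        tlb_v_table[3].addr_write = 0x4000;
        bool hit = victim_tlb_hit_sketch(offsetof(SketchTLBEntry, addr_write),
                                         /* index */ 4, /* addr */ 0x4abc);
        return (hit && tlb_table[4].addr_write == 0x4000) ? 0 : 1;
    }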
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
CPUIOTLBEntry *iotlbentry,
uint64_t val;
CPUState *cpu = ENV_GET_CPU(env);
hwaddr physaddr = iotlbentry->addr;
- MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
+ MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr;
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
+ if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr);
}
}
#endif
-#ifdef SOFTMMU_CODE_ACCESS
-static __attribute__((unused))
-#endif
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
DATA_TYPE res;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
- if (!VICTIM_TLB_HIT(ADDR_READ)) {
+ if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
return res;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
}
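With the MO_AMASK checks gone from the fast and slow paths, the alignment test above runs exactly once, before the TLB comparison: a_bits is the required alignment for this memop in bits, and the access is misaligned when any of the low a_bits address bits are set. A minimal standalone illustration of that mask test; the helper name is made up for the example and is not a QEMU API.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* True if 'addr' is misaligned for an access requiring 2^a_bits alignment. */
    static bool is_misaligned(uint64_t addr, int a_bits)
    {
        return a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0;
    }

    int main(void)
    {
        /* a_bits == 2 means 4-byte alignment: the mask is 0b11. */
        assert(!is_misaligned(0x1000, 2));
        assert(!is_misaligned(0x1004, 2));
        assert(is_misaligned(0x1002, 2));
        /* a_bits == 0 means no alignment requirement at all. */
        assert(!is_misaligned(0x1003, 0));
        return 0;
    }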
#if DATA_SIZE > 1
-#ifdef SOFTMMU_CODE_ACCESS
-static __attribute__((unused))
-#endif
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
DATA_TYPE res;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
- if (!VICTIM_TLB_HIT(ADDR_READ)) {
+ if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
return res;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
return res;
}
#endif /* DATA_SIZE > 1 */
-DATA_TYPE
-glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
- int mmu_idx)
-{
- TCGMemOpIdx oi = make_memop_idx(SHIFT, mmu_idx);
- return helper_te_ld_name (env, addr, oi, GETRA());
-}
-
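The deleted wrapper was the old-style entry point: it packed a bare mmu_idx together with the access size into a TCGMemOpIdx via make_memop_idx() and forwarded to helper_te_ld_name() with the caller's return address; callers are now expected to pass the packed oi themselves. A rough sketch of the pack/unpack idea, assuming a layout with the mmu index in the low bits and the memop above it; the real layout lives in QEMU's tcg headers and the names below are invented for the example.

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t MemOpIdxSketch;

    /* Hypothetical layout: low 4 bits = mmu index, remaining bits = memory op. */
    static MemOpIdxSketch pack_memop_idx(unsigned memop, unsigned mmu_idx)
    {
        return (memop << 4) | (mmu_idx & 0xf);
    }

    static unsigned unpack_mmu_idx(MemOpIdxSketch oi) { return oi & 0xf; }
    static unsigned unpack_memop(MemOpIdxSketch oi)   { return oi >> 4; }

    int main(void)
    {
        MemOpIdxSketch oi = pack_memop_idx(/* memop */ 3, /* mmu_idx */ 1);
        assert(unpack_mmu_idx(oi) == 1);
        assert(unpack_memop(oi) == 3);
        return 0;
    }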
#ifndef SOFTMMU_CODE_ACCESS
/* Provide signed versions of the load routines as well. We can of course
{
CPUState *cpu = ENV_GET_CPU(env);
hwaddr physaddr = iotlbentry->addr;
- MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
+ MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
+ if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr);
}
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
- if (!VICTIM_TLB_HIT(addr_write)) {
+ if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
- int i;
+ int i, index2;
+ target_ulong page2, tlb_addr2;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
+ /* Ensure the second page is in the TLB. Note that the first page
+ is already guaranteed to be filled, and that the second page
+ cannot evict the first. */
+ page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+ index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+ if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+ && !VICTIM_TLB_HIT(addr_write, page2)) {
+ tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+ mmu_idx, retaddr);
}
- /* XXX: not efficient, but simple */
- /* Note: relies on the fact that tlb_fill() does not remove the
- * previous page from the TLB cache. */
- for (i = DATA_SIZE - 1; i >= 0; i--) {
+
+ /* XXX: not efficient, but simple. */
+        /* This loop must go in the forward direction to avoid issues
+           with self-modifying code observed with 64-bit Windows guests. */
+ for (i = 0; i < DATA_SIZE; ++i) {
/* Little-endian extract. */
uint8_t val8 = val >> (i * 8);
/* Note the adjustment at the beginning of the function.
return;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
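The rewritten slow path above first makes sure both pages of a cross-page store are present in the TLB (filling for page2 if needed) and only then writes the bytes, in ascending address order, extracting byte i of a little-endian value as val >> (i * 8). A self-contained sketch of that byte-by-byte little-endian split into a plain buffer; the helper name is invented for the example.

    #include <assert.h>
    #include <stdint.h>

    /* Store 'val' little-endian, one byte at a time in ascending address
     * order, as the cross-page slow path does once both pages are known
     * to be writable. */
    static void store_le_bytewise(uint8_t *dst, uint64_t val, int size)
    {
        for (int i = 0; i < size; ++i) {
            dst[i] = (uint8_t)(val >> (i * 8));   /* little-endian extract */
        }
    }

    int main(void)
    {
        uint8_t buf[4];
        store_le_bytewise(buf, 0x11223344u, 4);
        assert(buf[0] == 0x44 && buf[1] == 0x33 && buf[2] == 0x22 && buf[3] == 0x11);
        return 0;
    }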
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
- if (!VICTIM_TLB_HIT(addr_write)) {
+ if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
- int i;
+ int i, index2;
+ target_ulong page2, tlb_addr2;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
+ /* Ensure the second page is in the TLB. Note that the first page
+ is already guaranteed to be filled, and that the second page
+ cannot evict the first. */
+ page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+ index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+ if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+ && !VICTIM_TLB_HIT(addr_write, page2)) {
+ tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+ mmu_idx, retaddr);
}
+
/* XXX: not efficient, but simple */
- /* Note: relies on the fact that tlb_fill() does not remove the
- * previous page from the TLB cache. */
- for (i = DATA_SIZE - 1; i >= 0; i--) {
+ /* This loop must go in the forward direction to avoid issues
+ with self-modifying code. */
+ for (i = 0; i < DATA_SIZE; ++i) {
/* Big-endian extract. */
uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
/* Note the adjustment at the beginning of the function.
return;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
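The big-endian store path mirrors this with the shift val >> (((DATA_SIZE - 1) * 8) - (i * 8)), so the first byte written is the most significant one. A matching sketch, again with an invented helper name:

    #include <assert.h>
    #include <stdint.h>

    /* Store 'val' big-endian: byte 0 written is the most significant byte. */
    static void store_be_bytewise(uint8_t *dst, uint64_t val, int size)
    {
        for (int i = 0; i < size; ++i) {
            dst[i] = (uint8_t)(val >> (((size - 1) * 8) - (i * 8)));  /* big-endian extract */
        }
    }

    int main(void)
    {
        uint8_t buf[4];
        store_be_bytewise(buf, 0x11223344u, 4);
        assert(buf[0] == 0x11 && buf[1] == 0x22 && buf[2] == 0x33 && buf[3] == 0x44);
        return 0;
    }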
-void
-glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
- DATA_TYPE val, int mmu_idx)
-{
- TCGMemOpIdx oi = make_memop_idx(SHIFT, mmu_idx);
- helper_te_st_name(env, addr, val, oi, GETRA());
-}
-
#if DATA_SIZE == 1
/* Probe for whether the specified guest write access is permitted.
* If it is not permitted then an exception will be taken in the same
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
/* TLB entry is for a different page */
- if (!VICTIM_TLB_HIT(addr_write)) {
+ if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
}
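The comparison used here, and in every helper above, folds TLB_INVALID_MASK into the comparator, so an invalidated entry can never compare equal to a real page and the fill path is always taken for it. A small standalone illustration; the mask and invalid-bit values below are placeholders chosen for the example, not QEMU's actual constants.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_PAGE_MASK    (~(uint64_t)0xfff)
    #define SKETCH_INVALID_BIT  ((uint64_t)1 << 3)   /* placeholder for TLB_INVALID_MASK */

    /* The TLB hit test: compare page numbers, with the invalid bit folded
     * into the stored comparator so a stale entry never matches. */
    static bool tlb_entry_matches(uint64_t addr, uint64_t tlb_addr)
    {
        return (addr & SKETCH_PAGE_MASK)
            == (tlb_addr & (SKETCH_PAGE_MASK | SKETCH_INVALID_BIT));
    }

    int main(void)
    {
        assert(tlb_entry_matches(0x1234, 0x1000));                       /* same page, valid  */
        assert(!tlb_entry_matches(0x1234, 0x1000 | SKETCH_INVALID_BIT)); /* invalidated entry */
        assert(!tlb_entry_matches(0x2234, 0x1000));                      /* different page    */
        return 0;
    }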