+    cpu->mem_io_vaddr = addr;
+    cpu->mem_io_pc = retaddr;
+    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
+                                 iotlbentry->attrs);
+}
+
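+/* Little-endian store helper.  This template expands once per access
+   size: the including file defines DATA_TYPE, DATA_SIZE and SUFFIX, so
+   helper_le_st_name becomes, e.g., helper_le_stw_mmu.  */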
+void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
+                       TCGMemOpIdx oi, uintptr_t retaddr)
+{
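+    /* oi packs both the memop and the mmu index; unpack them, then hash
+       the virtual page number into a direct-mapped TLB slot.  */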
+    unsigned mmu_idx = get_mmuidx(oi);
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
+    uintptr_t haddr;
+
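+    /* Enforce any alignment the memop demands: a_bits is 0 when no
+       alignment is required, and cpu_unaligned_access raises the
+       target's alignment exception when the check fails.  */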
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+        }
+        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    }
+
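+    /* The low bits of a TLB entry double as flags (e.g. TLB_MMIO,
+       TLB_NOTDIRTY); if any are set, the store cannot take the direct
+       RAM path below.  */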
+    /* Handle an IO access.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        CPUIOTLBEntry *iotlbentry;
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+        iotlbentry = &env->iotlb[mmu_idx][index];
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        val = TGT_LE(val);
+        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
+        return;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                    >= TARGET_PAGE_SIZE)) {
+        int i, index2;
+        target_ulong page2, tlb_addr2;
+    do_unaligned_access:
+        /* Ensure the second page is in the TLB.  Note that the first page
+           is already guaranteed to be filled, and that the second page
+           cannot evict the first.  */
+        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+            && !VICTIM_TLB_HIT(addr_write, page2)) {
+            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+        }
+
+        /* XXX: not efficient, but simple.  */
+        /* This loop must go in the forward direction to avoid issues
+           with self-modifying code in Windows 64-bit.  */
+        for (i = 0; i < DATA_SIZE; ++i) {
+            /* Little-endian extract.  */
+            uint8_t val8 = val >> (i * 8);
+            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
+                                            oi, retaddr);
+        }
+        return;
+    }
+
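+    /* Fast path: for a plain RAM page, the cached addend turns the guest
+       virtual address straight into a host pointer.  */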
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+#if DATA_SIZE == 1
+    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);