/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
    return env->tlb_mask[mmu_idx] + (1 << CPU_TLB_ENTRY_BITS);
}

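/*
 * Note on the arithmetic above: tlb_mask[mmu_idx] stores
 * (n_entries - 1) << CPU_TLB_ENTRY_BITS, so adding the size of one entry
 * yields the size of the whole table in bytes.  For example, with 256
 * entries and 32-byte entries (CPU_TLB_ENTRY_BITS == 5 on a 64-bit build),
 * tlb_mask == 255 << 5 == 0x1fe0 and sizeof_tlb() == 0x2000 == 8 KiB.
 */
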
static void tlb_window_reset(CPUTLBWindow *window, int64_t ns,
                             size_t max_entries)
{
    window->begin_ns = ns;
    window->max_entries = max_entries;
}

static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env->tlb_d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(&desc->window, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env->tlb_mask[i] = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env->tlb_table[i] = g_new(CPUTLBEntry, n_entries);
        env->iotlb[i] = g_new(CPUIOTLBEntry, n_entries);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @env: CPU that owns the TLB
 * @mmu_idx: MMU index of the TLB
 *
 * Called with tlb_lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env->tlb_d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window.begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window.max_entries) {
        desc->window.max_entries = desc->n_used_entries;
    }
    rate = desc->window.max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window.max_entries);
        size_t expected_rate = desc->window.max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(&desc->window, now, desc->n_used_entries);
        }
        return;
    }

    g_free(env->tlb_table[mmu_idx]);
    g_free(env->iotlb[mmu_idx]);

    tlb_window_reset(&desc->window, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
    env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env->tlb_table[mmu_idx] == NULL || env->iotlb[mmu_idx] == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env->tlb_table[mmu_idx]);
        g_free(env->iotlb[mmu_idx]);
        env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
        env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
    }
}

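/*
 * Worked example of the policy above: with old_size == 1024, a flush that
 * finds 800 used entries gives a use rate of 78%, so the table is doubled
 * to 2048 entries.  Conversely, if the maximum use seen over an expired
 * 100 ms window is only 200 entries (19%), pow2ceil(200) == 256 and the
 * expected rate 200/256 == 78% exceeds 70%, so the table settles on 512
 * entries rather than 256.
 */
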
static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env->tlb_table[mmu_idx], -1, sizeof_tlb(env, mmu_idx));
    env->tlb_d[mmu_idx].n_used_entries = 0;
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env->tlb_d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env->tlb_d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env->tlb_c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env->tlb_c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}

/* flush_all_helper: run fn across all cpus
 *
 * The work is queued on every cpu other than src as asynchronous work.
 * Callers that need a synchronisation point additionally queue the src
 * cpu's helper as "safe" work (see the *_all_cpus_synced variants), so
 * that all queued work is finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env->tlb_c.full_flush_count);
        part += atomic_read(&env->tlb_c.part_flush_count);
        elide += atomic_read(&env->tlb_c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    tlb_table_flush_by_mmuidx(env, mmu_idx);
    memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    env->tlb_d[mmu_idx].large_page_addr = -1;
    env->tlb_d[mmu_idx].large_page_mask = -1;
    env->tlb_d[mmu_idx].vindex = 0;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env->tlb_c.lock);

    all_dirty = env->tlb_c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env->tlb_c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env->tlb_c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env->tlb_c.full_flush_count,
                   env->tlb_c.full_flush_count + 1);
    } else {
        atomic_set(&env->tlb_c.part_flush_count,
                   env->tlb_c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env->tlb_c.elide_flush_count,
                       env->tlb_c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

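/*
 * Note on the flush loop above: "work &= work - 1" clears the lowest set
 * bit on each iteration, so together with ctz32() it visits exactly the
 * MMU indexes whose bits are set in to_clean.  For example, to_clean ==
 * 0b0101 flushes mmu_idx 0, then mmu_idx 2, then terminates.
 */
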
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    int k;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
    target_ulong lp_mask = env->tlb_d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

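/*
 * Encoding used by the page-flush paths below: the page-aligned address
 * keeps its high bits and the mmuidx bitmap is packed into the low
 * TARGET_PAGE_BITS.  For example, with 4 KiB pages, flushing the page at
 * 0x1234000 for mmu indexes 0 and 2 is encoded as 0x1234000 | 0b0101 ==
 * 0x1234005.
 */
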
static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
                                         length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
                                         length);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env->tlb_d[mmu_idx].large_page_mask = lp_mask;
}

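/*
 * For example, after recording a 2 MiB page at 0x00200000 and another at
 * 0x00600000, the loop above widens lp_mask until both addresses match
 * under it, leaving large_page_addr/mask covering [0x00000000, 0x00800000).
 * Any page flush that falls inside that range then forces a full flush of
 * the mmu_idx (see tlb_flush_page_locked).
 */
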
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&env->tlb_c.lock);

    /* Note that the tlb is no longer clean.  */
    env->tlb_c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&env->tlb_c.lock);
}

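/*
 * A target's tlb_fill implementation typically ends with a call of this
 * shape (illustrative only; the argument names here are hypothetical):
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             phys & TARGET_PAGE_MASK, attrs,
 *                             prot, mmu_idx, page_size);
 *
 * passing TARGET_PAGE_SIZE, or the real page size for large pages, as the
 * final argument.
 */
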
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            qemu_spin_lock(&env->tlb_c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env->tlb_c.lock);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

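/*
 * For instance, VICTIM_TLB_HIT(addr_write, addr) expands to
 *   victim_tlb_hit(env, mmu_idx, index,
 *                  offsetof(CPUTLBEntry, addr_write),
 *                  addr & TARGET_PAGE_MASK)
 * and therefore requires env, mmu_idx and index in the calling scope.
 */
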
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we can't translate and execute from an entire
         * page of RAM here, which will cause us to execute by loading
         * and translating one insn at a time, without caching:
         *  - TLB_RECHECK: means the MMU protection covers a smaller range
         *    than a target page, so we must redo the MMU check every insn
         *  - TLB_MMIO: region is not backed by RAM
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
#define NEED_BE_BSWAP 0
#define NEED_LE_BSWAP 1
#else
#define NEED_BE_BSWAP 1
#define NEED_LE_BSWAP 0
#endif

/*
 * Byte Swap Helper
 *
 * This should all be optimized away as dead code depending on the build
 * host and access type.
 */
static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
{
    if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
        switch (size) {
        case 1: return val & 0xff;
        case 2: return bswap16(val);
        case 4: return bswap32(val);
        case 8: return bswap64(val);
        default:
            g_assert_not_reached();
        }
    } else {
        return val;
    }
}

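/*
 * Concretely: when the target is built without TARGET_WORDS_BIGENDIAN,
 * NEED_BE_BSWAP == 1, so a call with big_endian == true byte-swaps the
 * value (e.g. handle_bswap(0x11223344, 4, true) == 0x44332211), while
 * big_endian == false returns the value unchanged.
 */
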
/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 */
static uint64_t load_helper(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr,
                            size_t size, bool big_endian,
                            bool code_read)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    uint64_t res;

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(ENV_GET_CPU(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        if (tlb_addr & TLB_RECHECK) {
            /*
             * This is a TLB_RECHECK access, where the MMU protection
             * covers a smaller range than a target page, and we must
             * repeat the MMU check here. This tlb_fill() call might
             * longjump out if this access should cause a guest exception.
             */
            tlb_fill(ENV_GET_CPU(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            tlb_addr = code_read ? entry->addr_code : entry->addr_read;
            tlb_addr &= ~TLB_RECHECK;
            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
                /* RAM access */
                goto do_aligned_access;
            }
        }

        res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
                       retaddr, access_type, size);
        return handle_bswap(res, size, big_endian);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        tcg_target_ulong r1, r2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(size - 1);
        addr2 = addr1 + size;
        r1 = load_helper(env, addr1, oi, retaddr, size, big_endian, code_read);
        r2 = load_helper(env, addr2, oi, retaddr, size, big_endian, code_read);
        shift = (addr & (size - 1)) * 8;

        if (big_endian) {
            /* Big-endian combine.  */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine.  */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

 do_aligned_access:
    haddr = (void *)((uintptr_t)addr + entry->addend);
    switch (size) {
    case 1:
        res = ldub_p(haddr);
        break;
    case 2:
        if (big_endian) {
            res = lduw_be_p(haddr);
        } else {
            res = lduw_le_p(haddr);
        }
        break;
    case 4:
        if (big_endian) {
            res = (uint32_t)ldl_be_p(haddr);
        } else {
            res = (uint32_t)ldl_le_p(haddr);
        }
        break;
    case 8:
        if (big_endian) {
            res = ldq_be_p(haddr);
        } else {
            res = ldq_le_p(haddr);
        }
        break;
    default:
        g_assert_not_reached();
    }

    return res;
}

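/*
 * Worked example of the slow unaligned path above: a 4-byte little-endian
 * load at addr == page_base + 0xffe splits into two aligned 4-byte loads
 * at addr1 == addr & ~3 (offset 0xffc) and addr2 == addr1 + 4 (the next
 * page), with shift == (addr & 3) * 8 == 16; the result is combined as
 * (r1 >> 16) | (r2 << 16), masked to 32 bits.
 */
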
/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */

tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 1, false, false);
}

tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, false, false);
}

tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, true, false);
}

tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, false, false);
}

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, true, false);
}

uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, false, false);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, true, false);
}

/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */

tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}

static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                         TCGMemOpIdx oi, uintptr_t retaddr, size_t size,
                         bool big_endian)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        if (tlb_addr & TLB_RECHECK) {
            /*
             * This is a TLB_RECHECK access, where the MMU protection
             * covers a smaller range than a target page, and we must
             * repeat the MMU check here. This tlb_fill() call might
             * longjump out if this access should cause a guest exception.
             */
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            tlb_addr = tlb_addr_write(entry);
            tlb_addr &= ~TLB_RECHECK;
            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
                /* RAM access */
                goto do_aligned_access;
            }
        }

        io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
                  handle_bswap(val, size, big_endian),
                  addr, retaddr, size);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
        uintptr_t index2;
        CPUTLBEntry *entry2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /*
         * Ensure the second page is in the TLB.  Note that the first page
         * is already guaranteed to be filled, and that the second page
         * cannot evict the first.
         */
        page2 = (addr + size) & TARGET_PAGE_MASK;
        index2 = tlb_index(env, mmu_idx, page2);
        entry2 = tlb_entry(env, mmu_idx, page2);
        tlb_addr2 = tlb_addr_write(entry2);
        if (!tlb_hit_page(tlb_addr2, page2)
            && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
                               page2 & TARGET_PAGE_MASK)) {
            tlb_fill(ENV_GET_CPU(env), page2, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /*
         * XXX: not efficient, but simple.
         * This loop must go in the forward direction to avoid issues
         * with self-modifying code in Windows 64-bit.
         */
        for (i = 0; i < size; ++i) {
            uint8_t val8;
            if (big_endian) {
                /* Big-endian extract.  */
                val8 = val >> (((size - 1) * 8) - (i * 8));
            } else {
                /* Little-endian extract.  */
                val8 = val >> (i * 8);
            }
            store_helper(env, addr + i, val8, oi, retaddr, 1, big_endian);
        }
        return;
    }

 do_aligned_access:
    haddr = (void *)((uintptr_t)addr + entry->addend);
    switch (size) {
    case 1:
        stb_p(haddr, val);
        break;
    case 2:
        if (big_endian) {
            stw_be_p(haddr, val);
        } else {
            stw_le_p(haddr, val);
        }
        break;
    case 4:
        if (big_endian) {
            stl_be_p(haddr, val);
        } else {
            stl_le_p(haddr, val);
        }
        break;
    case 8:
        if (big_endian) {
            stq_be_p(haddr, val);
        } else {
            stq_le_p(haddr, val);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

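/*
 * Worked example of the unaligned store path above: a 4-byte little-endian
 * store of 0x11223344 that crosses a page boundary is replayed as four
 * 1-byte stores, extracting val >> 0, >> 8, >> 16 and >> 24 in turn, i.e.
 * 0x44, 0x33, 0x22, 0x11 in ascending guest address order.
 */
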
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 1, false);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 2, false);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 2, true);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 4, false);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 4, true);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 8, false);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 8, true);
}

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

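/*
 * For reference, with DATA_SIZE == 4 the template above generates helpers
 * such as helper_atomic_cmpxchgl_le_mmu()/helper_atomic_cmpxchgl_be_mmu():
 * ATOMIC_NAME() glues "atomic_<op>", the size suffix and the endianness
 * suffix, then appends "_mmu" for this first, retaddr-taking set.
 */
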
/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 1, false, true);
}

uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, false, true);
}

uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, true, true);
}

uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, false, true);
}

uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, true, true);
}

uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, false, true);
}

uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, true, true);
}