/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

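/*
 * Per-vCPU TLB state must only be touched from the owning vCPU thread;
 * the check below is compiled out unless DEBUG_TLB is defined.
 */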
#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

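    /*
     * Use rate of the table being flushed, in percent. For example, with
     * 820 entries in use out of 1024, rate == 80 and the table doubles;
     * with 820 in use out of 4096, rate == 20 and the table may shrink
     * once the 100 ms window has expired.
     */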
    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->iotlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

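    /*
     * Walk the dirty set one mmu_idx at a time: "work &= work - 1" clears
     * the lowest set bit, so each iteration flushes exactly one index.
     */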
    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

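    /* A hit on any of the three comparators (read, write, code) counts. */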
    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
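        /*
         * Wiping the whole entry with -1 also produces the "empty" pattern
         * that tlb_entry_is_empty() tests for.
         */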
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}

typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

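    /*
     * The TB jump cache is keyed by virtual address, so discard any cached
     * entries for every page in the flushed range as well.
     */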
    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
        tb_flush_jmp_cache(cpu, d.addr + i);
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        target_ulong addr, target_ulong len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
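        /*
         * Widen the mask one bit at a time until the old region and the
         * new page fall inside the same naturally aligned block.
         */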
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
                               access_type, mmu_idx, false, retaddr);
    assert(ok);
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
}

static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
                                          vaddr addr, unsigned size,
                                          MMUAccessType access_type,
                                          int mmu_idx, MemTxAttrs attrs,
                                          MemTxResult response,
                                          uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!cpu->ignore_memory_transaction_failures &&
        cc->tcg_ops->do_transaction_failed) {
        cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
                                           access_type, mmu_idx, attrs,
                                           response, retaddr);
    }
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

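    /*
     * Device MMIO handlers may assume the big QEMU lock is held; take it
     * here if this thread does not already hold it.
     */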
41744954 | 1355 | if (!qemu_mutex_iothread_locked()) { |
8d04fb55 JK |
1356 | qemu_mutex_lock_iothread(); |
1357 | locked = true; | |
1358 | } | |
be5c4787 | 1359 | r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); |
04e3aabd | 1360 | if (r != MEMTX_OK) { |
2d54f194 PM |
1361 | hwaddr physaddr = mr_offset + |
1362 | section->offset_within_address_space - | |
1363 | section->offset_within_region; | |
1364 | ||
be5c4787 | 1365 | cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, |
04e3aabd PM |
1366 | mmu_idx, iotlbentry->attrs, r, retaddr); |
1367 | } | |
8d04fb55 JK |
1368 | if (locked) { |
1369 | qemu_mutex_unlock_iothread(); | |
1370 | } | |
1371 | ||
82a45b96 RH |
1372 | return val; |
1373 | } | |
1374 | ||
2f3a57ee AB |
1375 | /* |
1376 | * Save a potentially trashed IOTLB entry for later lookup by plugin. | |
570ef309 AB |
1377 | * This is read by tlb_plugin_lookup if the iotlb entry no longer matches | 
1378 | * because io_writex has changed the memory layout as a side effect. | 
2f3a57ee AB |
1379 | */ |
1380 | static void save_iotlb_data(CPUState *cs, hwaddr addr, | |
1381 | MemoryRegionSection *section, hwaddr mr_offset) | |
1382 | { | |
1383 | #ifdef CONFIG_PLUGIN | |
1384 | SavedIOTLB *saved = &cs->saved_iotlb; | |
1385 | saved->addr = addr; | |
1386 | saved->section = section; | |
1387 | saved->mr_offset = mr_offset; | |
1388 | #endif | |
1389 | } | |
1390 | ||
82a45b96 | 1391 | static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, |
f1be3696 | 1392 | int mmu_idx, uint64_t val, target_ulong addr, |
be5c4787 | 1393 | uintptr_t retaddr, MemOp op) |
82a45b96 | 1394 | { |
29a0af61 | 1395 | CPUState *cpu = env_cpu(env); |
2d54f194 PM |
1396 | hwaddr mr_offset; |
1397 | MemoryRegionSection *section; | |
1398 | MemoryRegion *mr; | |
8d04fb55 | 1399 | bool locked = false; |
04e3aabd | 1400 | MemTxResult r; |
82a45b96 | 1401 | |
2d54f194 PM |
1402 | section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); |
1403 | mr = section->mr; | |
1404 | mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; | |
08565552 | 1405 | if (!cpu->can_do_io) { |
82a45b96 RH |
1406 | cpu_io_recompile(cpu, retaddr); |
1407 | } | |
82a45b96 | 1408 | cpu->mem_io_pc = retaddr; |
8d04fb55 | 1409 | |
2f3a57ee AB |
1410 | /* |
1411 | * The memory_region_dispatch may trigger a flush/resize | |
1412 | * so for plugins we save the iotlb_data just in case. | |
1413 | */ | |
1414 | save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset); | |
1415 | ||
41744954 | 1416 | if (!qemu_mutex_iothread_locked()) { |
8d04fb55 JK |
1417 | qemu_mutex_lock_iothread(); |
1418 | locked = true; | |
1419 | } | |
be5c4787 | 1420 | r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); |
04e3aabd | 1421 | if (r != MEMTX_OK) { |
2d54f194 PM |
1422 | hwaddr physaddr = mr_offset + |
1423 | section->offset_within_address_space - | |
1424 | section->offset_within_region; | |
1425 | ||
be5c4787 TN |
1426 | cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), |
1427 | MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, | |
1428 | retaddr); | |
04e3aabd | 1429 | } |
8d04fb55 JK |
1430 | if (locked) { |
1431 | qemu_mutex_unlock_iothread(); | |
1432 | } | |
82a45b96 RH |
1433 | } |
1434 | ||
4811e909 RH |
1435 | static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) |
1436 | { | |
1437 | #if TCG_OVERSIZED_GUEST | |
1438 | return *(target_ulong *)((uintptr_t)entry + ofs); | |
1439 | #else | |
d73415a3 SH |
1440 | /* ofs might correspond to .addr_write, so use qatomic_read */ |
1441 | return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); | |
4811e909 RH |
1442 | #endif |
1443 | } | |
1444 | ||
7e9a7c50 RH |
1445 | /* Return true if ADDR is present in the victim tlb, and has been copied |
1446 | back to the main tlb. */ | |
1447 | static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, | |
1448 | size_t elt_ofs, target_ulong page) | |
1449 | { | |
1450 | size_t vidx; | |
71aec354 | 1451 | |
29a0af61 | 1452 | assert_cpu_is_self(env_cpu(env)); |
7e9a7c50 | 1453 | for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { |
a40ec84e RH |
1454 | CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; |
1455 | target_ulong cmp; | |
1456 | ||
d73415a3 | 1457 | /* elt_ofs might correspond to .addr_write, so use qatomic_read */ |
a40ec84e RH |
1458 | #if TCG_OVERSIZED_GUEST |
1459 | cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); | |
1460 | #else | |
d73415a3 | 1461 | cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); |
a40ec84e | 1462 | #endif |
7e9a7c50 RH |
1463 | |
1464 | if (cmp == page) { | |
1465 | /* Found entry in victim tlb, swap tlb and iotlb. */ | |
a40ec84e | 1466 | CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; |
b0706b71 | 1467 | |
a40ec84e | 1468 | qemu_spin_lock(&env_tlb(env)->c.lock); |
71aec354 EC |
1469 | copy_tlb_helper_locked(&tmptlb, tlb); |
1470 | copy_tlb_helper_locked(tlb, vtlb); | |
1471 | copy_tlb_helper_locked(vtlb, &tmptlb); | |
a40ec84e | 1472 | qemu_spin_unlock(&env_tlb(env)->c.lock); |
b0706b71 | 1473 | |
a40ec84e RH |
1474 | CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; |
1475 | CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; | |
7e9a7c50 RH |
1476 | tmpio = *io; *io = *vio; *vio = tmpio; |
1477 | return true; | |
1478 | } | |
1479 | } | |
1480 | return false; | |
1481 | } | |
1482 | ||
1483 | /* Macro to call the above, with local variables from the use context. */ | |
a390284b | 1484 | #define VICTIM_TLB_HIT(TY, ADDR) \ |
7e9a7c50 | 1485 | victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ |
a390284b | 1486 | (ADDR) & TARGET_PAGE_MASK) |
7e9a7c50 | 1487 | |
30d7e098 RH |
1488 | /* |
1489 | * Return a ram_addr_t for the virtual address for execution. | |
1490 | * | |
1491 | * Return -1 if we can't translate and execute from an entire page | |
1492 | * of RAM. This will force us to execute by loading and translating | |
1493 | * one insn at a time, without caching. | |
1494 | * | |
1495 | * NOTE: This function will trigger an exception if the page is | |
1496 | * not executable. | |
f2553f04 | 1497 | */ |
4b2190da EC |
1498 | tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, |
1499 | void **hostp) | |
f2553f04 | 1500 | { |
383beda9 RH |
1501 | uintptr_t mmu_idx = cpu_mmu_index(env, true); |
1502 | uintptr_t index = tlb_index(env, mmu_idx, addr); | |
1503 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | |
f2553f04 | 1504 | void *p; |
f2553f04 | 1505 | |
383beda9 | 1506 | if (unlikely(!tlb_hit(entry->addr_code, addr))) { |
b493ccf1 | 1507 | if (!VICTIM_TLB_HIT(addr_code, addr)) { |
29a0af61 | 1508 | tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); |
6d967cb8 EC |
1509 | index = tlb_index(env, mmu_idx, addr); |
1510 | entry = tlb_entry(env, mmu_idx, addr); | |
30d7e098 RH |
1511 | |
1512 | if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { | |
1513 | /* | |
1514 | * The MMU protection covers a smaller range than a target | |
1515 | * page, so we must redo the MMU check for every insn. | |
1516 | */ | |
1517 | return -1; | |
1518 | } | |
71b9a453 | 1519 | } |
383beda9 | 1520 | assert(tlb_hit(entry->addr_code, addr)); |
f2553f04 | 1521 | } |
55df6fcf | 1522 | |
30d7e098 RH |
1523 | if (unlikely(entry->addr_code & TLB_MMIO)) { |
1524 | /* The region is not backed by RAM. */ | |
4b2190da EC |
1525 | if (hostp) { |
1526 | *hostp = NULL; | |
1527 | } | |
20cb6ae4 | 1528 | return -1; |
55df6fcf PM |
1529 | } |
1530 | ||
383beda9 | 1531 | p = (void *)((uintptr_t)addr + entry->addend); |
4b2190da EC |
1532 | if (hostp) { |
1533 | *hostp = p; | |
1534 | } | |
f2553f04 FK |
1535 | return qemu_ram_addr_from_host_nofail(p); |
1536 | } | |
1537 | ||
4b2190da EC |
1538 | tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) |
1539 | { | |
1540 | return get_page_addr_code_hostp(env, addr, NULL); | |
1541 | } | |
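/*
 * Usage sketch (illustrative only): a caller that wants both the physical
 * address and the host pointer, and that falls back to per-insn translation
 * when the page is not backed by RAM:
 *
 *     void *host;
 *     tb_page_addr_t phys = get_page_addr_code_hostp(env, pc, &host);
 *     if (phys == -1) {
 *         ... MMIO or per-insn MMU checks: translate without caching ...
 *     }
 */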
1542 | ||
707526ad RH |
1543 | static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, |
1544 | CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) | |
1545 | { | |
1546 | ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; | |
1547 | ||
1548 | trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); | |
1549 | ||
1550 | if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { | |
1551 | struct page_collection *pages | |
1552 | = page_collection_lock(ram_addr, ram_addr + size); | |
5a7c27bb | 1553 | tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr); |
707526ad RH |
1554 | page_collection_unlock(pages); |
1555 | } | |
1556 | ||
1557 | /* | |
1558 | * Set both VGA and migration bits for simplicity and to remove | |
1559 | * the notdirty callback faster. | |
1560 | */ | |
1561 | cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); | |
1562 | ||
1563 | /* We remove the notdirty callback only if the code has been flushed. */ | |
1564 | if (!cpu_physical_memory_is_clean(ram_addr)) { | |
1565 | trace_memory_notdirty_set_dirty(mem_vaddr); | |
1566 | tlb_set_dirty(cpu, mem_vaddr); | |
1567 | } | |
1568 | } | |
1569 | ||
069cfe77 RH |
1570 | static int probe_access_internal(CPUArchState *env, target_ulong addr, |
1571 | int fault_size, MMUAccessType access_type, | |
1572 | int mmu_idx, bool nonfault, | |
1573 | void **phost, uintptr_t retaddr) | |
3b08f0a9 | 1574 | { |
383beda9 RH |
1575 | uintptr_t index = tlb_index(env, mmu_idx, addr); |
1576 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | |
069cfe77 | 1577 | target_ulong tlb_addr, page_addr; |
c25c283d | 1578 | size_t elt_ofs; |
069cfe77 | 1579 | int flags; |
ca86cf32 | 1580 | |
c25c283d DH |
1581 | switch (access_type) { |
1582 | case MMU_DATA_LOAD: | |
1583 | elt_ofs = offsetof(CPUTLBEntry, addr_read); | |
c25c283d DH |
1584 | break; |
1585 | case MMU_DATA_STORE: | |
1586 | elt_ofs = offsetof(CPUTLBEntry, addr_write); | |
c25c283d DH |
1587 | break; |
1588 | case MMU_INST_FETCH: | |
1589 | elt_ofs = offsetof(CPUTLBEntry, addr_code); | |
c25c283d DH |
1590 | break; |
1591 | default: | |
1592 | g_assert_not_reached(); | |
1593 | } | |
1594 | tlb_addr = tlb_read_ofs(entry, elt_ofs); | |
1595 | ||
069cfe77 RH |
1596 | page_addr = addr & TARGET_PAGE_MASK; |
1597 | if (!tlb_hit_page(tlb_addr, page_addr)) { | |
1598 | if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { | |
1599 | CPUState *cs = env_cpu(env); | |
1600 | CPUClass *cc = CPU_GET_CLASS(cs); | |
1601 | ||
78271684 CF |
1602 | if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, |
1603 | mmu_idx, nonfault, retaddr)) { | |
069cfe77 RH |
1604 | /* Non-faulting page table read failed. */ |
1605 | *phost = NULL; | |
1606 | return TLB_INVALID_MASK; | |
1607 | } | |
1608 | ||
1609 | /* TLB resize via tlb_fill may have moved the entry. */ | |
03a98189 | 1610 | entry = tlb_entry(env, mmu_idx, addr); |
3b08f0a9 | 1611 | } |
c25c283d | 1612 | tlb_addr = tlb_read_ofs(entry, elt_ofs); |
03a98189 | 1613 | } |
069cfe77 | 1614 | flags = tlb_addr & TLB_FLAGS_MASK; |
03a98189 | 1615 | |
069cfe77 RH |
1616 | /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ |
1617 | if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { | |
1618 | *phost = NULL; | |
1619 | return TLB_MMIO; | |
fef39ccd DH |
1620 | } |
1621 | ||
069cfe77 RH |
1622 | /* Everything else is RAM. */ |
1623 | *phost = (void *)((uintptr_t)addr + entry->addend); | |
1624 | return flags; | |
1625 | } | |
1626 | ||
1627 | int probe_access_flags(CPUArchState *env, target_ulong addr, | |
1628 | MMUAccessType access_type, int mmu_idx, | |
1629 | bool nonfault, void **phost, uintptr_t retaddr) | |
1630 | { | |
1631 | int flags; | |
1632 | ||
1633 | flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, | |
1634 | nonfault, phost, retaddr); | |
1635 | ||
1636 | /* Handle clean RAM pages. */ | |
1637 | if (unlikely(flags & TLB_NOTDIRTY)) { | |
1638 | uintptr_t index = tlb_index(env, mmu_idx, addr); | |
73bc0bd4 | 1639 | CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; |
fef39ccd | 1640 | |
069cfe77 RH |
1641 | notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); |
1642 | flags &= ~TLB_NOTDIRTY; | |
1643 | } | |
1644 | ||
1645 | return flags; | |
1646 | } | |
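/*
 * Usage sketch (illustrative; the surrounding target helper is
 * hypothetical): with nonfault == true a helper can test a page without
 * risking a longjmp out of the helper, e.g.
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD,
 *                                    cpu_mmu_index(env, false),
 *                                    true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         ... translation failed and no exception was raised ...
 *     }
 */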
1647 | ||
1648 | void *probe_access(CPUArchState *env, target_ulong addr, int size, | |
1649 | MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) | |
1650 | { | |
1651 | void *host; | |
1652 | int flags; | |
1653 | ||
1654 | g_assert(-(addr | TARGET_PAGE_MASK) >= size); | |
1655 | ||
1656 | flags = probe_access_internal(env, addr, size, access_type, mmu_idx, | |
1657 | false, &host, retaddr); | |
1658 | ||
1659 | /* Per the interface, size == 0 merely faults the access. */ | |
1660 | if (size == 0) { | |
1661 | return NULL; | |
1662 | } | |
1663 | ||
1664 | if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { | |
1665 | uintptr_t index = tlb_index(env, mmu_idx, addr); | |
1666 | CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; | |
73bc0bd4 RH |
1667 | |
1668 | /* Handle watchpoints. */ | |
069cfe77 RH |
1669 | if (flags & TLB_WATCHPOINT) { |
1670 | int wp_access = (access_type == MMU_DATA_STORE | |
1671 | ? BP_MEM_WRITE : BP_MEM_READ); | |
73bc0bd4 RH |
1672 | cpu_check_watchpoint(env_cpu(env), addr, size, |
1673 | iotlbentry->attrs, wp_access, retaddr); | |
1674 | } | |
1675 | ||
1676 | /* Handle clean RAM pages. */ | |
069cfe77 RH |
1677 | if (flags & TLB_NOTDIRTY) { |
1678 | notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); | |
73bc0bd4 | 1679 | } |
fef39ccd DH |
1680 | } |
1681 | ||
069cfe77 | 1682 | return host; |
3b08f0a9 RH |
1683 | } |
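/*
 * Usage sketch (illustrative): probing a whole (page-bounded) guest buffer
 * before a multi-byte store, so that faults, watchpoints and dirty tracking
 * are handled up front and the returned host pointer can be used directly:
 *
 *     char *host = probe_access(env, addr, len, MMU_DATA_STORE,
 *                               cpu_mmu_index(env, false), GETPC());
 *     if (host) {
 *         memset(host, 0, len);
 *     } else {
 *         ... len == 0, or the page is MMIO and needs the slow path ...
 *     }
 */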
1684 | ||
4811e909 RH |
1685 | void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, |
1686 | MMUAccessType access_type, int mmu_idx) | |
1687 | { | |
069cfe77 RH |
1688 | void *host; |
1689 | int flags; | |
4811e909 | 1690 | |
069cfe77 RH |
1691 | flags = probe_access_internal(env, addr, 0, access_type, |
1692 | mmu_idx, true, &host, 0); | |
4811e909 | 1693 | |
069cfe77 RH |
1694 | /* No combination of flags are expected by the caller. */ |
1695 | return flags ? NULL : host; | |
4811e909 RH |
1696 | } |
1697 | ||
235537fa AB |
1698 | #ifdef CONFIG_PLUGIN |
1699 | /* | |
1700 | * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. | |
1701 | * This should be a hot path as we will have just looked this path up | |
1702 | * in the softmmu lookup code (or helper). We don't handle re-fills or | |
1703 | * checking the victim table. This is purely informational. | |
1704 | * | |
2f3a57ee AB |
1705 | * This almost never fails as the memory access being instrumented |
1706 | * should have just filled the TLB. The one corner case is io_writex | |
1707 | * which can cause TLB flushes and potential resizing of the TLBs, | 
570ef309 AB |
1708 | * losing the information we need. In those cases we need to recover |
1709 | * data from a copy of the iotlbentry. As long as this always occurs | |
1710 | * from the same thread (which a mem callback will be) this is safe. | |
235537fa AB |
1711 | */ |
1712 | ||
1713 | bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, | |
1714 | bool is_store, struct qemu_plugin_hwaddr *data) | |
1715 | { | |
1716 | CPUArchState *env = cpu->env_ptr; | |
1717 | CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); | |
1718 | uintptr_t index = tlb_index(env, mmu_idx, addr); | |
1719 | target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read; | |
1720 | ||
1721 | if (likely(tlb_hit(tlb_addr, addr))) { | |
1722 | /* We must have an iotlb entry for MMIO */ | |
1723 | if (tlb_addr & TLB_MMIO) { | |
1724 | CPUIOTLBEntry *iotlbentry; | |
1725 | iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; | |
1726 | data->is_io = true; | |
1727 | data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); | |
1728 | data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; | |
1729 | } else { | |
1730 | data->is_io = false; | |
2d932039 | 1731 | data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); |
235537fa AB |
1732 | } |
1733 | return true; | |
2f3a57ee AB |
1734 | } else { |
1735 | SavedIOTLB *saved = &cpu->saved_iotlb; | |
1736 | data->is_io = true; | |
1737 | data->v.io.section = saved->section; | |
1738 | data->v.io.offset = saved->mr_offset; | |
1739 | return true; | |
235537fa | 1740 | } |
235537fa AB |
1741 | } |
1742 | ||
1743 | #endif | |
1744 | ||
08dff435 RH |
1745 | /* |
1746 | * Probe for an atomic operation. Do not allow unaligned operations | 
1747 | * or I/O operations to proceed. Return the host address. | 
1748 | * | |
1749 | * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. | |
1750 | */ | |
c482cb11 | 1751 | static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, |
9002ffcb | 1752 | MemOpIdx oi, int size, int prot, |
08dff435 | 1753 | uintptr_t retaddr) |
c482cb11 RH |
1754 | { |
1755 | size_t mmu_idx = get_mmuidx(oi); | |
14776ab5 | 1756 | MemOp mop = get_memop(oi); |
c482cb11 | 1757 | int a_bits = get_alignment_bits(mop); |
08dff435 RH |
1758 | uintptr_t index; |
1759 | CPUTLBEntry *tlbe; | |
1760 | target_ulong tlb_addr; | |
34d49937 | 1761 | void *hostaddr; |
c482cb11 RH |
1762 | |
1763 | /* Adjust the given return address. */ | |
1764 | retaddr -= GETPC_ADJ; | |
1765 | ||
1766 | /* Enforce guest required alignment. */ | |
1767 | if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { | |
1768 | /* ??? Maybe indicate atomic op to cpu_unaligned_access */ | |
29a0af61 | 1769 | cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, |
c482cb11 RH |
1770 | mmu_idx, retaddr); |
1771 | } | |
1772 | ||
1773 | /* Enforce qemu required alignment. */ | |
08dff435 | 1774 | if (unlikely(addr & (size - 1))) { |
c482cb11 RH |
1775 | /* We get here if guest alignment was not requested, |
1776 | or was not enforced by cpu_unaligned_access above. | |
1777 | We might widen the access and emulate, but for now | |
1778 | mark an exception and exit the cpu loop. */ | |
1779 | goto stop_the_world; | |
1780 | } | |
1781 | ||
08dff435 RH |
1782 | index = tlb_index(env, mmu_idx, addr); |
1783 | tlbe = tlb_entry(env, mmu_idx, addr); | |
1784 | ||
c482cb11 | 1785 | /* Check TLB entry and enforce page permissions. */ |
08dff435 RH |
1786 | if (prot & PAGE_WRITE) { |
1787 | tlb_addr = tlb_addr_write(tlbe); | |
1788 | if (!tlb_hit(tlb_addr, addr)) { | |
1789 | if (!VICTIM_TLB_HIT(addr_write, addr)) { | |
1790 | tlb_fill(env_cpu(env), addr, size, | |
1791 | MMU_DATA_STORE, mmu_idx, retaddr); | |
1792 | index = tlb_index(env, mmu_idx, addr); | |
1793 | tlbe = tlb_entry(env, mmu_idx, addr); | |
1794 | } | |
1795 | tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; | |
1796 | } | |
1797 | ||
1798 | /* Let the guest notice RMW on a write-only page. */ | |
1799 | if ((prot & PAGE_READ) && | |
1800 | unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { | |
1801 | tlb_fill(env_cpu(env), addr, size, | |
1802 | MMU_DATA_LOAD, mmu_idx, retaddr); | |
1803 | /* | |
1804 | * Since we don't support reads and writes to different addresses, | |
1805 | * and we do have the proper page loaded for write, this shouldn't | |
1806 | * ever return. But just in case, handle via stop-the-world. | |
1807 | */ | |
1808 | goto stop_the_world; | |
1809 | } | |
1810 | } else /* if (prot & PAGE_READ) */ { | |
1811 | tlb_addr = tlbe->addr_read; | |
1812 | if (!tlb_hit(tlb_addr, addr)) { | |
1813 | if (!VICTIM_TLB_HIT(addr_write, addr)) { | |
1814 | tlb_fill(env_cpu(env), addr, size, | |
1815 | MMU_DATA_LOAD, mmu_idx, retaddr); | |
1816 | index = tlb_index(env, mmu_idx, addr); | |
1817 | tlbe = tlb_entry(env, mmu_idx, addr); | |
1818 | } | |
1819 | tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK; | |
c482cb11 | 1820 | } |
c482cb11 RH |
1821 | } |
1822 | ||
55df6fcf | 1823 | /* Notice an IO access or a needs-MMU-lookup access */ |
30d7e098 | 1824 | if (unlikely(tlb_addr & TLB_MMIO)) { |
c482cb11 RH |
1825 | /* There's really nothing that can be done to |
1826 | support this apart from stop-the-world. */ | |
1827 | goto stop_the_world; | |
1828 | } | |
1829 | ||
34d49937 PM |
1830 | hostaddr = (void *)((uintptr_t)addr + tlbe->addend); |
1831 | ||
34d49937 | 1832 | if (unlikely(tlb_addr & TLB_NOTDIRTY)) { |
08dff435 | 1833 | notdirty_write(env_cpu(env), addr, size, |
707526ad | 1834 | &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); |
34d49937 PM |
1835 | } |
1836 | ||
1837 | return hostaddr; | |
c482cb11 RH |
1838 | |
1839 | stop_the_world: | |
29a0af61 | 1840 | cpu_loop_exit_atomic(env_cpu(env), retaddr); |
c482cb11 RH |
1841 | } |
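/*
 * Usage sketch (illustrative; the real expansions live in atomic_template.h,
 * included near the end of this file): a generated 4-byte xchg helper is
 * expected to look roughly like the following, modulo byte swapping for the
 * cross-endian variants.
 *
 *     uint32_t cpu_atomic_xchgl_le_mmu(CPUArchState *env, target_ulong addr,
 *                                      uint32_t val, MemOpIdx oi, uintptr_t ra)
 *     {
 *         uint32_t *haddr = atomic_mmu_lookup(env, addr, oi, 4,
 *                                             PAGE_READ | PAGE_WRITE, ra);
 *         uint32_t ret = qatomic_xchg(haddr, val);
 *         ATOMIC_MMU_CLEANUP;
 *         return ret;
 *     }
 */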
1842 | ||
f83bcecb RH |
1843 | /* |
1844 | * Verify that we have passed the correct MemOp to the correct function. | |
1845 | * | |
1846 | * In the case of the helper_*_mmu functions, we will have done this by | |
1847 | * using the MemOp to look up the helper during code generation. | |
1848 | * | |
1849 | * In the case of the cpu_*_mmu functions, this is up to the caller. | |
1850 | * We could present one function to target code, and dispatch based on | |
1851 | * the MemOp, but so far we have worked hard to avoid an indirect function | |
1852 | * call along the memory path. | |
1853 | */ | |
1854 | static void validate_memop(MemOpIdx oi, MemOp expected) | |
1855 | { | |
1856 | #ifdef CONFIG_DEBUG_TCG | |
1857 | MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); | |
1858 | assert(have == expected); | |
1859 | #endif | |
1860 | } | |
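/*
 * Illustrative example: a MemOpIdx packs a MemOp together with the mmu index,
 * so a single oi value selects both the access size/endianness and the
 * translation regime.  From inside a target helper one might write
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
 *     uint32_t x = cpu_ldl_le_mmu(env, addr, oi, GETPC());
 *
 * and validate_memop() in full_le_ldul_mmu then checks that the MO_LEUL in
 * oi matches the function that was actually called.
 */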
1861 | ||
eed56642 AB |
1862 | /* |
1863 | * Load Helpers | |
1864 | * | |
1865 | * We support two different access types. SOFTMMU_CODE_ACCESS is | |
1866 | * specifically for reading instructions from system memory. It is | |
1867 | * called by the translation loop and in some helpers where the code | |
1868 | * is disassembled. It shouldn't be called directly by guest code. | |
1869 | */ | |
0f590e74 | 1870 | |
2dd92606 | 1871 | typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, |
9002ffcb | 1872 | MemOpIdx oi, uintptr_t retaddr); |
2dd92606 | 1873 | |
80d9d1c6 RH |
1874 | static inline uint64_t QEMU_ALWAYS_INLINE |
1875 | load_memop(const void *haddr, MemOp op) | |
1876 | { | |
1877 | switch (op) { | |
1878 | case MO_UB: | |
1879 | return ldub_p(haddr); | |
1880 | case MO_BEUW: | |
1881 | return lduw_be_p(haddr); | |
1882 | case MO_LEUW: | |
1883 | return lduw_le_p(haddr); | |
1884 | case MO_BEUL: | |
1885 | return (uint32_t)ldl_be_p(haddr); | |
1886 | case MO_LEUL: | |
1887 | return (uint32_t)ldl_le_p(haddr); | |
1888 | case MO_BEQ: | |
1889 | return ldq_be_p(haddr); | |
1890 | case MO_LEQ: | |
1891 | return ldq_le_p(haddr); | |
1892 | default: | |
1893 | qemu_build_not_reached(); | |
1894 | } | |
1895 | } | |
1896 | ||
c6b716cd | 1897 | static inline uint64_t QEMU_ALWAYS_INLINE |
9002ffcb | 1898 | load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi, |
be5c4787 | 1899 | uintptr_t retaddr, MemOp op, bool code_read, |
2dd92606 | 1900 | FullLoadHelper *full_load) |
eed56642 AB |
1901 | { |
1902 | uintptr_t mmu_idx = get_mmuidx(oi); | |
1903 | uintptr_t index = tlb_index(env, mmu_idx, addr); | |
1904 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | |
1905 | target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; | |
1906 | const size_t tlb_off = code_read ? | |
1907 | offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); | |
f1be3696 RH |
1908 | const MMUAccessType access_type = |
1909 | code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; | |
eed56642 AB |
1910 | unsigned a_bits = get_alignment_bits(get_memop(oi)); |
1911 | void *haddr; | |
1912 | uint64_t res; | |
be5c4787 | 1913 | size_t size = memop_size(op); |
eed56642 AB |
1914 | |
1915 | /* Handle CPU specific unaligned behaviour */ | |
1916 | if (addr & ((1 << a_bits) - 1)) { | |
29a0af61 | 1917 | cpu_unaligned_access(env_cpu(env), addr, access_type, |
eed56642 AB |
1918 | mmu_idx, retaddr); |
1919 | } | |
0f590e74 | 1920 | |
eed56642 AB |
1921 | /* If the TLB entry is for a different page, reload and try again. */ |
1922 | if (!tlb_hit(tlb_addr, addr)) { | |
1923 | if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, | |
1924 | addr & TARGET_PAGE_MASK)) { | |
29a0af61 | 1925 | tlb_fill(env_cpu(env), addr, size, |
f1be3696 | 1926 | access_type, mmu_idx, retaddr); |
eed56642 AB |
1927 | index = tlb_index(env, mmu_idx, addr); |
1928 | entry = tlb_entry(env, mmu_idx, addr); | |
1929 | } | |
1930 | tlb_addr = code_read ? entry->addr_code : entry->addr_read; | |
30d7e098 | 1931 | tlb_addr &= ~TLB_INVALID_MASK; |
eed56642 AB |
1932 | } |
1933 | ||
50b107c5 | 1934 | /* Handle anything that isn't just a straight memory access. */ |
eed56642 | 1935 | if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { |
50b107c5 | 1936 | CPUIOTLBEntry *iotlbentry; |
5b87b3e6 | 1937 | bool need_swap; |
50b107c5 RH |
1938 | |
1939 | /* For anything that is unaligned, recurse through full_load. */ | |
eed56642 AB |
1940 | if ((addr & (size - 1)) != 0) { |
1941 | goto do_unaligned_access; | |
1942 | } | |
50b107c5 RH |
1943 | |
1944 | iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; | |
1945 | ||
1946 | /* Handle watchpoints. */ | |
1947 | if (unlikely(tlb_addr & TLB_WATCHPOINT)) { | |
1948 | /* On watchpoint hit, this will longjmp out. */ | |
1949 | cpu_check_watchpoint(env_cpu(env), addr, size, | |
1950 | iotlbentry->attrs, BP_MEM_READ, retaddr); | |
50b107c5 RH |
1951 | } |
1952 | ||
5b87b3e6 RH |
1953 | need_swap = size > 1 && (tlb_addr & TLB_BSWAP); |
1954 | ||
50b107c5 | 1955 | /* Handle I/O access. */ |
5b87b3e6 RH |
1956 | if (likely(tlb_addr & TLB_MMIO)) { |
1957 | return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, | |
1958 | access_type, op ^ (need_swap * MO_BSWAP)); | |
1959 | } | |
1960 | ||
1961 | haddr = (void *)((uintptr_t)addr + entry->addend); | |
1962 | ||
1963 | /* | |
1964 | * Keep these two load_memop separate to ensure that the compiler | |
1965 | * is able to fold the entire function to a single instruction. | |
1966 | * There is a build-time assert inside to remind you of this. ;-) | |
1967 | */ | |
1968 | if (unlikely(need_swap)) { | |
1969 | return load_memop(haddr, op ^ MO_BSWAP); | |
1970 | } | |
1971 | return load_memop(haddr, op); | |
eed56642 AB |
1972 | } |
1973 | ||
1974 | /* Handle slow unaligned access (it spans two pages or IO). */ | |
1975 | if (size > 1 | |
1976 | && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 | |
1977 | >= TARGET_PAGE_SIZE)) { | |
1978 | target_ulong addr1, addr2; | |
8c79b288 | 1979 | uint64_t r1, r2; |
eed56642 AB |
1980 | unsigned shift; |
1981 | do_unaligned_access: | |
ab7a2009 | 1982 | addr1 = addr & ~((target_ulong)size - 1); |
eed56642 | 1983 | addr2 = addr1 + size; |
2dd92606 RH |
1984 | r1 = full_load(env, addr1, oi, retaddr); |
1985 | r2 = full_load(env, addr2, oi, retaddr); | |
eed56642 AB |
1986 | shift = (addr & (size - 1)) * 8; |
1987 | ||
be5c4787 | 1988 | if (memop_big_endian(op)) { |
eed56642 AB |
1989 | /* Big-endian combine. */ |
1990 | res = (r1 << shift) | (r2 >> ((size * 8) - shift)); | |
1991 | } else { | |
1992 | /* Little-endian combine. */ | |
1993 | res = (r1 >> shift) | (r2 << ((size * 8) - shift)); | |
1994 | } | |
1995 | return res & MAKE_64BIT_MASK(0, size * 8); | |
1996 | } | |
1997 | ||
1998 | haddr = (void *)((uintptr_t)addr + entry->addend); | |
80d9d1c6 | 1999 | return load_memop(haddr, op); |
eed56642 AB |
2000 | } |
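/*
 * Worked example (illustrative) of the unaligned combine above, for a 4-byte
 * little-endian load at an address with (addr & 3) == 2:
 *
 *     addr1 = addr & ~3          aligned chunk containing the low bytes
 *     addr2 = addr1 + 4          following aligned chunk
 *     shift = 2 * 8 = 16
 *     res   = (r1 >> 16) | (r2 << 16), masked to 32 bits
 *
 * i.e. the two high bytes of the first chunk become the low half of the
 * result and the two low bytes of the second chunk become the high half.
 */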
2001 | ||
2002 | /* | |
2003 | * For the benefit of TCG generated code, we want to avoid the | |
2004 | * complication of ABI-specific return type promotion and always | |
2005 | * return a value extended to the register size of the host. This is | |
2006 | * tcg_target_long, except in the case of a 32-bit host and 64-bit | |
2007 | * data, and for that we always have uint64_t. | |
2008 | * | |
2009 | * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. | |
2010 | */ | |
2011 | ||
2dd92606 | 2012 | static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, |
9002ffcb | 2013 | MemOpIdx oi, uintptr_t retaddr) |
2dd92606 | 2014 | { |
f83bcecb | 2015 | validate_memop(oi, MO_UB); |
be5c4787 | 2016 | return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); |
2dd92606 RH |
2017 | } |
2018 | ||
fc1bc777 | 2019 | tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, |
9002ffcb | 2020 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2021 | { |
2dd92606 RH |
2022 | return full_ldub_mmu(env, addr, oi, retaddr); |
2023 | } | |
2024 | ||
2025 | static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, | |
9002ffcb | 2026 | MemOpIdx oi, uintptr_t retaddr) |
2dd92606 | 2027 | { |
f83bcecb | 2028 | validate_memop(oi, MO_LEUW); |
be5c4787 | 2029 | return load_helper(env, addr, oi, retaddr, MO_LEUW, false, |
2dd92606 | 2030 | full_le_lduw_mmu); |
eed56642 AB |
2031 | } |
2032 | ||
fc1bc777 | 2033 | tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, |
9002ffcb | 2034 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2035 | { |
2dd92606 RH |
2036 | return full_le_lduw_mmu(env, addr, oi, retaddr); |
2037 | } | |
2038 | ||
2039 | static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, | |
9002ffcb | 2040 | MemOpIdx oi, uintptr_t retaddr) |
2dd92606 | 2041 | { |
f83bcecb | 2042 | validate_memop(oi, MO_BEUW); |
be5c4787 | 2043 | return load_helper(env, addr, oi, retaddr, MO_BEUW, false, |
2dd92606 | 2044 | full_be_lduw_mmu); |
eed56642 AB |
2045 | } |
2046 | ||
fc1bc777 | 2047 | tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, |
9002ffcb | 2048 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2049 | { |
2dd92606 RH |
2050 | return full_be_lduw_mmu(env, addr, oi, retaddr); |
2051 | } | |
2052 | ||
2053 | static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, | |
9002ffcb | 2054 | MemOpIdx oi, uintptr_t retaddr) |
2dd92606 | 2055 | { |
f83bcecb | 2056 | validate_memop(oi, MO_LEUL); |
be5c4787 | 2057 | return load_helper(env, addr, oi, retaddr, MO_LEUL, false, |
2dd92606 | 2058 | full_le_ldul_mmu); |
eed56642 AB |
2059 | } |
2060 | ||
fc1bc777 | 2061 | tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, |
9002ffcb | 2062 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2063 | { |
2dd92606 RH |
2064 | return full_le_ldul_mmu(env, addr, oi, retaddr); |
2065 | } | |
2066 | ||
2067 | static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, | |
9002ffcb | 2068 | MemOpIdx oi, uintptr_t retaddr) |
2dd92606 | 2069 | { |
f83bcecb | 2070 | validate_memop(oi, MO_BEUL); |
be5c4787 | 2071 | return load_helper(env, addr, oi, retaddr, MO_BEUL, false, |
2dd92606 | 2072 | full_be_ldul_mmu); |
eed56642 AB |
2073 | } |
2074 | ||
fc1bc777 | 2075 | tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, |
9002ffcb | 2076 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2077 | { |
2dd92606 | 2078 | return full_be_ldul_mmu(env, addr, oi, retaddr); |
eed56642 AB |
2079 | } |
2080 | ||
fc1bc777 | 2081 | uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, |
9002ffcb | 2082 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2083 | { |
f83bcecb | 2084 | validate_memop(oi, MO_LEQ); |
be5c4787 | 2085 | return load_helper(env, addr, oi, retaddr, MO_LEQ, false, |
2dd92606 | 2086 | helper_le_ldq_mmu); |
eed56642 AB |
2087 | } |
2088 | ||
fc1bc777 | 2089 | uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, |
9002ffcb | 2090 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2091 | { |
f83bcecb | 2092 | validate_memop(oi, MO_BEQ); |
be5c4787 | 2093 | return load_helper(env, addr, oi, retaddr, MO_BEQ, false, |
2dd92606 | 2094 | helper_be_ldq_mmu); |
eed56642 AB |
2095 | } |
2096 | ||
2097 | /* | |
2098 | * Provide signed versions of the load routines as well. We can of course | |
2099 | * avoid this for 64-bit data, or for 32-bit data on 32-bit host. | |
2100 | */ | |
2101 | ||
2102 | ||
2103 | tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, | |
9002ffcb | 2104 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 AB |
2105 | { |
2106 | return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); | |
2107 | } | |
2108 | ||
2109 | tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, | |
9002ffcb | 2110 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 AB |
2111 | { |
2112 | return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); | |
2113 | } | |
2114 | ||
2115 | tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, | |
9002ffcb | 2116 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 AB |
2117 | { |
2118 | return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); | |
2119 | } | |
2120 | ||
2121 | tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, | |
9002ffcb | 2122 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 AB |
2123 | { |
2124 | return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); | |
2125 | } | |
2126 | ||
2127 | tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, | |
9002ffcb | 2128 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 AB |
2129 | { |
2130 | return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); | |
2131 | } | |
2132 | ||
d03f1408 RH |
2133 | /* |
2134 | * Load helpers for cpu_ldst.h. | |
2135 | */ | |
2136 | ||
2137 | static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, | |
f83bcecb RH |
2138 | MemOpIdx oi, uintptr_t retaddr, |
2139 | FullLoadHelper *full_load) | |
d03f1408 | 2140 | { |
d03f1408 RH |
2141 | uint64_t ret; |
2142 | ||
0583f775 | 2143 | trace_guest_ld_before_exec(env_cpu(env), addr, oi); |
d03f1408 | 2144 | ret = full_load(env, addr, oi, retaddr); |
37aff087 | 2145 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); |
d03f1408 RH |
2146 | return ret; |
2147 | } | |
2148 | ||
f83bcecb | 2149 | uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) |
d03f1408 | 2150 | { |
f83bcecb | 2151 | return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu); |
d03f1408 RH |
2152 | } |
2153 | ||
f83bcecb RH |
2154 | uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, |
2155 | MemOpIdx oi, uintptr_t ra) | |
cfe04a4b | 2156 | { |
f83bcecb | 2157 | return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu); |
cfe04a4b RH |
2158 | } |
2159 | ||
f83bcecb RH |
2160 | uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, |
2161 | MemOpIdx oi, uintptr_t ra) | |
cfe04a4b | 2162 | { |
f83bcecb | 2163 | return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu); |
cfe04a4b RH |
2164 | } |
2165 | ||
f83bcecb RH |
2166 | uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, |
2167 | MemOpIdx oi, uintptr_t ra) | |
b9e60257 | 2168 | { |
f83bcecb | 2169 | return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu); | 
b9e60257 RH |
2170 | } |
2171 | ||
f83bcecb RH |
2172 | uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, |
2173 | MemOpIdx oi, uintptr_t ra) | |
cfe04a4b | 2174 | { |
f83bcecb | 2175 | return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu); |
cfe04a4b RH |
2176 | } |
2177 | ||
f83bcecb RH |
2178 | uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, |
2179 | MemOpIdx oi, uintptr_t ra) | |
cfe04a4b | 2180 | { |
f83bcecb | 2181 | return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu); |
cfe04a4b RH |
2182 | } |
2183 | ||
f83bcecb RH |
2184 | uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, |
2185 | MemOpIdx oi, uintptr_t ra) | |
cfe04a4b | 2186 | { |
f83bcecb | 2187 | return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu); |
cfe04a4b RH |
2188 | } |
2189 | ||
eed56642 AB |
2190 | /* |
2191 | * Store Helpers | |
2192 | */ | |
2193 | ||
80d9d1c6 RH |
2194 | static inline void QEMU_ALWAYS_INLINE |
2195 | store_memop(void *haddr, uint64_t val, MemOp op) | |
2196 | { | |
2197 | switch (op) { | |
2198 | case MO_UB: | |
2199 | stb_p(haddr, val); | |
2200 | break; | |
2201 | case MO_BEUW: | |
2202 | stw_be_p(haddr, val); | |
2203 | break; | |
2204 | case MO_LEUW: | |
2205 | stw_le_p(haddr, val); | |
2206 | break; | |
2207 | case MO_BEUL: | |
2208 | stl_be_p(haddr, val); | |
2209 | break; | |
2210 | case MO_LEUL: | |
2211 | stl_le_p(haddr, val); | |
2212 | break; | |
2213 | case MO_BEQ: | |
2214 | stq_be_p(haddr, val); | |
2215 | break; | |
2216 | case MO_LEQ: | |
2217 | stq_le_p(haddr, val); | |
2218 | break; | |
2219 | default: | |
2220 | qemu_build_not_reached(); | |
2221 | } | |
2222 | } | |
2223 | ||
f83bcecb RH |
2224 | static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
2225 | MemOpIdx oi, uintptr_t retaddr); | |
2226 | ||
6b8b622e RH |
2227 | static void __attribute__((noinline)) |
2228 | store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, | |
2229 | uintptr_t retaddr, size_t size, uintptr_t mmu_idx, | |
2230 | bool big_endian) | |
2231 | { | |
2232 | const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); | |
2233 | uintptr_t index, index2; | |
2234 | CPUTLBEntry *entry, *entry2; | |
2235 | target_ulong page2, tlb_addr, tlb_addr2; | |
9002ffcb | 2236 | MemOpIdx oi; |
6b8b622e RH |
2237 | size_t size2; |
2238 | int i; | |
2239 | ||
2240 | /* | |
2241 | * Ensure the second page is in the TLB. Note that the first page | |
2242 | * is already guaranteed to be filled, and that the second page | |
2243 | * cannot evict the first. | |
2244 | */ | |
2245 | page2 = (addr + size) & TARGET_PAGE_MASK; | |
2246 | size2 = (addr + size) & ~TARGET_PAGE_MASK; | |
2247 | index2 = tlb_index(env, mmu_idx, page2); | |
2248 | entry2 = tlb_entry(env, mmu_idx, page2); | |
2249 | ||
2250 | tlb_addr2 = tlb_addr_write(entry2); | |
2251 | if (!tlb_hit_page(tlb_addr2, page2)) { | |
2252 | if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { | |
2253 | tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, | |
2254 | mmu_idx, retaddr); | |
2255 | index2 = tlb_index(env, mmu_idx, page2); | |
2256 | entry2 = tlb_entry(env, mmu_idx, page2); | |
2257 | } | |
2258 | tlb_addr2 = tlb_addr_write(entry2); | |
2259 | } | |
2260 | ||
2261 | index = tlb_index(env, mmu_idx, addr); | |
2262 | entry = tlb_entry(env, mmu_idx, addr); | |
2263 | tlb_addr = tlb_addr_write(entry); | |
2264 | ||
2265 | /* | |
2266 | * Handle watchpoints. Since this may trap, all checks | |
2267 | * must happen before any store. | |
2268 | */ | |
2269 | if (unlikely(tlb_addr & TLB_WATCHPOINT)) { | |
2270 | cpu_check_watchpoint(env_cpu(env), addr, size - size2, | |
2271 | env_tlb(env)->d[mmu_idx].iotlb[index].attrs, | |
2272 | BP_MEM_WRITE, retaddr); | |
2273 | } | |
2274 | if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { | |
2275 | cpu_check_watchpoint(env_cpu(env), page2, size2, | |
2276 | env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, | |
2277 | BP_MEM_WRITE, retaddr); | |
2278 | } | |
2279 | ||
2280 | /* | |
2281 | * XXX: not efficient, but simple. | |
2282 | * This loop must go in the forward direction to avoid issues | |
2283 | * with self-modifying code in Windows 64-bit. | |
2284 | */ | |
2285 | oi = make_memop_idx(MO_UB, mmu_idx); | |
2286 | if (big_endian) { | |
2287 | for (i = 0; i < size; ++i) { | |
2288 | /* Big-endian extract. */ | |
2289 | uint8_t val8 = val >> (((size - 1) * 8) - (i * 8)); | |
f83bcecb | 2290 | full_stb_mmu(env, addr + i, val8, oi, retaddr); |
6b8b622e RH |
2291 | } |
2292 | } else { | |
2293 | for (i = 0; i < size; ++i) { | |
2294 | /* Little-endian extract. */ | |
2295 | uint8_t val8 = val >> (i * 8); | |
f83bcecb | 2296 | full_stb_mmu(env, addr + i, val8, oi, retaddr); |
6b8b622e RH |
2297 | } |
2298 | } | |
2299 | } | |
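/*
 * Illustrative: for a 4-byte big-endian store of val, the loop above emits
 * (uint8_t)(val >> 24), (val >> 16), (val >> 8) and (val >> 0) at
 * addr + 0 .. addr + 3, i.e. most significant byte first, matching what a
 * single big-endian store would have written had it not crossed the page.
 */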
2300 | ||
c6b716cd | 2301 | static inline void QEMU_ALWAYS_INLINE |
4601f8d1 | 2302 | store_helper(CPUArchState *env, target_ulong addr, uint64_t val, |
9002ffcb | 2303 | MemOpIdx oi, uintptr_t retaddr, MemOp op) |
eed56642 AB |
2304 | { |
2305 | uintptr_t mmu_idx = get_mmuidx(oi); | |
2306 | uintptr_t index = tlb_index(env, mmu_idx, addr); | |
2307 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | |
2308 | target_ulong tlb_addr = tlb_addr_write(entry); | |
2309 | const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); | |
2310 | unsigned a_bits = get_alignment_bits(get_memop(oi)); | |
2311 | void *haddr; | |
be5c4787 | 2312 | size_t size = memop_size(op); |
eed56642 AB |
2313 | |
2314 | /* Handle CPU specific unaligned behaviour */ | |
2315 | if (addr & ((1 << a_bits) - 1)) { | |
29a0af61 | 2316 | cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, |
eed56642 AB |
2317 | mmu_idx, retaddr); |
2318 | } | |
2319 | ||
2320 | /* If the TLB entry is for a different page, reload and try again. */ | |
2321 | if (!tlb_hit(tlb_addr, addr)) { | |
2322 | if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, | |
2323 | addr & TARGET_PAGE_MASK)) { | |
29a0af61 | 2324 | tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, |
eed56642 AB |
2325 | mmu_idx, retaddr); |
2326 | index = tlb_index(env, mmu_idx, addr); | |
2327 | entry = tlb_entry(env, mmu_idx, addr); | |
2328 | } | |
2329 | tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; | |
2330 | } | |
2331 | ||
50b107c5 | 2332 | /* Handle anything that isn't just a straight memory access. */ |
eed56642 | 2333 | if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { |
50b107c5 | 2334 | CPUIOTLBEntry *iotlbentry; |
5b87b3e6 | 2335 | bool need_swap; |
50b107c5 RH |
2336 | |
2337 | /* For anything that is unaligned, recurse through byte stores. */ | |
eed56642 AB |
2338 | if ((addr & (size - 1)) != 0) { |
2339 | goto do_unaligned_access; | |
2340 | } | |
50b107c5 RH |
2341 | |
2342 | iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; | |
2343 | ||
2344 | /* Handle watchpoints. */ | |
2345 | if (unlikely(tlb_addr & TLB_WATCHPOINT)) { | |
2346 | /* On watchpoint hit, this will longjmp out. */ | |
2347 | cpu_check_watchpoint(env_cpu(env), addr, size, | |
2348 | iotlbentry->attrs, BP_MEM_WRITE, retaddr); | |
50b107c5 RH |
2349 | } |
2350 | ||
5b87b3e6 RH |
2351 | need_swap = size > 1 && (tlb_addr & TLB_BSWAP); |
2352 | ||
50b107c5 | 2353 | /* Handle I/O access. */ |
08565552 | 2354 | if (tlb_addr & TLB_MMIO) { |
5b87b3e6 RH |
2355 | io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, |
2356 | op ^ (need_swap * MO_BSWAP)); | |
2357 | return; | |
2358 | } | |
2359 | ||
7b0d792c RH |
2360 | /* Ignore writes to ROM. */ |
2361 | if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { | |
2362 | return; | |
2363 | } | |
2364 | ||
08565552 RH |
2365 | /* Handle clean RAM pages. */ |
2366 | if (tlb_addr & TLB_NOTDIRTY) { | |
707526ad | 2367 | notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); |
08565552 RH |
2368 | } |
2369 | ||
707526ad RH |
2370 | haddr = (void *)((uintptr_t)addr + entry->addend); |
2371 | ||
5b87b3e6 RH |
2372 | /* |
2373 | * Keep these two store_memop separate to ensure that the compiler | |
2374 | * is able to fold the entire function to a single instruction. | |
2375 | * There is a build-time assert inside to remind you of this. ;-) | |
2376 | */ | |
2377 | if (unlikely(need_swap)) { | |
2378 | store_memop(haddr, val, op ^ MO_BSWAP); | |
2379 | } else { | |
2380 | store_memop(haddr, val, op); | |
2381 | } | |
eed56642 AB |
2382 | return; |
2383 | } | |
2384 | ||
2385 | /* Handle slow unaligned access (it spans two pages or IO). */ | |
2386 | if (size > 1 | |
2387 | && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 | |
2388 | >= TARGET_PAGE_SIZE)) { | |
eed56642 | 2389 | do_unaligned_access: |
6b8b622e RH |
2390 | store_helper_unaligned(env, addr, val, retaddr, size, |
2391 | mmu_idx, memop_big_endian(op)); | |
eed56642 AB |
2392 | return; |
2393 | } | |
2394 | ||
2395 | haddr = (void *)((uintptr_t)addr + entry->addend); | |
80d9d1c6 | 2396 | store_memop(haddr, val, op); |
eed56642 AB |
2397 | } |
2398 | ||
f83bcecb RH |
2399 | static void __attribute__((noinline)) |
2400 | full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | |
2401 | MemOpIdx oi, uintptr_t retaddr) | |
eed56642 | 2402 | { |
f83bcecb | 2403 | validate_memop(oi, MO_UB); |
be5c4787 | 2404 | store_helper(env, addr, val, oi, retaddr, MO_UB); |
eed56642 AB |
2405 | } |
2406 | ||
f83bcecb RH |
2407 | void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, |
2408 | MemOpIdx oi, uintptr_t retaddr) | |
2409 | { | |
2410 | full_stb_mmu(env, addr, val, oi, retaddr); | |
2411 | } | |
2412 | ||
2413 | static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | |
2414 | MemOpIdx oi, uintptr_t retaddr) | |
2415 | { | |
2416 | validate_memop(oi, MO_LEUW); | |
2417 | store_helper(env, addr, val, oi, retaddr, MO_LEUW); | |
2418 | } | |
2419 | ||
fc1bc777 | 2420 | void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, |
9002ffcb | 2421 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2422 | { |
f83bcecb RH |
2423 | full_le_stw_mmu(env, addr, val, oi, retaddr); |
2424 | } | |
2425 | ||
2426 | static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | |
2427 | MemOpIdx oi, uintptr_t retaddr) | |
2428 | { | |
2429 | validate_memop(oi, MO_BEUW); | |
2430 | store_helper(env, addr, val, oi, retaddr, MO_BEUW); | |
eed56642 AB |
2431 | } |
2432 | ||
fc1bc777 | 2433 | void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, |
9002ffcb | 2434 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2435 | { |
f83bcecb RH |
2436 | full_be_stw_mmu(env, addr, val, oi, retaddr); |
2437 | } | |
2438 | ||
2439 | static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | |
2440 | MemOpIdx oi, uintptr_t retaddr) | |
2441 | { | |
2442 | validate_memop(oi, MO_LEUL); | |
2443 | store_helper(env, addr, val, oi, retaddr, MO_LEUL); | |
eed56642 AB |
2444 | } |
2445 | ||
fc1bc777 | 2446 | void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, |
9002ffcb | 2447 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2448 | { |
f83bcecb RH |
2449 | full_le_stl_mmu(env, addr, val, oi, retaddr); |
2450 | } | |
2451 | ||
2452 | static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | |
2453 | MemOpIdx oi, uintptr_t retaddr) | |
2454 | { | |
2455 | validate_memop(oi, MO_BEUL); | |
2456 | store_helper(env, addr, val, oi, retaddr, MO_BEUL); | |
eed56642 AB |
2457 | } |
2458 | ||
fc1bc777 | 2459 | void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, |
9002ffcb | 2460 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2461 | { |
f83bcecb | 2462 | full_be_stl_mmu(env, addr, val, oi, retaddr); |
eed56642 AB |
2463 | } |
2464 | ||
fc1bc777 | 2465 | void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
9002ffcb | 2466 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2467 | { |
f83bcecb | 2468 | validate_memop(oi, MO_LEQ); |
be5c4787 | 2469 | store_helper(env, addr, val, oi, retaddr, MO_LEQ); |
eed56642 AB |
2470 | } |
2471 | ||
fc1bc777 | 2472 | void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
9002ffcb | 2473 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2474 | { |
f83bcecb | 2475 | validate_memop(oi, MO_BEQ); |
be5c4787 | 2476 | store_helper(env, addr, val, oi, retaddr, MO_BEQ); |
eed56642 | 2477 | } |
0f590e74 | 2478 | |
d03f1408 RH |
2479 | /* |
2480 | * Store Helpers for cpu_ldst.h | |
2481 | */ | |
2482 | ||
f83bcecb RH |
2483 | typedef void FullStoreHelper(CPUArchState *env, target_ulong addr, |
2484 | uint64_t val, MemOpIdx oi, uintptr_t retaddr); | |
d03f1408 | 2485 | |
f83bcecb RH |
2486 | static inline void cpu_store_helper(CPUArchState *env, target_ulong addr, |
2487 | uint64_t val, MemOpIdx oi, uintptr_t ra, | |
2488 | FullStoreHelper *full_store) | |
2489 | { | |
0583f775 | 2490 | trace_guest_st_before_exec(env_cpu(env), addr, oi); |
f83bcecb | 2491 | full_store(env, addr, val, oi, ra); |
37aff087 | 2492 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); |
d03f1408 RH |
2493 | } |
2494 | ||
f83bcecb RH |
2495 | void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, |
2496 | MemOpIdx oi, uintptr_t retaddr) | |
d03f1408 | 2497 | { |
f83bcecb | 2498 | cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu); |
d03f1408 RH |
2499 | } |
2500 | ||
f83bcecb RH |
2501 | void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val, |
2502 | MemOpIdx oi, uintptr_t retaddr) | |
d03f1408 | 2503 | { |
f83bcecb | 2504 | cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu); |
d03f1408 RH |
2505 | } |
2506 | ||
f83bcecb RH |
2507 | void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val, |
2508 | MemOpIdx oi, uintptr_t retaddr) | |
d03f1408 | 2509 | { |
f83bcecb | 2510 | cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu); |
d03f1408 RH |
2511 | } |
2512 | ||
f83bcecb RH |
2513 | void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
2514 | MemOpIdx oi, uintptr_t retaddr) | |
b9e60257 | 2515 | { |
f83bcecb | 2516 | cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu); |
b9e60257 RH |
2517 | } |
2518 | ||
f83bcecb RH |
2519 | void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val, |
2520 | MemOpIdx oi, uintptr_t retaddr) | |
d03f1408 | 2521 | { |
f83bcecb | 2522 | cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu); |
b9e60257 RH |
2523 | } |
2524 | ||
f83bcecb RH |
2525 | void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val, |
2526 | MemOpIdx oi, uintptr_t retaddr) | |
b9e60257 | 2527 | { |
f83bcecb | 2528 | cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu); |
b9e60257 RH |
2529 | } |
2530 | ||
f83bcecb RH |
2531 | void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
2532 | MemOpIdx oi, uintptr_t retaddr) | |
b9e60257 | 2533 | { |
f83bcecb | 2534 | cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu); |
d03f1408 RH |
2535 | } |
2536 | ||
f83bcecb | 2537 | #include "ldst_common.c.inc" |
cfe04a4b | 2538 | |
be9568b4 RH |
2539 | /* |
2540 | * First set of functions passes in OI and RETADDR. | |
2541 | * This makes them callable from other helpers. | |
2542 | */ | |
c482cb11 | 2543 | |
c482cb11 | 2544 | #define ATOMIC_NAME(X) \ |
be9568b4 | 2545 | glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) |
a754f7f3 | 2546 | |
707526ad | 2547 | #define ATOMIC_MMU_CLEANUP |
504f73f7 | 2548 | #define ATOMIC_MMU_IDX get_mmuidx(oi) |
c482cb11 | 2549 | |
139c1837 | 2550 | #include "atomic_common.c.inc" |
c482cb11 RH |
2551 | |
2552 | #define DATA_SIZE 1 | |
2553 | #include "atomic_template.h" | |
2554 | ||
2555 | #define DATA_SIZE 2 | |
2556 | #include "atomic_template.h" | |
2557 | ||
2558 | #define DATA_SIZE 4 | |
2559 | #include "atomic_template.h" | |
2560 | ||
df79b996 | 2561 | #ifdef CONFIG_ATOMIC64 |
c482cb11 RH |
2562 | #define DATA_SIZE 8 |
2563 | #include "atomic_template.h" | |
df79b996 | 2564 | #endif |
c482cb11 | 2565 | |
e6cd4bb5 | 2566 | #if HAVE_CMPXCHG128 || HAVE_ATOMIC128 |
7ebee43e RH |
2567 | #define DATA_SIZE 16 |
2568 | #include "atomic_template.h" | |
2569 | #endif | |
2570 | ||
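/*
 * Caller-side sketch (illustrative; cmpv, newv and mmu_idx are hypothetical
 * locals of a target helper): the templates above generate entry points
 * such as
 *
 *     uint32_t old = cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv,
 *                                               make_memop_idx(MO_LEUL,
 *                                                              mmu_idx),
 *                                               GETPC());
 */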
c482cb11 RH |
2571 | /* Code access functions. */ |
2572 | ||
fc4120a3 | 2573 | static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, |
9002ffcb | 2574 | MemOpIdx oi, uintptr_t retaddr) |
2dd92606 | 2575 | { |
fc4120a3 | 2576 | return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); |
2dd92606 RH |
2577 | } |
2578 | ||
fc4120a3 | 2579 | uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) |
eed56642 | 2580 | { |
9002ffcb | 2581 | MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); |
fc4120a3 | 2582 | return full_ldub_code(env, addr, oi, 0); |
2dd92606 RH |
2583 | } |
2584 | ||
fc4120a3 | 2585 | static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, |
9002ffcb | 2586 | MemOpIdx oi, uintptr_t retaddr) |
2dd92606 | 2587 | { |
fc4120a3 | 2588 | return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); |
eed56642 | 2589 | } |
0cac1b66 | 2590 | |
fc4120a3 | 2591 | uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) |
eed56642 | 2592 | { |
9002ffcb | 2593 | MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); |
fc4120a3 | 2594 | return full_lduw_code(env, addr, oi, 0); |
2dd92606 RH |
2595 | } |
2596 | ||
fc4120a3 | 2597 | static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, |
9002ffcb | 2598 | MemOpIdx oi, uintptr_t retaddr) |
2dd92606 | 2599 | { |
fc4120a3 | 2600 | return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); |
eed56642 | 2601 | } |
0cac1b66 | 2602 | |
fc4120a3 | 2603 | uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) |
eed56642 | 2604 | { |
9002ffcb | 2605 | MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); |
fc4120a3 | 2606 | return full_ldl_code(env, addr, oi, 0); |
eed56642 AB |
2607 | } |
2608 | ||
fc4120a3 | 2609 | static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, |
9002ffcb | 2610 | MemOpIdx oi, uintptr_t retaddr) |
eed56642 | 2611 | { |
fc4120a3 | 2612 | return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code); |
eed56642 AB |
2613 | } |
2614 | ||
fc4120a3 | 2615 | uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) |
eed56642 | 2616 | { |
9002ffcb | 2617 | MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true)); |
fc4120a3 | 2618 | return full_ldq_code(env, addr, oi, 0); |
eed56642 | 2619 | } |