Git Repo - qemu.git/blame - exec.c
exec: separate current memory map from the one being built
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
0d09e41a 34#include "hw/xen/xen.h"
1de7afc9
PB
35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
db7b5426 53//#define DEBUG_SUBPAGE
1196be37 54
e2eef170 55#if !defined(CONFIG_USER_ONLY)
9fa3e853 56int phys_ram_fd;
74576198 57static int in_migration;
94a6b54f 58
a3161038 59RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
60
61static MemoryRegion *system_memory;
309cb471 62static MemoryRegion *system_io;
62152b8a 63
f6790af6
AK
64AddressSpace address_space_io;
65AddressSpace address_space_memory;
2673a5da 66
0844e007 67MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 68static MemoryRegion io_mem_unassigned;
0e0df1e2 69
e2eef170 70#endif
9fa3e853 71
9349b4f9 72CPUArchState *first_cpu;
6a00d601
FB
73/* current CPU in the current thread. It is only valid inside
74 cpu_exec() */
9349b4f9 75DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 76/* 0 = Do not count executed instructions.
bf20dc07 77 1 = Precise instruction counting.
2e70f6ef 78 2 = Adaptive rate instruction counting. */
5708fc66 79int use_icount;
6a00d601 80
e2eef170 81#if !defined(CONFIG_USER_ONLY)
4346ae3e 82
1db8abb1
PB
83typedef struct PhysPageEntry PhysPageEntry;
84
85struct PhysPageEntry {
86 uint16_t is_leaf : 1;
87 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
88 uint16_t ptr : 15;
89};
90
91struct AddressSpaceDispatch {
92 /* This is a multi-level map on the physical address space.
93 * The bottom level has pointers to MemoryRegionSections.
94 */
95 PhysPageEntry phys_map;
96 MemoryListener listener;
acc9d80b 97 AddressSpace *as;
1db8abb1
PB
98};
99
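/* A subpage stands in for a target page that is shared by more than one
 * MemoryRegionSection (or covered by a section that does not fill the whole
 * page).  sub_section[] maps the byte offset within the page back to the
 * owning section; see register_subpage() and address_space_lookup_region(). */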
90260c6c
JK
100#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
101typedef struct subpage_t {
102 MemoryRegion iomem;
acc9d80b 103 AddressSpace *as;
90260c6c
JK
104 hwaddr base;
105 uint16_t sub_section[TARGET_PAGE_SIZE];
106} subpage_t;
107
b41aac4f
LPF
108#define PHYS_SECTION_UNASSIGNED 0
109#define PHYS_SECTION_NOTDIRTY 1
110#define PHYS_SECTION_ROM 2
111#define PHYS_SECTION_WATCH 3
5312bd8b 112
9affd6fc
PB
113typedef PhysPageEntry Node[L2_SIZE];
114
115typedef struct PhysPageMap {
116 unsigned sections_nb;
117 unsigned sections_nb_alloc;
118 unsigned nodes_nb;
119 unsigned nodes_nb_alloc;
120 Node *nodes;
121 MemoryRegionSection *sections;
122} PhysPageMap;
123
124static PhysPageMap cur_map;
125static PhysPageMap next_map;
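/* cur_map is the table consulted by lookups (phys_page_find() and the IOTLB),
 * while next_map is rebuilt from scratch during a memory-listener update:
 * core_begin() clears it, the per-AddressSpace listeners populate it, and
 * core_commit() swaps it into cur_map and frees the old contents. */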
d6f2ea22 126
07f07b31 127#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 128
e2eef170 129static void io_mem_init(void);
62152b8a 130static void memory_map_init(void);
8b9c99d9 131static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 132
1ec9b909 133static MemoryRegion io_mem_watch;
6658ffb8 134#endif
fd6ce8f6 135
6d9a1304 136#if !defined(CONFIG_USER_ONLY)
d6f2ea22 137
f7bf5461 138static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 139{
9affd6fc
PB
140 if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
141 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
142 16);
143 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
144 next_map.nodes_nb + nodes);
145 next_map.nodes = g_renew(Node, next_map.nodes,
146 next_map.nodes_nb_alloc);
d6f2ea22 147 }
f7bf5461
AK
148}
149
150static uint16_t phys_map_node_alloc(void)
151{
152 unsigned i;
153 uint16_t ret;
154
9affd6fc 155 ret = next_map.nodes_nb++;
f7bf5461 156 assert(ret != PHYS_MAP_NODE_NIL);
9affd6fc 157 assert(ret != next_map.nodes_nb_alloc);
d6f2ea22 158 for (i = 0; i < L2_SIZE; ++i) {
9affd6fc
PB
159 next_map.nodes[ret][i].is_leaf = 0;
160 next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 161 }
f7bf5461 162 return ret;
d6f2ea22
AK
163}
164
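/* Recursively populate the radix tree rooted at *lp.  At each level an entry
 * covers "step" (1 << level * L2_BITS) pages; runs that are aligned to and at
 * least as large as "step" become leaves pointing at the section index
 * "leaf", anything smaller recurses one level down.  New interior nodes are
 * allocated from next_map. */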
a8170e5e
AK
165static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
166 hwaddr *nb, uint16_t leaf,
2999097b 167 int level)
f7bf5461
AK
168{
169 PhysPageEntry *p;
170 int i;
a8170e5e 171 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 172
07f07b31 173 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800 174 lp->ptr = phys_map_node_alloc();
9affd6fc 175 p = next_map.nodes[lp->ptr];
f7bf5461
AK
176 if (level == 0) {
177 for (i = 0; i < L2_SIZE; i++) {
07f07b31 178 p[i].is_leaf = 1;
b41aac4f 179 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 180 }
67c4d23c 181 }
f7bf5461 182 } else {
9affd6fc 183 p = next_map.nodes[lp->ptr];
92e873b9 184 }
2999097b 185 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 186
2999097b 187 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
188 if ((*index & (step - 1)) == 0 && *nb >= step) {
189 lp->is_leaf = true;
c19e8800 190 lp->ptr = leaf;
07f07b31
AK
191 *index += step;
192 *nb -= step;
2999097b
AK
193 } else {
194 phys_page_set_level(lp, index, nb, leaf, level - 1);
195 }
196 ++lp;
f7bf5461
AK
197 }
198}
199
ac1970fb 200static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 201 hwaddr index, hwaddr nb,
2999097b 202 uint16_t leaf)
f7bf5461 203{
2999097b 204 /* Wildly overreserve - it doesn't matter much. */
07f07b31 205 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 206
ac1970fb 207 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
208}
209
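/* Walk the radix tree from lp down to a leaf for page number "index" and
 * return the matching MemoryRegionSection; a missing node resolves to the
 * unassigned section. */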
9affd6fc
PB
210static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
211 Node *nodes, MemoryRegionSection *sections)
92e873b9 212{
31ab2b4a
AK
213 PhysPageEntry *p;
214 int i;
f1f6e3b8 215
07f07b31 216 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 217 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 218 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 219 }
9affd6fc 220 p = nodes[lp.ptr];
31ab2b4a 221 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 222 }
9affd6fc 223 return &sections[lp.ptr];
f3705d53
AK
224}
225
e5548617
BS
226bool memory_region_is_unassigned(MemoryRegion *mr)
227{
2a8e7499 228 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 229 && mr != &io_mem_watch;
fd6ce8f6 230}
149f54b5 231
9f029603 232static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
90260c6c
JK
233 hwaddr addr,
234 bool resolve_subpage)
9f029603 235{
90260c6c
JK
236 MemoryRegionSection *section;
237 subpage_t *subpage;
238
9affd6fc
PB
239 section = phys_page_find(as->dispatch->phys_map, addr >> TARGET_PAGE_BITS,
240 cur_map.nodes, cur_map.sections);
90260c6c
JK
241 if (resolve_subpage && section->mr->subpage) {
242 subpage = container_of(section->mr, subpage_t, iomem);
9affd6fc 243 section = &cur_map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
244 }
245 return section;
9f029603
JK
246}
247
90260c6c
JK
248static MemoryRegionSection *
249address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
250 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
251{
252 MemoryRegionSection *section;
253 Int128 diff;
254
90260c6c 255 section = address_space_lookup_region(as, addr, resolve_subpage);
149f54b5
PB
256 /* Compute offset within MemoryRegionSection */
257 addr -= section->offset_within_address_space;
258
259 /* Compute offset within MemoryRegion */
260 *xlat = addr + section->offset_within_region;
261
262 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 263 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
264 return section;
265}
90260c6c 266
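/* Translate addr within as, following any IOMMUs on the way: each iteration
 * resolves the address through mr->iommu_ops->translate(), keeps *plen within
 * the translated range, and continues in iotlb.target_as until a non-IOMMU
 * region is reached.  A permission miss yields io_mem_unassigned. */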
5c8a00ce
PB
267MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
268 hwaddr *xlat, hwaddr *plen,
269 bool is_write)
90260c6c 270{
30951157
AK
271 IOMMUTLBEntry iotlb;
272 MemoryRegionSection *section;
273 MemoryRegion *mr;
274 hwaddr len = *plen;
275
276 for (;;) {
277 section = address_space_translate_internal(as, addr, &addr, plen, true);
278 mr = section->mr;
279
280 if (!mr->iommu_ops) {
281 break;
282 }
283
284 iotlb = mr->iommu_ops->translate(mr, addr);
285 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
286 | (addr & iotlb.addr_mask));
287 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
288 if (!(iotlb.perm & (1 << is_write))) {
289 mr = &io_mem_unassigned;
290 break;
291 }
292
293 as = iotlb.target_as;
294 }
295
296 *plen = len;
297 *xlat = addr;
298 return mr;
90260c6c
JK
299}
300
301MemoryRegionSection *
302address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
303 hwaddr *plen)
304{
30951157
AK
305 MemoryRegionSection *section;
306 section = address_space_translate_internal(as, addr, xlat, plen, false);
307
308 assert(!section->mr->iommu_ops);
309 return section;
90260c6c 310}
5b6dd868 311#endif
fd6ce8f6 312
5b6dd868 313void cpu_exec_init_all(void)
fdbb84d1 314{
5b6dd868 315#if !defined(CONFIG_USER_ONLY)
b2a8658e 316 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
317 memory_map_init();
318 io_mem_init();
fdbb84d1 319#endif
5b6dd868 320}
fdbb84d1 321
b170fce3 322#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
323
324static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 325{
259186a7 326 CPUState *cpu = opaque;
a513fe19 327
5b6dd868
BS
328 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
329 version_id is increased. */
259186a7
AF
330 cpu->interrupt_request &= ~0x01;
331 tlb_flush(cpu->env_ptr, 1);
5b6dd868
BS
332
333 return 0;
a513fe19 334}
7501267e 335
1a1562f5 336const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
337 .name = "cpu_common",
338 .version_id = 1,
339 .minimum_version_id = 1,
340 .minimum_version_id_old = 1,
341 .post_load = cpu_common_post_load,
342 .fields = (VMStateField []) {
259186a7
AF
343 VMSTATE_UINT32(halted, CPUState),
344 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
345 VMSTATE_END_OF_LIST()
346 }
347};
1a1562f5 348
5b6dd868 349#endif
ea041c0e 350
38d8f5c8 351CPUState *qemu_get_cpu(int index)
ea041c0e 352{
5b6dd868 353 CPUArchState *env = first_cpu;
38d8f5c8 354 CPUState *cpu = NULL;
ea041c0e 355
5b6dd868 356 while (env) {
55e5c285
AF
357 cpu = ENV_GET_CPU(env);
358 if (cpu->cpu_index == index) {
5b6dd868 359 break;
55e5c285 360 }
5b6dd868 361 env = env->next_cpu;
ea041c0e 362 }
5b6dd868 363
d76fddae 364 return env ? cpu : NULL;
ea041c0e
FB
365}
366
d6b9e0d6
MT
367void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
368{
369 CPUArchState *env = first_cpu;
370
371 while (env) {
372 func(ENV_GET_CPU(env), data);
373 env = env->next_cpu;
374 }
375}
376
5b6dd868 377void cpu_exec_init(CPUArchState *env)
ea041c0e 378{
5b6dd868 379 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 380 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868
BS
381 CPUArchState **penv;
382 int cpu_index;
383
384#if defined(CONFIG_USER_ONLY)
385 cpu_list_lock();
386#endif
387 env->next_cpu = NULL;
388 penv = &first_cpu;
389 cpu_index = 0;
390 while (*penv != NULL) {
391 penv = &(*penv)->next_cpu;
392 cpu_index++;
393 }
55e5c285 394 cpu->cpu_index = cpu_index;
1b1ed8dc 395 cpu->numa_node = 0;
5b6dd868
BS
396 QTAILQ_INIT(&env->breakpoints);
397 QTAILQ_INIT(&env->watchpoints);
398#ifndef CONFIG_USER_ONLY
399 cpu->thread_id = qemu_get_thread_id();
400#endif
401 *penv = env;
402#if defined(CONFIG_USER_ONLY)
403 cpu_list_unlock();
404#endif
259186a7 405 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
5b6dd868 406#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
407 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
408 cpu_save, cpu_load, env);
b170fce3 409 assert(cc->vmsd == NULL);
5b6dd868 410#endif
b170fce3
AF
411 if (cc->vmsd != NULL) {
412 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
413 }
ea041c0e
FB
414}
415
1fddef4b 416#if defined(TARGET_HAS_ICE)
94df27fd 417#if defined(CONFIG_USER_ONLY)
9349b4f9 418static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
419{
420 tb_invalidate_phys_page_range(pc, pc + 1, 0);
421}
422#else
1e7855a5
MF
423static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
424{
9d70c4b7
MF
425 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
426 (pc & ~TARGET_PAGE_MASK));
1e7855a5 427}
c27004ec 428#endif
94df27fd 429#endif /* TARGET_HAS_ICE */
d720b93d 430
c527ee8f 431#if defined(CONFIG_USER_ONLY)
9349b4f9 432void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
433
434{
435}
436
9349b4f9 437int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
438 int flags, CPUWatchpoint **watchpoint)
439{
440 return -ENOSYS;
441}
442#else
6658ffb8 443/* Add a watchpoint. */
9349b4f9 444int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 445 int flags, CPUWatchpoint **watchpoint)
6658ffb8 446{
b4051334 447 target_ulong len_mask = ~(len - 1);
c0ce998e 448 CPUWatchpoint *wp;
6658ffb8 449
b4051334 450 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
451 if ((len & (len - 1)) || (addr & ~len_mask) ||
452 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
453 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
454 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
455 return -EINVAL;
456 }
7267c094 457 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
458
459 wp->vaddr = addr;
b4051334 460 wp->len_mask = len_mask;
a1d1bb31
AL
461 wp->flags = flags;
462
2dc9f411 463 /* keep all GDB-injected watchpoints in front */
c0ce998e 464 if (flags & BP_GDB)
72cf2d4f 465 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 466 else
72cf2d4f 467 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 468
6658ffb8 469 tlb_flush_page(env, addr);
a1d1bb31
AL
470
471 if (watchpoint)
472 *watchpoint = wp;
473 return 0;
6658ffb8
PB
474}
475
a1d1bb31 476/* Remove a specific watchpoint. */
9349b4f9 477int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 478 int flags)
6658ffb8 479{
b4051334 480 target_ulong len_mask = ~(len - 1);
a1d1bb31 481 CPUWatchpoint *wp;
6658ffb8 482
72cf2d4f 483 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 484 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 485 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 486 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
487 return 0;
488 }
489 }
a1d1bb31 490 return -ENOENT;
6658ffb8
PB
491}
492
a1d1bb31 493/* Remove a specific watchpoint by reference. */
9349b4f9 494void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 495{
72cf2d4f 496 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 497
a1d1bb31
AL
498 tlb_flush_page(env, watchpoint->vaddr);
499
7267c094 500 g_free(watchpoint);
a1d1bb31
AL
501}
502
503/* Remove all matching watchpoints. */
9349b4f9 504void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 505{
c0ce998e 506 CPUWatchpoint *wp, *next;
a1d1bb31 507
72cf2d4f 508 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
509 if (wp->flags & mask)
510 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 511 }
7d03f82f 512}
c527ee8f 513#endif
7d03f82f 514
a1d1bb31 515/* Add a breakpoint. */
9349b4f9 516int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 517 CPUBreakpoint **breakpoint)
4c3a88a2 518{
1fddef4b 519#if defined(TARGET_HAS_ICE)
c0ce998e 520 CPUBreakpoint *bp;
3b46e624 521
7267c094 522 bp = g_malloc(sizeof(*bp));
4c3a88a2 523
a1d1bb31
AL
524 bp->pc = pc;
525 bp->flags = flags;
526
2dc9f411 527 /* keep all GDB-injected breakpoints in front */
c0ce998e 528 if (flags & BP_GDB)
72cf2d4f 529 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 530 else
72cf2d4f 531 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 532
d720b93d 533 breakpoint_invalidate(env, pc);
a1d1bb31
AL
534
535 if (breakpoint)
536 *breakpoint = bp;
4c3a88a2
FB
537 return 0;
538#else
a1d1bb31 539 return -ENOSYS;
4c3a88a2
FB
540#endif
541}
542
a1d1bb31 543/* Remove a specific breakpoint. */
9349b4f9 544int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 545{
7d03f82f 546#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
547 CPUBreakpoint *bp;
548
72cf2d4f 549 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
550 if (bp->pc == pc && bp->flags == flags) {
551 cpu_breakpoint_remove_by_ref(env, bp);
552 return 0;
553 }
7d03f82f 554 }
a1d1bb31
AL
555 return -ENOENT;
556#else
557 return -ENOSYS;
7d03f82f
EI
558#endif
559}
560
a1d1bb31 561/* Remove a specific breakpoint by reference. */
9349b4f9 562void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 563{
1fddef4b 564#if defined(TARGET_HAS_ICE)
72cf2d4f 565 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 566
a1d1bb31
AL
567 breakpoint_invalidate(env, breakpoint->pc);
568
7267c094 569 g_free(breakpoint);
a1d1bb31
AL
570#endif
571}
572
573/* Remove all matching breakpoints. */
9349b4f9 574void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
575{
576#if defined(TARGET_HAS_ICE)
c0ce998e 577 CPUBreakpoint *bp, *next;
a1d1bb31 578
72cf2d4f 579 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
580 if (bp->flags & mask)
581 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 582 }
4c3a88a2
FB
583#endif
584}
585
c33a346e
FB
586/* enable or disable single step mode. EXCP_DEBUG is returned by the
587 CPU loop after each instruction */
9349b4f9 588void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 589{
1fddef4b 590#if defined(TARGET_HAS_ICE)
c33a346e
FB
591 if (env->singlestep_enabled != enabled) {
592 env->singlestep_enabled = enabled;
e22a25c9
AL
593 if (kvm_enabled())
594 kvm_update_guest_debug(env, 0);
595 else {
ccbb4d44 596 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
597 /* XXX: only flush what is necessary */
598 tb_flush(env);
599 }
c33a346e
FB
600 }
601#endif
602}
603
9349b4f9 604void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e 605{
878096ee 606 CPUState *cpu = ENV_GET_CPU(env);
7501267e 607 va_list ap;
493ae1f0 608 va_list ap2;
7501267e
FB
609
610 va_start(ap, fmt);
493ae1f0 611 va_copy(ap2, ap);
7501267e
FB
612 fprintf(stderr, "qemu: fatal: ");
613 vfprintf(stderr, fmt, ap);
614 fprintf(stderr, "\n");
878096ee 615 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
616 if (qemu_log_enabled()) {
617 qemu_log("qemu: fatal: ");
618 qemu_log_vprintf(fmt, ap2);
619 qemu_log("\n");
6fd2a026 620 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 621 qemu_log_flush();
93fcfe39 622 qemu_log_close();
924edcae 623 }
493ae1f0 624 va_end(ap2);
f9373291 625 va_end(ap);
fd052bf6
RV
626#if defined(CONFIG_USER_ONLY)
627 {
628 struct sigaction act;
629 sigfillset(&act.sa_mask);
630 act.sa_handler = SIG_DFL;
631 sigaction(SIGABRT, &act, NULL);
632 }
633#endif
7501267e
FB
634 abort();
635}
636
9349b4f9 637CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 638{
9349b4f9
AF
639 CPUArchState *new_env = cpu_init(env->cpu_model_str);
640 CPUArchState *next_cpu = new_env->next_cpu;
5a38f081
AL
641#if defined(TARGET_HAS_ICE)
642 CPUBreakpoint *bp;
643 CPUWatchpoint *wp;
644#endif
645
9349b4f9 646 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 647
55e5c285 648 /* Preserve chaining. */
c5be9f08 649 new_env->next_cpu = next_cpu;
5a38f081
AL
650
651 /* Clone all break/watchpoints.
652 Note: Once we support ptrace with hw-debug register access, make sure
653 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
654 QTAILQ_INIT(&env->breakpoints);
655 QTAILQ_INIT(&env->watchpoints);
5a38f081 656#if defined(TARGET_HAS_ICE)
72cf2d4f 657 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
658 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
659 }
72cf2d4f 660 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
661 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
662 wp->flags, NULL);
663 }
664#endif
665
c5be9f08
TS
666 return new_env;
667}
668
0124311e 669#if !defined(CONFIG_USER_ONLY)
d24981d3
JQ
670static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
671 uintptr_t length)
672{
673 uintptr_t start1;
674
675 /* we modify the TLB cache so that the dirty bit will be set again
676 when accessing the range */
677 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
678 /* Check that we don't span multiple blocks - this breaks the
679 address comparisons below. */
680 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
681 != (end - 1) - start) {
682 abort();
683 }
684 cpu_tlb_reset_dirty_all(start1, length);
685
686}
687
5579c7f3 688/* Note: start and end must be within the same ram block. */
c227f099 689void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 690 int dirty_flags)
1ccde1cb 691{
d24981d3 692 uintptr_t length;
1ccde1cb
FB
693
694 start &= TARGET_PAGE_MASK;
695 end = TARGET_PAGE_ALIGN(end);
696
697 length = end - start;
698 if (length == 0)
699 return;
f7c11b53 700 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 701
d24981d3
JQ
702 if (tcg_enabled()) {
703 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 704 }
1ccde1cb
FB
705}
706
8b9c99d9 707static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 708{
f6f3fbca 709 int ret = 0;
74576198 710 in_migration = enable;
f6f3fbca 711 return ret;
74576198
AL
712}
713
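/* Build the value stored in a TLB entry's iotlb field: for RAM pages it is
 * the page's ram_addr_t ORed with the NOTDIRTY or ROM dummy section, for
 * everything else it is the section's index in cur_map plus the offset into
 * the region; pages containing watchpoints are forced through
 * PHYS_SECTION_WATCH with TLB_MMIO set. */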
a8170e5e 714hwaddr memory_region_section_get_iotlb(CPUArchState *env,
149f54b5
PB
715 MemoryRegionSection *section,
716 target_ulong vaddr,
717 hwaddr paddr, hwaddr xlat,
718 int prot,
719 target_ulong *address)
e5548617 720{
a8170e5e 721 hwaddr iotlb;
e5548617
BS
722 CPUWatchpoint *wp;
723
cc5bea60 724 if (memory_region_is_ram(section->mr)) {
e5548617
BS
725 /* Normal RAM. */
726 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 727 + xlat;
e5548617 728 if (!section->readonly) {
b41aac4f 729 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 730 } else {
b41aac4f 731 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
732 }
733 } else {
9affd6fc 734 iotlb = section - cur_map.sections;
149f54b5 735 iotlb += xlat;
e5548617
BS
736 }
737
738 /* Make accesses to pages with watchpoints go via the
739 watchpoint trap routines. */
740 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
741 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
742 /* Avoid trapping reads of pages with a write breakpoint. */
743 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 744 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
745 *address |= TLB_MMIO;
746 break;
747 }
748 }
749 }
750
751 return iotlb;
752}
9fa3e853
FB
753#endif /* defined(CONFIG_USER_ONLY) */
754
e2eef170 755#if !defined(CONFIG_USER_ONLY)
8da3ff18 756
c227f099 757static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 758 uint16_t section);
acc9d80b 759static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 760
5312bd8b
AK
761static uint16_t phys_section_add(MemoryRegionSection *section)
762{
68f3f65b
PB
763 /* The physical section number is ORed with a page-aligned
764 * pointer to produce the iotlb entries. Thus it should
765 * never overflow into the page-aligned value.
766 */
9affd6fc 767 assert(next_map.sections_nb < TARGET_PAGE_SIZE);
68f3f65b 768
9affd6fc
PB
769 if (next_map.sections_nb == next_map.sections_nb_alloc) {
770 next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
771 16);
772 next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
773 next_map.sections_nb_alloc);
5312bd8b 774 }
9affd6fc 775 next_map.sections[next_map.sections_nb] = *section;
dfde4e6e 776 memory_region_ref(section->mr);
9affd6fc 777 return next_map.sections_nb++;
5312bd8b
AK
778}
779
058bc4b5
PB
780static void phys_section_destroy(MemoryRegion *mr)
781{
dfde4e6e
PB
782 memory_region_unref(mr);
783
058bc4b5
PB
784 if (mr->subpage) {
785 subpage_t *subpage = container_of(mr, subpage_t, iomem);
786 memory_region_destroy(&subpage->iomem);
787 g_free(subpage);
788 }
789}
790
9affd6fc 791static void phys_sections_clear(PhysPageMap *map)
5312bd8b 792{
9affd6fc
PB
793 while (map->sections_nb > 0) {
794 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
795 phys_section_destroy(section->mr);
796 }
9affd6fc
PB
797 g_free(map->sections);
798 g_free(map->nodes);
5312bd8b
AK
799}
800
ac1970fb 801static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
802{
803 subpage_t *subpage;
a8170e5e 804 hwaddr base = section->offset_within_address_space
0f0cb164 805 & TARGET_PAGE_MASK;
9affd6fc
PB
806 MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
807 next_map.nodes, next_map.sections);
0f0cb164
AK
808 MemoryRegionSection subsection = {
809 .offset_within_address_space = base,
052e87b0 810 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 811 };
a8170e5e 812 hwaddr start, end;
0f0cb164 813
f3705d53 814 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 815
f3705d53 816 if (!(existing->mr->subpage)) {
acc9d80b 817 subpage = subpage_init(d->as, base);
0f0cb164 818 subsection.mr = &subpage->iomem;
ac1970fb 819 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 820 phys_section_add(&subsection));
0f0cb164 821 } else {
f3705d53 822 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
823 }
824 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 825 end = start + int128_get64(section->size) - 1;
0f0cb164
AK
826 subpage_register(subpage, start, end, phys_section_add(section));
827}
828
829
052e87b0
PB
830static void register_multipage(AddressSpaceDispatch *d,
831 MemoryRegionSection *section)
33417e70 832{
a8170e5e 833 hwaddr start_addr = section->offset_within_address_space;
5312bd8b 834 uint16_t section_index = phys_section_add(section);
052e87b0
PB
835 uint64_t num_pages = int128_get64(int128_rshift(section->size,
836 TARGET_PAGE_BITS));
dd81124b 837
733d5ef5
PB
838 assert(num_pages);
839 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
840}
841
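/* Add a MemoryRegionSection to the dispatch map being built.  Any part that
 * is not page-aligned (a head before the first page boundary, a tail smaller
 * than a page, or a page whose offset within the region is unaligned) goes
 * through register_subpage(); the page-aligned bulk goes through
 * register_multipage(). */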
ac1970fb 842static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 843{
ac1970fb 844 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
99b9cc06 845 MemoryRegionSection now = *section, remain = *section;
052e87b0 846 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 847
733d5ef5
PB
848 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
849 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
850 - now.offset_within_address_space;
851
052e87b0 852 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 853 register_subpage(d, &now);
733d5ef5 854 } else {
052e87b0 855 now.size = int128_zero();
733d5ef5 856 }
052e87b0
PB
857 while (int128_ne(remain.size, now.size)) {
858 remain.size = int128_sub(remain.size, now.size);
859 remain.offset_within_address_space += int128_get64(now.size);
860 remain.offset_within_region += int128_get64(now.size);
69b67646 861 now = remain;
052e87b0 862 if (int128_lt(remain.size, page_size)) {
733d5ef5
PB
863 register_subpage(d, &now);
864 } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
052e87b0 865 now.size = page_size;
ac1970fb 866 register_subpage(d, &now);
69b67646 867 } else {
052e87b0 868 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 869 register_multipage(d, &now);
69b67646 870 }
0f0cb164
AK
871 }
872}
873
62a2744c
SY
874void qemu_flush_coalesced_mmio_buffer(void)
875{
876 if (kvm_enabled())
877 kvm_flush_coalesced_mmio_buffer();
878}
879
b2a8658e
UD
880void qemu_mutex_lock_ramlist(void)
881{
882 qemu_mutex_lock(&ram_list.mutex);
883}
884
885void qemu_mutex_unlock_ramlist(void)
886{
887 qemu_mutex_unlock(&ram_list.mutex);
888}
889
c902760f
MT
890#if defined(__linux__) && !defined(TARGET_S390X)
891
892#include <sys/vfs.h>
893
894#define HUGETLBFS_MAGIC 0x958458f6
895
896static long gethugepagesize(const char *path)
897{
898 struct statfs fs;
899 int ret;
900
901 do {
9742bf26 902 ret = statfs(path, &fs);
c902760f
MT
903 } while (ret != 0 && errno == EINTR);
904
905 if (ret != 0) {
9742bf26
YT
906 perror(path);
907 return 0;
c902760f
MT
908 }
909
910 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 911 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
912
913 return fs.f_bsize;
914}
915
04b16653
AW
916static void *file_ram_alloc(RAMBlock *block,
917 ram_addr_t memory,
918 const char *path)
c902760f
MT
919{
920 char *filename;
8ca761f6
PF
921 char *sanitized_name;
922 char *c;
c902760f
MT
923 void *area;
924 int fd;
925#ifdef MAP_POPULATE
926 int flags;
927#endif
928 unsigned long hpagesize;
929
930 hpagesize = gethugepagesize(path);
931 if (!hpagesize) {
9742bf26 932 return NULL;
c902760f
MT
933 }
934
935 if (memory < hpagesize) {
936 return NULL;
937 }
938
939 if (kvm_enabled() && !kvm_has_sync_mmu()) {
940 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
941 return NULL;
942 }
943
8ca761f6
PF
944 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
945 sanitized_name = g_strdup(block->mr->name);
946 for (c = sanitized_name; *c != '\0'; c++) {
947 if (*c == '/')
948 *c = '_';
949 }
950
951 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
952 sanitized_name);
953 g_free(sanitized_name);
c902760f
MT
954
955 fd = mkstemp(filename);
956 if (fd < 0) {
9742bf26 957 perror("unable to create backing store for hugepages");
e4ada482 958 g_free(filename);
9742bf26 959 return NULL;
c902760f
MT
960 }
961 unlink(filename);
e4ada482 962 g_free(filename);
c902760f
MT
963
964 memory = (memory+hpagesize-1) & ~(hpagesize-1);
965
966 /*
967 * ftruncate is not supported by hugetlbfs in older
968 * hosts, so don't bother bailing out on errors.
969 * If anything goes wrong with it under other filesystems,
970 * mmap will fail.
971 */
972 if (ftruncate(fd, memory))
9742bf26 973 perror("ftruncate");
c902760f
MT
974
975#ifdef MAP_POPULATE
976 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
977 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
978 * to sidestep this quirk.
979 */
980 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
981 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
982#else
983 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
984#endif
985 if (area == MAP_FAILED) {
9742bf26
YT
986 perror("file_ram_alloc: can't mmap RAM pages");
987 close(fd);
988 return (NULL);
c902760f 989 }
04b16653 990 block->fd = fd;
c902760f
MT
991 return area;
992}
993#endif
994
d17b5288 995static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
996{
997 RAMBlock *block, *next_block;
3e837b2c 998 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 999
49cd9ac6
SH
1000 assert(size != 0); /* it would hand out same offset multiple times */
1001
a3161038 1002 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1003 return 0;
1004
a3161038 1005 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1006 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1007
1008 end = block->offset + block->length;
1009
a3161038 1010 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1011 if (next_block->offset >= end) {
1012 next = MIN(next, next_block->offset);
1013 }
1014 }
1015 if (next - end >= size && next - end < mingap) {
3e837b2c 1016 offset = end;
04b16653
AW
1017 mingap = next - end;
1018 }
1019 }
3e837b2c
AW
1020
1021 if (offset == RAM_ADDR_MAX) {
1022 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1023 (uint64_t)size);
1024 abort();
1025 }
1026
04b16653
AW
1027 return offset;
1028}
1029
652d7ec2 1030ram_addr_t last_ram_offset(void)
d17b5288
AW
1031{
1032 RAMBlock *block;
1033 ram_addr_t last = 0;
1034
a3161038 1035 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1036 last = MAX(last, block->offset + block->length);
1037
1038 return last;
1039}
1040
ddb97f1d
JB
1041static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1042{
1043 int ret;
1044 QemuOpts *machine_opts;
1045
1046 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1047 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1048 if (machine_opts &&
1049 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
1050 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1051 if (ret) {
1052 perror("qemu_madvise");
1053 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1054 "but dump_guest_core=off specified\n");
1055 }
1056 }
1057}
1058
c5705a77 1059void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
1060{
1061 RAMBlock *new_block, *block;
1062
c5705a77 1063 new_block = NULL;
a3161038 1064 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
1065 if (block->offset == addr) {
1066 new_block = block;
1067 break;
1068 }
1069 }
1070 assert(new_block);
1071 assert(!new_block->idstr[0]);
84b89d78 1072
09e5ab63
AL
1073 if (dev) {
1074 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1075 if (id) {
1076 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1077 g_free(id);
84b89d78
CM
1078 }
1079 }
1080 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1081
b2a8658e
UD
1082 /* This assumes the iothread lock is taken here too. */
1083 qemu_mutex_lock_ramlist();
a3161038 1084 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1085 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1086 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1087 new_block->idstr);
1088 abort();
1089 }
1090 }
b2a8658e 1091 qemu_mutex_unlock_ramlist();
c5705a77
AK
1092}
1093
8490fc78
LC
1094static int memory_try_enable_merging(void *addr, size_t len)
1095{
1096 QemuOpts *opts;
1097
1098 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1099 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1100 /* disabled by the user */
1101 return 0;
1102 }
1103
1104 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1105}
1106
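/* Allocate (or adopt, when host != NULL) the backing memory for a new
 * RAMBlock and assign it an offset in ram_addr_t space.  Backing comes from
 * -mem-path (hugetlbfs), Xen, KVM, or an anonymous mmap; the block list stays
 * sorted from largest to smallest and the dirty bitmap is extended to cover
 * the new block. */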
c5705a77
AK
1107ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1108 MemoryRegion *mr)
1109{
abb26d63 1110 RAMBlock *block, *new_block;
c5705a77
AK
1111
1112 size = TARGET_PAGE_ALIGN(size);
1113 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1114
b2a8658e
UD
1115 /* This assumes the iothread lock is taken here too. */
1116 qemu_mutex_lock_ramlist();
7c637366 1117 new_block->mr = mr;
432d268c 1118 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1119 if (host) {
1120 new_block->host = host;
cd19cfa2 1121 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1122 } else {
1123 if (mem_path) {
c902760f 1124#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1125 new_block->host = file_ram_alloc(new_block, size, mem_path);
1126 if (!new_block->host) {
6eebf958 1127 new_block->host = qemu_anon_ram_alloc(size);
8490fc78 1128 memory_try_enable_merging(new_block->host, size);
6977dfe6 1129 }
c902760f 1130#else
6977dfe6
YT
1131 fprintf(stderr, "-mem-path option unsupported\n");
1132 exit(1);
c902760f 1133#endif
6977dfe6 1134 } else {
868bb33f 1135 if (xen_enabled()) {
fce537d4 1136 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1137 } else if (kvm_enabled()) {
1138 /* some s390/kvm configurations have special constraints */
6eebf958 1139 new_block->host = kvm_ram_alloc(size);
432d268c 1140 } else {
6eebf958 1141 new_block->host = qemu_anon_ram_alloc(size);
432d268c 1142 }
8490fc78 1143 memory_try_enable_merging(new_block->host, size);
6977dfe6 1144 }
c902760f 1145 }
94a6b54f
PB
1146 new_block->length = size;
1147
abb26d63
PB
1148 /* Keep the list sorted from biggest to smallest block. */
1149 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1150 if (block->length < new_block->length) {
1151 break;
1152 }
1153 }
1154 if (block) {
1155 QTAILQ_INSERT_BEFORE(block, new_block, next);
1156 } else {
1157 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1158 }
0d6d3c87 1159 ram_list.mru_block = NULL;
94a6b54f 1160
f798b07f 1161 ram_list.version++;
b2a8658e 1162 qemu_mutex_unlock_ramlist();
f798b07f 1163
7267c094 1164 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1165 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1166 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1167 0, size >> TARGET_PAGE_BITS);
1720aeee 1168 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1169
ddb97f1d 1170 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1171 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1172
6f0437e8
JK
1173 if (kvm_enabled())
1174 kvm_setup_guest_memory(new_block->host, size);
1175
94a6b54f
PB
1176 return new_block->offset;
1177}
e9a1ab19 1178
c5705a77 1179ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1180{
c5705a77 1181 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1182}
1183
1f2e98b6
AW
1184void qemu_ram_free_from_ptr(ram_addr_t addr)
1185{
1186 RAMBlock *block;
1187
b2a8658e
UD
1188 /* This assumes the iothread lock is taken here too. */
1189 qemu_mutex_lock_ramlist();
a3161038 1190 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1191 if (addr == block->offset) {
a3161038 1192 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1193 ram_list.mru_block = NULL;
f798b07f 1194 ram_list.version++;
7267c094 1195 g_free(block);
b2a8658e 1196 break;
1f2e98b6
AW
1197 }
1198 }
b2a8658e 1199 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1200}
1201
c227f099 1202void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1203{
04b16653
AW
1204 RAMBlock *block;
1205
b2a8658e
UD
1206 /* This assumes the iothread lock is taken here too. */
1207 qemu_mutex_lock_ramlist();
a3161038 1208 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1209 if (addr == block->offset) {
a3161038 1210 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1211 ram_list.mru_block = NULL;
f798b07f 1212 ram_list.version++;
cd19cfa2
HY
1213 if (block->flags & RAM_PREALLOC_MASK) {
1214 ;
1215 } else if (mem_path) {
04b16653
AW
1216#if defined (__linux__) && !defined(TARGET_S390X)
1217 if (block->fd) {
1218 munmap(block->host, block->length);
1219 close(block->fd);
1220 } else {
e7a09b92 1221 qemu_anon_ram_free(block->host, block->length);
04b16653 1222 }
fd28aa13
JK
1223#else
1224 abort();
04b16653
AW
1225#endif
1226 } else {
868bb33f 1227 if (xen_enabled()) {
e41d7c69 1228 xen_invalidate_map_cache_entry(block->host);
432d268c 1229 } else {
e7a09b92 1230 qemu_anon_ram_free(block->host, block->length);
432d268c 1231 }
04b16653 1232 }
7267c094 1233 g_free(block);
b2a8658e 1234 break;
04b16653
AW
1235 }
1236 }
b2a8658e 1237 qemu_mutex_unlock_ramlist();
04b16653 1238
e9a1ab19
FB
1239}
1240
cd19cfa2
HY
1241#ifndef _WIN32
1242void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1243{
1244 RAMBlock *block;
1245 ram_addr_t offset;
1246 int flags;
1247 void *area, *vaddr;
1248
a3161038 1249 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1250 offset = addr - block->offset;
1251 if (offset < block->length) {
1252 vaddr = block->host + offset;
1253 if (block->flags & RAM_PREALLOC_MASK) {
1254 ;
1255 } else {
1256 flags = MAP_FIXED;
1257 munmap(vaddr, length);
1258 if (mem_path) {
1259#if defined(__linux__) && !defined(TARGET_S390X)
1260 if (block->fd) {
1261#ifdef MAP_POPULATE
1262 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1263 MAP_PRIVATE;
1264#else
1265 flags |= MAP_PRIVATE;
1266#endif
1267 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1268 flags, block->fd, offset);
1269 } else {
1270 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1271 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1272 flags, -1, 0);
1273 }
fd28aa13
JK
1274#else
1275 abort();
cd19cfa2
HY
1276#endif
1277 } else {
1278#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1279 flags |= MAP_SHARED | MAP_ANONYMOUS;
1280 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1281 flags, -1, 0);
1282#else
1283 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1284 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1285 flags, -1, 0);
1286#endif
1287 }
1288 if (area != vaddr) {
f15fbc4b
AP
1289 fprintf(stderr, "Could not remap addr: "
1290 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1291 length, addr);
1292 exit(1);
1293 }
8490fc78 1294 memory_try_enable_merging(vaddr, length);
ddb97f1d 1295 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1296 }
1297 return;
1298 }
1299 }
1300}
1301#endif /* !_WIN32 */
1302
1b5ec234 1303static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
dc828ca1 1304{
94a6b54f
PB
1305 RAMBlock *block;
1306
b2a8658e 1307 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1308 block = ram_list.mru_block;
1309 if (block && addr - block->offset < block->length) {
1310 goto found;
1311 }
a3161038 1312 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1313 if (addr - block->offset < block->length) {
0d6d3c87 1314 goto found;
f471a17e 1315 }
94a6b54f 1316 }
f471a17e
AW
1317
1318 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1319 abort();
1320
0d6d3c87
PB
1321found:
1322 ram_list.mru_block = block;
1b5ec234
PB
1323 return block;
1324}
1325
1326/* Return a host pointer to ram allocated with qemu_ram_alloc.
1327 With the exception of the softmmu code in this file, this should
1328 only be used for local memory (e.g. video ram) that the device owns,
1329 and knows it isn't going to access beyond the end of the block.
1330
1331 It should not be used for general purpose DMA.
1332 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1333 */
1334void *qemu_get_ram_ptr(ram_addr_t addr)
1335{
1336 RAMBlock *block = qemu_get_ram_block(addr);
1337
0d6d3c87
PB
1338 if (xen_enabled()) {
1339 /* We need to check if the requested address is in the RAM
1340 * because we don't want to map the entire memory in QEMU.
1341 * In that case just map until the end of the page.
1342 */
1343 if (block->offset == 0) {
1344 return xen_map_cache(addr, 0, 0);
1345 } else if (block->host == NULL) {
1346 block->host =
1347 xen_map_cache(block->offset, block->length, 1);
1348 }
1349 }
1350 return block->host + (addr - block->offset);
dc828ca1
PB
1351}
1352
0d6d3c87
PB
1353/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1354 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1355 *
1356 * ??? Is this still necessary?
b2e0a138 1357 */
8b9c99d9 1358static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1359{
1360 RAMBlock *block;
1361
b2a8658e 1362 /* The list is protected by the iothread lock here. */
a3161038 1363 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1364 if (addr - block->offset < block->length) {
868bb33f 1365 if (xen_enabled()) {
432d268c
JN
1366 /* We need to check if the requested address is in the RAM
1367 * because we don't want to map the entire memory in QEMU.
712c2b41 1368 * In that case just map until the end of the page.
432d268c
JN
1369 */
1370 if (block->offset == 0) {
e41d7c69 1371 return xen_map_cache(addr, 0, 0);
432d268c 1372 } else if (block->host == NULL) {
e41d7c69
JK
1373 block->host =
1374 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1375 }
1376 }
b2e0a138
MT
1377 return block->host + (addr - block->offset);
1378 }
1379 }
1380
1381 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1382 abort();
1383
1384 return NULL;
1385}
1386
38bee5dc
SS
1387/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1388 * but takes a size argument */
8b9c99d9 1389static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1390{
8ab934f9
SS
1391 if (*size == 0) {
1392 return NULL;
1393 }
868bb33f 1394 if (xen_enabled()) {
e41d7c69 1395 return xen_map_cache(addr, *size, 1);
868bb33f 1396 } else {
38bee5dc
SS
1397 RAMBlock *block;
1398
a3161038 1399 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1400 if (addr - block->offset < block->length) {
1401 if (addr - block->offset + *size > block->length)
1402 *size = block->length - addr + block->offset;
1403 return block->host + (addr - block->offset);
1404 }
1405 }
1406
1407 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1408 abort();
38bee5dc
SS
1409 }
1410}
1411
7443b437
PB
1412/* Some of the softmmu routines need to translate from a host pointer
1413 (typically a TLB entry) back to a ram offset. */
1b5ec234 1414MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1415{
94a6b54f
PB
1416 RAMBlock *block;
1417 uint8_t *host = ptr;
1418
868bb33f 1419 if (xen_enabled()) {
e41d7c69 1420 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1421 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1422 }
1423
23887b79
PB
1424 block = ram_list.mru_block;
1425 if (block && block->host && host - block->host < block->length) {
1426 goto found;
1427 }
1428
a3161038 1429 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1430 /* This case happens when the block is not mapped. */
1431 if (block->host == NULL) {
1432 continue;
1433 }
f471a17e 1434 if (host - block->host < block->length) {
23887b79 1435 goto found;
f471a17e 1436 }
94a6b54f 1437 }
432d268c 1438
1b5ec234 1439 return NULL;
23887b79
PB
1440
1441found:
1442 *ram_addr = block->offset + (host - block->host);
1b5ec234 1443 return block->mr;
e890261f 1444}
f471a17e 1445
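/* Writes to RAM pages that still contain translated code are routed through
 * io_mem_notdirty: the write first invalidates any TBs on the page, is then
 * performed on the underlying RAM, and the dirty flags are updated so the
 * page can be treated as ordinary RAM once the code has been flushed. */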
a8170e5e 1446static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1447 uint64_t val, unsigned size)
9fa3e853 1448{
3a7d929e 1449 int dirty_flags;
f7c11b53 1450 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1451 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
0e0df1e2 1452 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1453 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1454 }
0e0df1e2
AK
1455 switch (size) {
1456 case 1:
1457 stb_p(qemu_get_ram_ptr(ram_addr), val);
1458 break;
1459 case 2:
1460 stw_p(qemu_get_ram_ptr(ram_addr), val);
1461 break;
1462 case 4:
1463 stl_p(qemu_get_ram_ptr(ram_addr), val);
1464 break;
1465 default:
1466 abort();
3a7d929e 1467 }
f23db169 1468 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1469 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1470 /* we remove the notdirty callback only if the code has been
1471 flushed */
1472 if (dirty_flags == 0xff)
2e70f6ef 1473 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1474}
1475
b018ddf6
PB
1476static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1477 unsigned size, bool is_write)
1478{
1479 return is_write;
1480}
1481
0e0df1e2 1482static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1483 .write = notdirty_mem_write,
b018ddf6 1484 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1485 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1486};
1487
0f459d16 1488/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1489static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1490{
9349b4f9 1491 CPUArchState *env = cpu_single_env;
06d55cc1 1492 target_ulong pc, cs_base;
0f459d16 1493 target_ulong vaddr;
a1d1bb31 1494 CPUWatchpoint *wp;
06d55cc1 1495 int cpu_flags;
0f459d16 1496
06d55cc1
AL
1497 if (env->watchpoint_hit) {
1498 /* We re-entered the check after replacing the TB. Now raise
1499 * the debug interrupt so that is will trigger after the
1500 * current instruction. */
c3affe56 1501 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1502 return;
1503 }
2e70f6ef 1504 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1505 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1506 if ((vaddr == (wp->vaddr & len_mask) ||
1507 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1508 wp->flags |= BP_WATCHPOINT_HIT;
1509 if (!env->watchpoint_hit) {
1510 env->watchpoint_hit = wp;
5a316526 1511 tb_check_watchpoint(env);
6e140f28
AL
1512 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1513 env->exception_index = EXCP_DEBUG;
488d6577 1514 cpu_loop_exit(env);
6e140f28
AL
1515 } else {
1516 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1517 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1518 cpu_resume_from_signal(env, NULL);
6e140f28 1519 }
06d55cc1 1520 }
6e140f28
AL
1521 } else {
1522 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1523 }
1524 }
1525}
1526
6658ffb8
PB
1527/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1528 so these check for a hit then pass through to the normal out-of-line
1529 phys routines. */
a8170e5e 1530static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1531 unsigned size)
6658ffb8 1532{
1ec9b909
AK
1533 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1534 switch (size) {
1535 case 1: return ldub_phys(addr);
1536 case 2: return lduw_phys(addr);
1537 case 4: return ldl_phys(addr);
1538 default: abort();
1539 }
6658ffb8
PB
1540}
1541
a8170e5e 1542static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1543 uint64_t val, unsigned size)
6658ffb8 1544{
1ec9b909
AK
1545 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1546 switch (size) {
67364150
MF
1547 case 1:
1548 stb_phys(addr, val);
1549 break;
1550 case 2:
1551 stw_phys(addr, val);
1552 break;
1553 case 4:
1554 stl_phys(addr, val);
1555 break;
1ec9b909
AK
1556 default: abort();
1557 }
6658ffb8
PB
1558}
1559
1ec9b909
AK
1560static const MemoryRegionOps watch_mem_ops = {
1561 .read = watch_mem_read,
1562 .write = watch_mem_write,
1563 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1564};
6658ffb8 1565
a8170e5e 1566static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1567 unsigned len)
db7b5426 1568{
acc9d80b
JK
1569 subpage_t *subpage = opaque;
1570 uint8_t buf[4];
791af8c8 1571
db7b5426 1572#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1573 printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1574 subpage, len, addr);
db7b5426 1575#endif
acc9d80b
JK
1576 address_space_read(subpage->as, addr + subpage->base, buf, len);
1577 switch (len) {
1578 case 1:
1579 return ldub_p(buf);
1580 case 2:
1581 return lduw_p(buf);
1582 case 4:
1583 return ldl_p(buf);
1584 default:
1585 abort();
1586 }
db7b5426
BS
1587}
1588
a8170e5e 1589static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1590 uint64_t value, unsigned len)
db7b5426 1591{
acc9d80b
JK
1592 subpage_t *subpage = opaque;
1593 uint8_t buf[4];
1594
db7b5426 1595#if defined(DEBUG_SUBPAGE)
70c68e44 1596 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
acc9d80b
JK
1597 " value %"PRIx64"\n",
1598 __func__, subpage, len, addr, value);
db7b5426 1599#endif
acc9d80b
JK
1600 switch (len) {
1601 case 1:
1602 stb_p(buf, value);
1603 break;
1604 case 2:
1605 stw_p(buf, value);
1606 break;
1607 case 4:
1608 stl_p(buf, value);
1609 break;
1610 default:
1611 abort();
1612 }
1613 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1614}
1615
c353e4cc
PB
1616static bool subpage_accepts(void *opaque, hwaddr addr,
1617 unsigned size, bool is_write)
1618{
acc9d80b 1619 subpage_t *subpage = opaque;
c353e4cc 1620#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1621 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
1622 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1623#endif
1624
acc9d80b
JK
1625 return address_space_access_valid(subpage->as, addr + subpage->base,
1626 size, is_write);
c353e4cc
PB
1627}
1628
70c68e44
AK
1629static const MemoryRegionOps subpage_ops = {
1630 .read = subpage_read,
1631 .write = subpage_write,
c353e4cc 1632 .valid.accepts = subpage_accepts,
70c68e44 1633 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1634};
1635
c227f099 1636static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1637 uint16_t section)
db7b5426
BS
1638{
1639 int idx, eidx;
1640
1641 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1642 return -1;
1643 idx = SUBPAGE_IDX(start);
1644 eidx = SUBPAGE_IDX(end);
1645#if defined(DEBUG_SUBPAGE)
0bf9e31a 1646 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
1647 mmio, start, end, idx, eidx, memory);
1648#endif
db7b5426 1649 for (; idx <= eidx; idx++) {
5312bd8b 1650 mmio->sub_section[idx] = section;
db7b5426
BS
1651 }
1652
1653 return 0;
1654}
1655
acc9d80b 1656static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1657{
c227f099 1658 subpage_t *mmio;
db7b5426 1659
7267c094 1660 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1661
acc9d80b 1662 mmio->as = as;
1eec614b 1663 mmio->base = base;
2c9b15ca 1664 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
70c68e44 1665 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1666 mmio->iomem.subpage = true;
db7b5426 1667#if defined(DEBUG_SUBPAGE)
1eec614b
AL
1668 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1669 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 1670#endif
b41aac4f 1671 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1672
1673 return mmio;
1674}
1675
5312bd8b
AK
1676static uint16_t dummy_section(MemoryRegion *mr)
1677{
1678 MemoryRegionSection section = {
1679 .mr = mr,
1680 .offset_within_address_space = 0,
1681 .offset_within_region = 0,
052e87b0 1682 .size = int128_2_64(),
5312bd8b
AK
1683 };
1684
1685 return phys_section_add(&section);
1686}
1687
a8170e5e 1688MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1689{
9affd6fc 1690 return cur_map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1691}
1692
e9179ce1
AK
1693static void io_mem_init(void)
1694{
2c9b15ca
PB
1695 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1696 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1697 "unassigned", UINT64_MAX);
2c9b15ca 1698 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1699 "notdirty", UINT64_MAX);
2c9b15ca 1700 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1701 "watch", UINT64_MAX);
e9179ce1
AK
1702}
1703
ac1970fb
AK
1704static void mem_begin(MemoryListener *listener)
1705{
1706 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1707
ac1970fb
AK
1708 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1709}
1710
50c1e149
AK
1711static void core_begin(MemoryListener *listener)
1712{
b41aac4f
LPF
1713 uint16_t n;
1714
9affd6fc 1715 memset(&next_map, 0, sizeof(next_map));
b41aac4f
LPF
1716 n = dummy_section(&io_mem_unassigned);
1717 assert(n == PHYS_SECTION_UNASSIGNED);
1718 n = dummy_section(&io_mem_notdirty);
1719 assert(n == PHYS_SECTION_NOTDIRTY);
1720 n = dummy_section(&io_mem_rom);
1721 assert(n == PHYS_SECTION_ROM);
1722 n = dummy_section(&io_mem_watch);
1723 assert(n == PHYS_SECTION_WATCH);
50c1e149
AK
1724}
1725
9affd6fc
PB
1726/* This listener's commit run after the other AddressSpaceDispatch listeners'.
1727 * All AddressSpaceDispatch instances have switched to the next map.
1728 */
1729static void core_commit(MemoryListener *listener)
1730{
1731 PhysPageMap info = cur_map;
1732 cur_map = next_map;
1733 phys_sections_clear(&info);
1734}
1735
1d71148e 1736static void tcg_commit(MemoryListener *listener)
50c1e149 1737{
9349b4f9 1738 CPUArchState *env;
117712c3
AK
1739
1740 /* since each CPU stores ram addresses in its TLB cache, we must
1741 reset the modified entries */
1742 /* XXX: slow ! */
1743 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1744 tlb_flush(env, 1);
1745 }
50c1e149
AK
1746}
1747
93632747
AK
1748static void core_log_global_start(MemoryListener *listener)
1749{
1750 cpu_physical_memory_set_dirty_tracking(1);
1751}
1752
1753static void core_log_global_stop(MemoryListener *listener)
1754{
1755 cpu_physical_memory_set_dirty_tracking(0);
1756}
1757
93632747 1758static MemoryListener core_memory_listener = {
50c1e149 1759 .begin = core_begin,
9affd6fc 1760 .commit = core_commit,
93632747
AK
1761 .log_global_start = core_log_global_start,
1762 .log_global_stop = core_log_global_stop,
ac1970fb 1763 .priority = 1,
93632747
AK
1764};
1765
1d71148e
AK
1766static MemoryListener tcg_memory_listener = {
1767 .commit = tcg_commit,
1768};
1769
ac1970fb
AK
1770void address_space_init_dispatch(AddressSpace *as)
1771{
1772 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1773
1774 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1775 d->listener = (MemoryListener) {
1776 .begin = mem_begin,
1777 .region_add = mem_add,
1778 .region_nop = mem_add,
1779 .priority = 0,
1780 };
acc9d80b 1781 d->as = as;
ac1970fb
AK
1782 as->dispatch = d;
1783 memory_listener_register(&d->listener, as);
1784}
1785
83f3c251
AK
1786void address_space_destroy_dispatch(AddressSpace *as)
1787{
1788 AddressSpaceDispatch *d = as->dispatch;
1789
1790 memory_listener_unregister(&d->listener);
83f3c251
AK
1791 g_free(d);
1792 as->dispatch = NULL;
1793}
1794
62152b8a
AK
1795static void memory_map_init(void)
1796{
7267c094 1797 system_memory = g_malloc(sizeof(*system_memory));
2c9b15ca 1798 memory_region_init(system_memory, NULL, "system", INT64_MAX);
7dca8043 1799 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1800
7267c094 1801 system_io = g_malloc(sizeof(*system_io));
2c9b15ca 1802 memory_region_init(system_io, NULL, "io", 65536);
7dca8043 1803 address_space_init(&address_space_io, system_io, "I/O");
93632747 1804
f6790af6 1805 memory_listener_register(&core_memory_listener, &address_space_memory);
f6790af6 1806 memory_listener_register(&tcg_memory_listener, &address_space_memory);
62152b8a
AK
1807}
1808
1809MemoryRegion *get_system_memory(void)
1810{
1811 return system_memory;
1812}
1813
309cb471
AK
1814MemoryRegion *get_system_io(void)
1815{
1816 return system_io;
1817}
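
/* Editor's illustration (not part of the original file): a hedged sketch of
 * how board code consumes the region returned by get_system_memory().  The
 * mydev_* names, the 0xfe000000 base and the 4 KiB size are invented; only
 * headers this file already includes are assumed. */

static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;                          /* registers read as zero in this sketch */
}

static void mydev_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
    /* writes ignored in this sketch */
}

static const MemoryRegionOps mydev_ops = {
    .read = mydev_read,
    .write = mydev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemoryRegion mydev_mmio;

static void mydev_map_example(void)
{
    memory_region_init_io(&mydev_mmio, NULL, &mydev_ops, NULL,
                          "mydev-mmio", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0xfe000000, &mydev_mmio);
}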
1818
e2eef170
PB
1819#endif /* !defined(CONFIG_USER_ONLY) */
1820
13eb76e0
FB
1821/* physical memory access (slow version, mainly for debug) */
1822#if defined(CONFIG_USER_ONLY)
9349b4f9 1823int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1824 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1825{
1826 int l, flags;
1827 target_ulong page;
53a5960a 1828 void *p;
13eb76e0
FB
1829
1830 while (len > 0) {
1831 page = addr & TARGET_PAGE_MASK;
1832 l = (page + TARGET_PAGE_SIZE) - addr;
1833 if (l > len)
1834 l = len;
1835 flags = page_get_flags(page);
1836 if (!(flags & PAGE_VALID))
a68fe89c 1837 return -1;
13eb76e0
FB
1838 if (is_write) {
1839 if (!(flags & PAGE_WRITE))
a68fe89c 1840 return -1;
579a97f7 1841 /* XXX: this code should not depend on lock_user */
72fb7daa 1842 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1843 return -1;
72fb7daa
AJ
1844 memcpy(p, buf, l);
1845 unlock_user(p, addr, l);
13eb76e0
FB
1846 } else {
1847 if (!(flags & PAGE_READ))
a68fe89c 1848 return -1;
579a97f7 1849 /* XXX: this code should not depend on lock_user */
72fb7daa 1850 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1851 return -1;
72fb7daa 1852 memcpy(buf, p, l);
5b257578 1853 unlock_user(p, addr, 0);
13eb76e0
FB
1854 }
1855 len -= l;
1856 buf += l;
1857 addr += l;
1858 }
a68fe89c 1859 return 0;
13eb76e0 1860}
8df1cd07 1861
13eb76e0 1862#else
51d7a9eb 1863
a8170e5e
AK
1864static void invalidate_and_set_dirty(hwaddr addr,
1865 hwaddr length)
51d7a9eb
AP
1866{
1867 if (!cpu_physical_memory_is_dirty(addr)) {
1868 /* invalidate code */
1869 tb_invalidate_phys_page_range(addr, addr + length, 0);
1870 /* set dirty bit */
1871 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1872 }
e226939d 1873 xen_modified_memory(addr, length);
51d7a9eb
AP
1874}
1875
2bbfa05d
PB
1876static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1877{
1878 if (memory_region_is_ram(mr)) {
1879 return !(is_write && mr->readonly);
1880 }
1881 if (memory_region_is_romd(mr)) {
1882 return !is_write;
1883 }
1884
1885 return false;
1886}
1887
f52cc467 1888static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
82f2563f 1889{
f52cc467 1890 if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
82f2563f
PB
1891 return 4;
1892 }
f52cc467 1893 if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
82f2563f
PB
1894 return 2;
1895 }
1896 return 1;
1897}
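
A worked example of the splitting this helper produces, assuming the region's ops do not set .impl.unaligned: for a 4-byte buffer whose translated offset has (addr & 3) == 2, the loop in address_space_rw() below calls memory_access_size() on each iteration and ends up issuing two 16-bit accesses (at addr and addr + 2); an aligned 8-byte buffer is issued as two 32-bit accesses, since this helper never returns more than 4.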
1898
fd8aaa76 1899bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1900 int len, bool is_write)
13eb76e0 1901{
149f54b5 1902 hwaddr l;
13eb76e0 1903 uint8_t *ptr;
791af8c8 1904 uint64_t val;
149f54b5 1905 hwaddr addr1;
5c8a00ce 1906 MemoryRegion *mr;
fd8aaa76 1907 bool error = false;
3b46e624 1908
13eb76e0 1909 while (len > 0) {
149f54b5 1910 l = len;
5c8a00ce 1911 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1912
13eb76e0 1913 if (is_write) {
5c8a00ce
PB
1914 if (!memory_access_is_direct(mr, is_write)) {
1915 l = memory_access_size(mr, l, addr1);
6a00d601
FB
1916 /* XXX: could force cpu_single_env to NULL to avoid
1917 potential bugs */
82f2563f 1918 if (l == 4) {
1c213d19 1919 /* 32 bit write access */
c27004ec 1920 val = ldl_p(buf);
5c8a00ce 1921 error |= io_mem_write(mr, addr1, val, 4);
82f2563f 1922 } else if (l == 2) {
1c213d19 1923 /* 16 bit write access */
c27004ec 1924 val = lduw_p(buf);
5c8a00ce 1925 error |= io_mem_write(mr, addr1, val, 2);
13eb76e0 1926 } else {
1c213d19 1927 /* 8 bit write access */
c27004ec 1928 val = ldub_p(buf);
5c8a00ce 1929 error |= io_mem_write(mr, addr1, val, 1);
13eb76e0 1930 }
2bbfa05d 1931 } else {
5c8a00ce 1932 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1933 /* RAM case */
5579c7f3 1934 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1935 memcpy(ptr, buf, l);
51d7a9eb 1936 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1937 }
1938 } else {
5c8a00ce 1939 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1940 /* I/O case */
5c8a00ce 1941 l = memory_access_size(mr, l, addr1);
82f2563f 1942 if (l == 4) {
13eb76e0 1943 /* 32 bit read access */
5c8a00ce 1944 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1945 stl_p(buf, val);
82f2563f 1946 } else if (l == 2) {
13eb76e0 1947 /* 16 bit read access */
5c8a00ce 1948 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1949 stw_p(buf, val);
13eb76e0 1950 } else {
1c213d19 1951 /* 8 bit read access */
5c8a00ce 1952 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 1953 stb_p(buf, val);
13eb76e0
FB
1954 }
1955 } else {
1956 /* RAM case */
5c8a00ce 1957 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 1958 memcpy(buf, ptr, l);
13eb76e0
FB
1959 }
1960 }
1961 len -= l;
1962 buf += l;
1963 addr += l;
1964 }
fd8aaa76
PB
1965
1966 return error;
13eb76e0 1967}
8df1cd07 1968
fd8aaa76 1969bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1970 const uint8_t *buf, int len)
1971{
fd8aaa76 1972 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
1973}
1974
fd8aaa76 1975bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 1976{
fd8aaa76 1977 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
1978}
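
A minimal usage sketch of the entry points above: copying a descriptor out of guest memory through an address space. The descriptor address and helper name are invented; the return value is the accumulated MMIO error flag computed in address_space_rw().

/* hypothetical: fetch a 16-byte descriptor from guest-physical memory */
static bool fetch_guest_descriptor(AddressSpace *as, hwaddr desc_pa,
                                   uint8_t *desc)
{
    /* true means some part of the range hit a failing I/O access */
    return address_space_read(as, desc_pa, desc, 16);
}

/* e.g.: fetch_guest_descriptor(&address_space_memory, 0x1000, buf); */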
1979
1980
a8170e5e 1981void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1982 int len, int is_write)
1983{
fd8aaa76 1984 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
1985}
1986
d0ecd2aa 1987/* used for ROM loading: can write in RAM and ROM */
a8170e5e 1988void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1989 const uint8_t *buf, int len)
1990{
149f54b5 1991 hwaddr l;
d0ecd2aa 1992 uint8_t *ptr;
149f54b5 1993 hwaddr addr1;
5c8a00ce 1994 MemoryRegion *mr;
3b46e624 1995
d0ecd2aa 1996 while (len > 0) {
149f54b5 1997 l = len;
5c8a00ce
PB
1998 mr = address_space_translate(&address_space_memory,
1999 addr, &addr1, &l, true);
3b46e624 2000
5c8a00ce
PB
2001 if (!(memory_region_is_ram(mr) ||
2002 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2003 /* do nothing */
2004 } else {
5c8a00ce 2005 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2006 /* ROM/RAM case */
5579c7f3 2007 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2008 memcpy(ptr, buf, l);
51d7a9eb 2009 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2010 }
2011 len -= l;
2012 buf += l;
2013 addr += l;
2014 }
2015}
2016
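As a hedged example of the ROM-loading use named in the comment above: a firmware blob can be copied into a read-only region this way, whereas a plain cpu_physical_memory_write() treats ROM as I/O and the bytes would never reach its backing RAM. The 0xfffc0000 load address and the helper name are invented.

/* hypothetical: place a firmware image into ROM-backed guest memory */
static void load_firmware_example(const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, blob_size);
}
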
6d16c2f8 2017typedef struct {
d3e71559 2018 MemoryRegion *mr;
6d16c2f8 2019 void *buffer;
a8170e5e
AK
2020 hwaddr addr;
2021 hwaddr len;
6d16c2f8
AL
2022} BounceBuffer;
2023
2024static BounceBuffer bounce;
2025
ba223c29
AL
2026typedef struct MapClient {
2027 void *opaque;
2028 void (*callback)(void *opaque);
72cf2d4f 2029 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2030} MapClient;
2031
72cf2d4f
BS
2032static QLIST_HEAD(map_client_list, MapClient) map_client_list
2033 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2034
2035void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2036{
7267c094 2037 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2038
2039 client->opaque = opaque;
2040 client->callback = callback;
72cf2d4f 2041 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2042 return client;
2043}
2044
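A sketch of how a DMA user is expected to pair this with address_space_map(): when the single bounce buffer is busy and the map returns NULL, register a callback and retry from it once cpu_notify_map_clients() runs. The DMAExample state and its field layout are assumptions for the sketch.

/* hypothetical per-transfer state used by the retry callback below */
typedef struct DMAExample {
    AddressSpace *as;
    hwaddr addr;
    hwaddr len;
    bool is_write;
} DMAExample;

static void dma_map_retry(void *opaque)
{
    DMAExample *x = opaque;
    hwaddr len = x->len;
    void *ptr = address_space_map(x->as, x->addr, &len, x->is_write);

    if (!ptr) {
        /* bounce buffer still in use: wait for the next notification */
        cpu_register_map_client(x, dma_map_retry);
        return;
    }
    /* ... perform the (possibly shortened) transfer of 'len' bytes ... */
    address_space_unmap(x->as, ptr, len, x->is_write, len);
}
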
8b9c99d9 2045static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2046{
2047 MapClient *client = (MapClient *)_client;
2048
72cf2d4f 2049 QLIST_REMOVE(client, link);
7267c094 2050 g_free(client);
ba223c29
AL
2051}
2052
2053static void cpu_notify_map_clients(void)
2054{
2055 MapClient *client;
2056
72cf2d4f
BS
2057 while (!QLIST_EMPTY(&map_client_list)) {
2058 client = QLIST_FIRST(&map_client_list);
ba223c29 2059 client->callback(client->opaque);
34d5e948 2060 cpu_unregister_map_client(client);
ba223c29
AL
2061 }
2062}
2063
51644ab7
PB
2064bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2065{
5c8a00ce 2066 MemoryRegion *mr;
51644ab7
PB
2067 hwaddr l, xlat;
2068
2069 while (len > 0) {
2070 l = len;
5c8a00ce
PB
2071 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2072 if (!memory_access_is_direct(mr, is_write)) {
2073 l = memory_access_size(mr, l, addr);
2074 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2075 return false;
2076 }
2077 }
2078
2079 len -= l;
2080 addr += l;
2081 }
2082 return true;
2083}
2084
6d16c2f8
AL
2085/* Map a physical memory region into a host virtual address.
2086 * May map a subset of the requested range, given by and returned in *plen.
2087 * May return NULL if resources needed to perform the mapping are exhausted.
2088 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2089 * Use cpu_register_map_client() to know when retrying the map operation is
2090 * likely to succeed.
6d16c2f8 2091 */
ac1970fb 2092void *address_space_map(AddressSpace *as,
a8170e5e
AK
2093 hwaddr addr,
2094 hwaddr *plen,
ac1970fb 2095 bool is_write)
6d16c2f8 2096{
a8170e5e 2097 hwaddr len = *plen;
e3127ae0
PB
2098 hwaddr done = 0;
2099 hwaddr l, xlat, base;
2100 MemoryRegion *mr, *this_mr;
2101 ram_addr_t raddr;
6d16c2f8 2102
e3127ae0
PB
2103 if (len == 0) {
2104 return NULL;
2105 }
38bee5dc 2106
e3127ae0
PB
2107 l = len;
2108 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2109 if (!memory_access_is_direct(mr, is_write)) {
2110 if (bounce.buffer) {
2111 return NULL;
6d16c2f8 2112 }
e3127ae0
PB
2113 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2114 bounce.addr = addr;
2115 bounce.len = l;
d3e71559
PB
2116
2117 memory_region_ref(mr);
2118 bounce.mr = mr;
e3127ae0
PB
2119 if (!is_write) {
2120 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2121 }
6d16c2f8 2122
e3127ae0
PB
2123 *plen = l;
2124 return bounce.buffer;
2125 }
2126
2127 base = xlat;
2128 raddr = memory_region_get_ram_addr(mr);
2129
2130 for (;;) {
6d16c2f8
AL
2131 len -= l;
2132 addr += l;
e3127ae0
PB
2133 done += l;
2134 if (len == 0) {
2135 break;
2136 }
2137
2138 l = len;
2139 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2140 if (this_mr != mr || xlat != base + done) {
2141 break;
2142 }
6d16c2f8 2143 }
e3127ae0 2144
d3e71559 2145 memory_region_ref(mr);
e3127ae0
PB
2146 *plen = done;
2147 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2148}
2149
ac1970fb 2150/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2151 * Will also mark the memory as dirty if is_write == 1. access_len gives
2152 * the amount of memory that was actually read or written by the caller.
2153 */
a8170e5e
AK
2154void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2155 int is_write, hwaddr access_len)
6d16c2f8
AL
2156{
2157 if (buffer != bounce.buffer) {
d3e71559
PB
2158 MemoryRegion *mr;
2159 ram_addr_t addr1;
2160
2161 mr = qemu_ram_addr_from_host(buffer, &addr1);
2162 assert(mr != NULL);
6d16c2f8 2163 if (is_write) {
6d16c2f8
AL
2164 while (access_len) {
2165 unsigned l;
2166 l = TARGET_PAGE_SIZE;
2167 if (l > access_len)
2168 l = access_len;
51d7a9eb 2169 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2170 addr1 += l;
2171 access_len -= l;
2172 }
2173 }
868bb33f 2174 if (xen_enabled()) {
e41d7c69 2175 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2176 }
d3e71559 2177 memory_region_unref(mr);
6d16c2f8
AL
2178 return;
2179 }
2180 if (is_write) {
ac1970fb 2181 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2182 }
f8a83245 2183 qemu_vfree(bounce.buffer);
6d16c2f8 2184 bounce.buffer = NULL;
d3e71559 2185 memory_region_unref(bounce.mr);
ba223c29 2186 cpu_notify_map_clients();
6d16c2f8 2187}
d0ecd2aa 2188
a8170e5e
AK
2189void *cpu_physical_memory_map(hwaddr addr,
2190 hwaddr *plen,
ac1970fb
AK
2191 int is_write)
2192{
2193 return address_space_map(&address_space_memory, addr, plen, is_write);
2194}
2195
a8170e5e
AK
2196void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2197 int is_write, hwaddr access_len)
ac1970fb
AK
2198{
2199 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2200}
2201
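A minimal sketch of the map/unmap pairing through the wrappers above, reading a guest buffer in possibly several chunks because a mapping may cover less than was requested. The checksum use and the addresses are invented.

/* hypothetical: checksum a guest-physical buffer via temporary mappings */
static uint32_t checksum_guest_buffer(hwaddr addr, hwaddr len)
{
    uint32_t sum = 0;

    while (len > 0) {
        hwaddr plen = len;
        uint8_t *p = cpu_physical_memory_map(addr, &plen, 0 /* read */);
        hwaddr i;

        if (!p) {
            break;                     /* bounce buffer busy, give up */
        }
        for (i = 0; i < plen; i++) {
            sum += p[i];
        }
        /* access_len == plen: everything that was mapped was read */
        cpu_physical_memory_unmap(p, plen, 0, plen);
        addr += plen;
        len -= plen;
    }
    return sum;
}
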
8df1cd07 2202/* warning: addr must be aligned */
a8170e5e 2203static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2204 enum device_endian endian)
8df1cd07 2205{
8df1cd07 2206 uint8_t *ptr;
791af8c8 2207 uint64_t val;
5c8a00ce 2208 MemoryRegion *mr;
149f54b5
PB
2209 hwaddr l = 4;
2210 hwaddr addr1;
8df1cd07 2211
5c8a00ce
PB
2212 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2213 false);
2214 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2215 /* I/O case */
5c8a00ce 2216 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2217#if defined(TARGET_WORDS_BIGENDIAN)
2218 if (endian == DEVICE_LITTLE_ENDIAN) {
2219 val = bswap32(val);
2220 }
2221#else
2222 if (endian == DEVICE_BIG_ENDIAN) {
2223 val = bswap32(val);
2224 }
2225#endif
8df1cd07
FB
2226 } else {
2227 /* RAM case */
5c8a00ce 2228 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2229 & TARGET_PAGE_MASK)
149f54b5 2230 + addr1);
1e78bcc1
AG
2231 switch (endian) {
2232 case DEVICE_LITTLE_ENDIAN:
2233 val = ldl_le_p(ptr);
2234 break;
2235 case DEVICE_BIG_ENDIAN:
2236 val = ldl_be_p(ptr);
2237 break;
2238 default:
2239 val = ldl_p(ptr);
2240 break;
2241 }
8df1cd07
FB
2242 }
2243 return val;
2244}
2245
a8170e5e 2246uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2247{
2248 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2249}
2250
a8170e5e 2251uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2252{
2253 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2254}
2255
a8170e5e 2256uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2257{
2258 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2259}
2260
84b7b8e7 2261/* warning: addr must be aligned */
a8170e5e 2262static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2263 enum device_endian endian)
84b7b8e7 2264{
84b7b8e7
FB
2265 uint8_t *ptr;
2266 uint64_t val;
5c8a00ce 2267 MemoryRegion *mr;
149f54b5
PB
2268 hwaddr l = 8;
2269 hwaddr addr1;
84b7b8e7 2270
5c8a00ce
PB
2271 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2272 false);
2273 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2274 /* I/O case */
5c8a00ce 2275 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2276#if defined(TARGET_WORDS_BIGENDIAN)
2277 if (endian == DEVICE_LITTLE_ENDIAN) {
2278 val = bswap64(val);
2279 }
2280#else
2281 if (endian == DEVICE_BIG_ENDIAN) {
2282 val = bswap64(val);
2283 }
84b7b8e7
FB
2284#endif
2285 } else {
2286 /* RAM case */
5c8a00ce 2287 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2288 & TARGET_PAGE_MASK)
149f54b5 2289 + addr1);
1e78bcc1
AG
2290 switch (endian) {
2291 case DEVICE_LITTLE_ENDIAN:
2292 val = ldq_le_p(ptr);
2293 break;
2294 case DEVICE_BIG_ENDIAN:
2295 val = ldq_be_p(ptr);
2296 break;
2297 default:
2298 val = ldq_p(ptr);
2299 break;
2300 }
84b7b8e7
FB
2301 }
2302 return val;
2303}
2304
a8170e5e 2305uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2306{
2307 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2308}
2309
a8170e5e 2310uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2311{
2312 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2313}
2314
a8170e5e 2315uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2316{
2317 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2318}
2319
aab33094 2320/* XXX: optimize */
a8170e5e 2321uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2322{
2323 uint8_t val;
2324 cpu_physical_memory_read(addr, &val, 1);
2325 return val;
2326}
2327
733f0b02 2328/* warning: addr must be aligned */
a8170e5e 2329static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2330 enum device_endian endian)
aab33094 2331{
733f0b02
MT
2332 uint8_t *ptr;
2333 uint64_t val;
5c8a00ce 2334 MemoryRegion *mr;
149f54b5
PB
2335 hwaddr l = 2;
2336 hwaddr addr1;
733f0b02 2337
5c8a00ce
PB
2338 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2339 false);
2340 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2341 /* I/O case */
5c8a00ce 2342 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2343#if defined(TARGET_WORDS_BIGENDIAN)
2344 if (endian == DEVICE_LITTLE_ENDIAN) {
2345 val = bswap16(val);
2346 }
2347#else
2348 if (endian == DEVICE_BIG_ENDIAN) {
2349 val = bswap16(val);
2350 }
2351#endif
733f0b02
MT
2352 } else {
2353 /* RAM case */
5c8a00ce 2354 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2355 & TARGET_PAGE_MASK)
149f54b5 2356 + addr1);
1e78bcc1
AG
2357 switch (endian) {
2358 case DEVICE_LITTLE_ENDIAN:
2359 val = lduw_le_p(ptr);
2360 break;
2361 case DEVICE_BIG_ENDIAN:
2362 val = lduw_be_p(ptr);
2363 break;
2364 default:
2365 val = lduw_p(ptr);
2366 break;
2367 }
733f0b02
MT
2368 }
2369 return val;
aab33094
FB
2370}
2371
a8170e5e 2372uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2373{
2374 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2375}
2376
a8170e5e 2377uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2378{
2379 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2380}
2381
a8170e5e 2382uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2383{
2384 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2385}
2386
8df1cd07
FB
2387/* warning: addr must be aligned. The ram page is not marked as dirty
2388 and the code inside is not invalidated. It is useful if the dirty
2389 bits are used to track modified PTEs */
a8170e5e 2390void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2391{
8df1cd07 2392 uint8_t *ptr;
5c8a00ce 2393 MemoryRegion *mr;
149f54b5
PB
2394 hwaddr l = 4;
2395 hwaddr addr1;
8df1cd07 2396
5c8a00ce
PB
2397 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2398 true);
2399 if (l < 4 || !memory_access_is_direct(mr, true)) {
2400 io_mem_write(mr, addr1, val, 4);
8df1cd07 2401 } else {
5c8a00ce 2402 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2403 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2404 stl_p(ptr, val);
74576198
AL
2405
2406 if (unlikely(in_migration)) {
2407 if (!cpu_physical_memory_is_dirty(addr1)) {
2408 /* invalidate code */
2409 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2410 /* set dirty bit */
f7c11b53
YT
2411 cpu_physical_memory_set_dirty_flags(
2412 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2413 }
2414 }
8df1cd07
FB
2415 }
2416}
2417
2418/* warning: addr must be aligned */
a8170e5e 2419static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2420 enum device_endian endian)
8df1cd07 2421{
8df1cd07 2422 uint8_t *ptr;
5c8a00ce 2423 MemoryRegion *mr;
149f54b5
PB
2424 hwaddr l = 4;
2425 hwaddr addr1;
8df1cd07 2426
5c8a00ce
PB
2427 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2428 true);
2429 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2430#if defined(TARGET_WORDS_BIGENDIAN)
2431 if (endian == DEVICE_LITTLE_ENDIAN) {
2432 val = bswap32(val);
2433 }
2434#else
2435 if (endian == DEVICE_BIG_ENDIAN) {
2436 val = bswap32(val);
2437 }
2438#endif
5c8a00ce 2439 io_mem_write(mr, addr1, val, 4);
8df1cd07 2440 } else {
8df1cd07 2441 /* RAM case */
5c8a00ce 2442 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2443 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2444 switch (endian) {
2445 case DEVICE_LITTLE_ENDIAN:
2446 stl_le_p(ptr, val);
2447 break;
2448 case DEVICE_BIG_ENDIAN:
2449 stl_be_p(ptr, val);
2450 break;
2451 default:
2452 stl_p(ptr, val);
2453 break;
2454 }
51d7a9eb 2455 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2456 }
2457}
2458
a8170e5e 2459void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2460{
2461 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2462}
2463
a8170e5e 2464void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2465{
2466 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2467}
2468
a8170e5e 2469void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2470{
2471 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2472}
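
A brief hedged sketch of how the fixed-endian accessors are used: reading and updating a little-endian 32-bit field kept in guest RAM at a known physical address. The 0x40000000 address is an illustration value.

/* hypothetical: bump a little-endian counter stored in guest RAM */
static void bump_guest_counter(void)
{
    hwaddr counter_pa = 0x40000000;
    uint32_t v = ldl_le_phys(counter_pa);      /* byteswapped if needed */

    stl_le_phys(counter_pa, v + 1);            /* also marks the page dirty */
}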
2473
aab33094 2474/* XXX: optimize */
a8170e5e 2475void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2476{
2477 uint8_t v = val;
2478 cpu_physical_memory_write(addr, &v, 1);
2479}
2480
733f0b02 2481/* warning: addr must be aligned */
a8170e5e 2482static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2483 enum device_endian endian)
aab33094 2484{
733f0b02 2485 uint8_t *ptr;
5c8a00ce 2486 MemoryRegion *mr;
149f54b5
PB
2487 hwaddr l = 2;
2488 hwaddr addr1;
733f0b02 2489
5c8a00ce
PB
2490 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2491 true);
2492 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2493#if defined(TARGET_WORDS_BIGENDIAN)
2494 if (endian == DEVICE_LITTLE_ENDIAN) {
2495 val = bswap16(val);
2496 }
2497#else
2498 if (endian == DEVICE_BIG_ENDIAN) {
2499 val = bswap16(val);
2500 }
2501#endif
5c8a00ce 2502 io_mem_write(mr, addr1, val, 2);
733f0b02 2503 } else {
733f0b02 2504 /* RAM case */
5c8a00ce 2505 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2506 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2507 switch (endian) {
2508 case DEVICE_LITTLE_ENDIAN:
2509 stw_le_p(ptr, val);
2510 break;
2511 case DEVICE_BIG_ENDIAN:
2512 stw_be_p(ptr, val);
2513 break;
2514 default:
2515 stw_p(ptr, val);
2516 break;
2517 }
51d7a9eb 2518 invalidate_and_set_dirty(addr1, 2);
733f0b02 2519 }
aab33094
FB
2520}
2521
a8170e5e 2522void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2523{
2524 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2525}
2526
a8170e5e 2527void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2528{
2529 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2530}
2531
a8170e5e 2532void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2533{
2534 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2535}
2536
aab33094 2537/* XXX: optimize */
a8170e5e 2538void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2539{
2540 val = tswap64(val);
71d2b725 2541 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2542}
2543
a8170e5e 2544void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2545{
2546 val = cpu_to_le64(val);
2547 cpu_physical_memory_write(addr, &val, 8);
2548}
2549
a8170e5e 2550void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2551{
2552 val = cpu_to_be64(val);
2553 cpu_physical_memory_write(addr, &val, 8);
2554}
2555
5e2972fd 2556/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2557int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2558 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2559{
2560 int l;
a8170e5e 2561 hwaddr phys_addr;
9b3c35e0 2562 target_ulong page;
13eb76e0
FB
2563
2564 while (len > 0) {
2565 page = addr & TARGET_PAGE_MASK;
2566 phys_addr = cpu_get_phys_page_debug(env, page);
2567 /* if no physical page mapped, return an error */
2568 if (phys_addr == -1)
2569 return -1;
2570 l = (page + TARGET_PAGE_SIZE) - addr;
2571 if (l > len)
2572 l = len;
5e2972fd 2573 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2574 if (is_write)
2575 cpu_physical_memory_write_rom(phys_addr, buf, l);
2576 else
5e2972fd 2577 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2578 len -= l;
2579 buf += l;
2580 addr += l;
2581 }
2582 return 0;
2583}
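
/* Editor's illustration (not part of the original file): a hedged example of
 * the debugger-style use this function serves (gdbstub, monitor), reading one
 * guest-virtual word from the given CPU.  The helper name is invented. */
static int dump_guest_word(CPUArchState *env, target_ulong vaddr,
                           uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;                     /* page not mapped */
    }
    *out = ldl_p(buf);                 /* interpret in guest byte order */
    return 0;
}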
a68fe89c 2584#endif
13eb76e0 2585
8e4a424b
BS
2586#if !defined(CONFIG_USER_ONLY)
2587
2588/*
2589 * A helper function for the _utterly broken_ virtio device model to find out if
2590 * it's running on a big endian machine. Don't do this at home kids!
2591 */
2592bool virtio_is_big_endian(void);
2593bool virtio_is_big_endian(void)
2594{
2595#if defined(TARGET_WORDS_BIGENDIAN)
2596 return true;
2597#else
2598 return false;
2599#endif
2600}
2601
2602#endif
2603
76f35538 2604#ifndef CONFIG_USER_ONLY
a8170e5e 2605bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2606{
5c8a00ce 2607 MemoryRegion *mr;
149f54b5 2608 hwaddr l = 1;
76f35538 2609
5c8a00ce
PB
2610 mr = address_space_translate(&address_space_memory,
2611 phys_addr, &phys_addr, &l, false);
76f35538 2612
5c8a00ce
PB
2613 return !(memory_region_is_ram(mr) ||
2614 memory_region_is_romd(mr));
76f35538 2615}
bd2fa51f
MH
2616
2617void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2618{
2619 RAMBlock *block;
2620
2621 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2622 func(block->host, block->offset, block->length, opaque);
2623 }
2624}
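
A short hedged sketch of a callback matching the (host, offset, length, opaque) call made above, here just totalling the registered RAM; the callback and the accumulator are invented.

/* hypothetical: total up the length of all registered RAM blocks */
static void count_ram_cb(void *host_addr, ram_addr_t offset,
                         ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

/* e.g.: uint64_t total = 0; qemu_ram_foreach_block(count_ram_cb, &total); */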
ec3f8c99 2625#endif