/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
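
/* The phys_map below is a radix tree over guest physical page numbers:
 * each level indexes P_L2_BITS (9) bits of the page number, and
 * P_L2_LEVELS follows from the formula above.  For example, with 4 KiB
 * target pages (TARGET_PAGE_BITS == 12) that works out to
 * ((64 - 12 - 1) / 9) + 1 = 6 levels, enough to cover the full 64-bit
 * address space.  Leaf entries hold indices into phys_sections;
 * interior entries hold indices into the Node array.
 */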

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
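
/* After compaction an interior entry's skip field can stand for more than
 * one radix-tree level; that is why the lookup below steps its level
 * counter down by lp.skip rather than by one.
 */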

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

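/* Translation may cross address spaces: when the section found is backed
 * by an IOMMU, the IOTLB entry it returns redirects the access into
 * iotlb.target_as, and the walk restarts there until a region without
 * iommu_ops is reached or the permission check fails.
 */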
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment. */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock. This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *     mru_block = NULL;
     *     call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here. The block was already published
     * when it was placed into the list. Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range_type(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries. Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

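/* Split an incoming MemoryRegionSection along page boundaries: a partial
 * page at the start, whole pages in the middle, and a partial page at the
 * end.  Partial pages go through register_subpage(), runs of full pages
 * through register_multipage().
 */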
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        error_report("%s", error_get_pretty(*errp));
        exit(1);
    }
    return NULL;
}
#endif

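/* Best-fit search: walk all blocks and pick the smallest gap between
 * existing blocks that is still large enough for the new block.
 */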
0dc3f44a 1179/* Called with the ramlist lock held. */
d17b5288 1180static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1181{
1182 RAMBlock *block, *next_block;
3e837b2c 1183 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1184
49cd9ac6
SH
1185 assert(size != 0); /* it would hand out same offset multiple times */
1186
0dc3f44a 1187 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
04b16653 1188 return 0;
0d53d9fe 1189 }
04b16653 1190
0dc3f44a 1191 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
f15fbc4b 1192 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1193
62be4e3a 1194 end = block->offset + block->max_length;
04b16653 1195
0dc3f44a 1196 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
04b16653
AW
1197 if (next_block->offset >= end) {
1198 next = MIN(next, next_block->offset);
1199 }
1200 }
1201 if (next - end >= size && next - end < mingap) {
3e837b2c 1202 offset = end;
04b16653
AW
1203 mingap = next - end;
1204 }
1205 }
3e837b2c
AW
1206
1207 if (offset == RAM_ADDR_MAX) {
1208 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1209 (uint64_t)size);
1210 abort();
1211 }
1212
04b16653
AW
1213 return offset;
1214}
1215
652d7ec2 1216ram_addr_t last_ram_offset(void)
d17b5288
AW
1217{
1218 RAMBlock *block;
1219 ram_addr_t last = 0;
1220
0dc3f44a
MD
1221 rcu_read_lock();
1222 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
62be4e3a 1223 last = MAX(last, block->offset + block->max_length);
0d53d9fe 1224 }
0dc3f44a 1225 rcu_read_unlock();
d17b5288
AW
1226 return last;
1227}
1228
ddb97f1d
JB
1229static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1230{
1231 int ret;
ddb97f1d
JB
1232
1233 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
47c8ca53 1234 if (!machine_dump_guest_core(current_machine)) {
ddb97f1d
JB
1235 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1236 if (ret) {
1237 perror("qemu_madvise");
1238 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1239 "but dump_guest_core=off specified\n");
1240 }
1241 }
1242}
1243
0dc3f44a
MD
1244/* Called within an RCU critical section, or while the ramlist lock
1245 * is held.
1246 */
20cfe881 1247static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1248{
20cfe881 1249 RAMBlock *block;
84b89d78 1250
0dc3f44a 1251 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1252 if (block->offset == addr) {
20cfe881 1253 return block;
c5705a77
AK
1254 }
1255 }
20cfe881
HT
1256
1257 return NULL;
1258}
1259
ae3a7047 1260/* Called with iothread lock held. */
20cfe881
HT
1261void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1262{
ae3a7047 1263 RAMBlock *new_block, *block;
20cfe881 1264
0dc3f44a 1265 rcu_read_lock();
ae3a7047 1266 new_block = find_ram_block(addr);
c5705a77
AK
1267 assert(new_block);
1268 assert(!new_block->idstr[0]);
84b89d78 1269
09e5ab63
AL
1270 if (dev) {
1271 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1272 if (id) {
1273 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1274 g_free(id);
84b89d78
CM
1275 }
1276 }
1277 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1278
0dc3f44a 1279 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1280 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1281 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1282 new_block->idstr);
1283 abort();
1284 }
1285 }
0dc3f44a 1286 rcu_read_unlock();
c5705a77
AK
1287}
1288
ae3a7047 1289/* Called with iothread lock held. */
20cfe881
HT
1290void qemu_ram_unset_idstr(ram_addr_t addr)
1291{
ae3a7047 1292 RAMBlock *block;
20cfe881 1293
ae3a7047
MD
1294 /* FIXME: arch_init.c assumes that this is not called throughout
1295 * migration. Ignore the problem since hot-unplug during migration
1296 * does not work anyway.
1297 */
1298
0dc3f44a 1299 rcu_read_lock();
ae3a7047 1300 block = find_ram_block(addr);
20cfe881
HT
1301 if (block) {
1302 memset(block->idstr, 0, sizeof(block->idstr));
1303 }
0dc3f44a 1304 rcu_read_unlock();
20cfe881
HT
1305}
1306
8490fc78
LC
1307static int memory_try_enable_merging(void *addr, size_t len)
1308{
75cc7f01 1309 if (!machine_mem_merge(current_machine)) {
8490fc78
LC
1310 /* disabled by the user */
1311 return 0;
1312 }
1313
1314 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1315}
1316
62be4e3a
MT
1317/* Only legal before guest might have detected the memory size: e.g. on
1318 * incoming migration, or right after reset.
1319 *
1320 * As memory core doesn't know how is memory accessed, it is up to
1321 * resize callback to update device state and/or add assertions to detect
1322 * misuse, if necessary.
1323 */
1324int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1325{
1326 RAMBlock *block = find_ram_block(base);
1327
1328 assert(block);
1329
129ddaf3
MT
1330 newsize = TARGET_PAGE_ALIGN(newsize);
1331
62be4e3a
MT
1332 if (block->used_length == newsize) {
1333 return 0;
1334 }
1335
1336 if (!(block->flags & RAM_RESIZEABLE)) {
1337 error_setg_errno(errp, EINVAL,
1338 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1339 " in != 0x" RAM_ADDR_FMT, block->idstr,
1340 newsize, block->used_length);
1341 return -EINVAL;
1342 }
1343
1344 if (block->max_length < newsize) {
1345 error_setg_errno(errp, EINVAL,
1346 "Length too large: %s: 0x" RAM_ADDR_FMT
1347 " > 0x" RAM_ADDR_FMT, block->idstr,
1348 newsize, block->max_length);
1349 return -EINVAL;
1350 }
1351
1352 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1353 block->used_length = newsize;
58d2707e
PB
1354 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1355 DIRTY_CLIENTS_ALL);
62be4e3a
MT
1356 memory_region_set_size(block->mr, newsize);
1357 if (block->resized) {
1358 block->resized(block->idstr, newsize, block->host);
1359 }
1360 return 0;
1361}
1362
ef701d7b 1363static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1364{
e1c57ab8 1365 RAMBlock *block;
0d53d9fe 1366 RAMBlock *last_block = NULL;
2152f5ca
JQ
1367 ram_addr_t old_ram_size, new_ram_size;
1368
1369 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1370
b2a8658e 1371 qemu_mutex_lock_ramlist();
9b8424d5 1372 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1373
1374 if (!new_block->host) {
1375 if (xen_enabled()) {
9b8424d5
MT
1376 xen_ram_alloc(new_block->offset, new_block->max_length,
1377 new_block->mr);
e1c57ab8 1378 } else {
9b8424d5 1379 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1380 &new_block->mr->align);
39228250 1381 if (!new_block->host) {
ef701d7b
HT
1382 error_setg_errno(errp, errno,
1383 "cannot set up guest memory '%s'",
1384 memory_region_name(new_block->mr));
1385 qemu_mutex_unlock_ramlist();
1386 return -1;
39228250 1387 }
9b8424d5 1388 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1389 }
c902760f 1390 }
94a6b54f 1391
0d53d9fe
MD
1392 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1393 * QLIST (which has an RCU-friendly variant) does not have insertion at
1394 * tail, so save the last element in last_block.
1395 */
0dc3f44a 1396 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
0d53d9fe 1397 last_block = block;
9b8424d5 1398 if (block->max_length < new_block->max_length) {
abb26d63
PB
1399 break;
1400 }
1401 }
1402 if (block) {
0dc3f44a 1403 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
0d53d9fe 1404 } else if (last_block) {
0dc3f44a 1405 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
0d53d9fe 1406 } else { /* list is empty */
0dc3f44a 1407 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
abb26d63 1408 }
0d6d3c87 1409 ram_list.mru_block = NULL;
94a6b54f 1410
0dc3f44a
MD
1411 /* Write list before version */
1412 smp_wmb();
f798b07f 1413 ram_list.version++;
b2a8658e 1414 qemu_mutex_unlock_ramlist();
f798b07f 1415
2152f5ca
JQ
1416 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1417
1418 if (new_ram_size > old_ram_size) {
1ab4c8ce 1419 int i;
ae3a7047
MD
1420
1421 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1ab4c8ce
JQ
1422 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1423 ram_list.dirty_memory[i] =
1424 bitmap_zero_extend(ram_list.dirty_memory[i],
1425 old_ram_size, new_ram_size);
1426 }
2152f5ca 1427 }
9b8424d5 1428 cpu_physical_memory_set_dirty_range(new_block->offset,
58d2707e
PB
1429 new_block->used_length,
1430 DIRTY_CLIENTS_ALL);
94a6b54f 1431
a904c911
PB
1432 if (new_block->host) {
1433 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1434 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1435 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1436 if (kvm_enabled()) {
1437 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1438 }
e1c57ab8 1439 }
6f0437e8 1440
94a6b54f
PB
1441 return new_block->offset;
1442}
e9a1ab19 1443
0b183fc8 1444#ifdef __linux__
e1c57ab8 1445ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1446 bool share, const char *mem_path,
7f56e740 1447 Error **errp)
e1c57ab8
PB
1448{
1449 RAMBlock *new_block;
ef701d7b
HT
1450 ram_addr_t addr;
1451 Error *local_err = NULL;
e1c57ab8
PB
1452
1453 if (xen_enabled()) {
7f56e740
PB
1454 error_setg(errp, "-mem-path not supported with Xen");
1455 return -1;
e1c57ab8
PB
1456 }
1457
1458 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1459 /*
1460 * file_ram_alloc() needs to allocate just like
1461 * phys_mem_alloc, but we haven't bothered to provide
1462 * a hook there.
1463 */
7f56e740
PB
1464 error_setg(errp,
1465 "-mem-path not supported with this accelerator");
1466 return -1;
e1c57ab8
PB
1467 }
1468
1469 size = TARGET_PAGE_ALIGN(size);
1470 new_block = g_malloc0(sizeof(*new_block));
1471 new_block->mr = mr;
9b8424d5
MT
1472 new_block->used_length = size;
1473 new_block->max_length = size;
dbcb8981 1474 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1475 new_block->host = file_ram_alloc(new_block, size,
1476 mem_path, errp);
1477 if (!new_block->host) {
1478 g_free(new_block);
1479 return -1;
1480 }
1481
ef701d7b
HT
1482 addr = ram_block_add(new_block, &local_err);
1483 if (local_err) {
1484 g_free(new_block);
1485 error_propagate(errp, local_err);
1486 return -1;
1487 }
1488 return addr;
e1c57ab8 1489}
0b183fc8 1490#endif
e1c57ab8 1491
62be4e3a
MT
1492static
1493ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1494 void (*resized)(const char*,
1495 uint64_t length,
1496 void *host),
1497 void *host, bool resizeable,
ef701d7b 1498 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1499{
1500 RAMBlock *new_block;
ef701d7b
HT
1501 ram_addr_t addr;
1502 Error *local_err = NULL;
e1c57ab8
PB
1503
1504 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1505 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1506 new_block = g_malloc0(sizeof(*new_block));
1507 new_block->mr = mr;
62be4e3a 1508 new_block->resized = resized;
9b8424d5
MT
1509 new_block->used_length = size;
1510 new_block->max_length = max_size;
62be4e3a 1511 assert(max_size >= size);
e1c57ab8
PB
1512 new_block->fd = -1;
1513 new_block->host = host;
1514 if (host) {
7bd4f430 1515 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1516 }
62be4e3a
MT
1517 if (resizeable) {
1518 new_block->flags |= RAM_RESIZEABLE;
1519 }
ef701d7b
HT
1520 addr = ram_block_add(new_block, &local_err);
1521 if (local_err) {
1522 g_free(new_block);
1523 error_propagate(errp, local_err);
1524 return -1;
1525 }
1526 return addr;
e1c57ab8
PB
1527}
1528
62be4e3a
MT
1529ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1530 MemoryRegion *mr, Error **errp)
1531{
1532 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1533}
1534
ef701d7b 1535ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1536{
62be4e3a
MT
1537 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1538}
1539
1540ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1541 void (*resized)(const char*,
1542 uint64_t length,
1543 void *host),
1544 MemoryRegion *mr, Error **errp)
1545{
1546 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1547}
1548
1f2e98b6
AW
1549void qemu_ram_free_from_ptr(ram_addr_t addr)
1550{
1551 RAMBlock *block;
1552
b2a8658e 1553 qemu_mutex_lock_ramlist();
0dc3f44a 1554 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1f2e98b6 1555 if (addr == block->offset) {
0dc3f44a 1556 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1557 ram_list.mru_block = NULL;
0dc3f44a
MD
1558 /* Write list before version */
1559 smp_wmb();
f798b07f 1560 ram_list.version++;
43771539 1561 g_free_rcu(block, rcu);
b2a8658e 1562 break;
1f2e98b6
AW
1563 }
1564 }
b2a8658e 1565 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1566}
1567
43771539
PB
1568static void reclaim_ramblock(RAMBlock *block)
1569{
1570 if (block->flags & RAM_PREALLOC) {
1571 ;
1572 } else if (xen_enabled()) {
1573 xen_invalidate_map_cache_entry(block->host);
1574#ifndef _WIN32
1575 } else if (block->fd >= 0) {
1576 munmap(block->host, block->max_length);
1577 close(block->fd);
1578#endif
1579 } else {
1580 qemu_anon_ram_free(block->host, block->max_length);
1581 }
1582 g_free(block);
1583}
1584
c227f099 1585void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1586{
04b16653
AW
1587 RAMBlock *block;
1588
b2a8658e 1589 qemu_mutex_lock_ramlist();
0dc3f44a 1590 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
04b16653 1591 if (addr == block->offset) {
0dc3f44a 1592 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1593 ram_list.mru_block = NULL;
0dc3f44a
MD
1594 /* Write list before version */
1595 smp_wmb();
f798b07f 1596 ram_list.version++;
43771539 1597 call_rcu(block, reclaim_ramblock, rcu);
b2a8658e 1598 break;
04b16653
AW
1599 }
1600 }
b2a8658e 1601 qemu_mutex_unlock_ramlist();
e9a1ab19
FB
1602}
1603
cd19cfa2
HY
1604#ifndef _WIN32
1605void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1606{
1607 RAMBlock *block;
1608 ram_addr_t offset;
1609 int flags;
1610 void *area, *vaddr;
1611
0dc3f44a 1612 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
cd19cfa2 1613 offset = addr - block->offset;
9b8424d5 1614 if (offset < block->max_length) {
1240be24 1615 vaddr = ramblock_ptr(block, offset);
7bd4f430 1616 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1617 ;
dfeaf2ab
MA
1618 } else if (xen_enabled()) {
1619 abort();
cd19cfa2
HY
1620 } else {
1621 flags = MAP_FIXED;
3435f395 1622 if (block->fd >= 0) {
dbcb8981
PB
1623 flags |= (block->flags & RAM_SHARED ?
1624 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1625 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1626 flags, block->fd, offset);
cd19cfa2 1627 } else {
2eb9fbaa
MA
1628 /*
1629 * Remap needs to match alloc. Accelerators that
1630 * set phys_mem_alloc never remap. If they did,
1631 * we'd need a remap hook here.
1632 */
1633 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1634
cd19cfa2
HY
1635 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1636 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1637 flags, -1, 0);
cd19cfa2
HY
1638 }
1639 if (area != vaddr) {
f15fbc4b
AP
1640 fprintf(stderr, "Could not remap addr: "
1641 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1642 length, addr);
1643 exit(1);
1644 }
8490fc78 1645 memory_try_enable_merging(vaddr, length);
ddb97f1d 1646 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 1647 }
cd19cfa2
HY
1648 }
1649 }
1650}
1651#endif /* !_WIN32 */
1652
a35ba7be
PB
1653int qemu_get_ram_fd(ram_addr_t addr)
1654{
ae3a7047
MD
1655 RAMBlock *block;
1656 int fd;
a35ba7be 1657
0dc3f44a 1658 rcu_read_lock();
ae3a7047
MD
1659 block = qemu_get_ram_block(addr);
1660 fd = block->fd;
0dc3f44a 1661 rcu_read_unlock();
ae3a7047 1662 return fd;
a35ba7be
PB
1663}
1664
3fd74b84
DM
1665void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1666{
ae3a7047
MD
1667 RAMBlock *block;
1668 void *ptr;
3fd74b84 1669
0dc3f44a 1670 rcu_read_lock();
ae3a7047
MD
1671 block = qemu_get_ram_block(addr);
1672 ptr = ramblock_ptr(block, 0);
0dc3f44a 1673 rcu_read_unlock();
ae3a7047 1674 return ptr;
3fd74b84
DM
1675}
1676
1b5ec234 1677/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047
MD
1678 * This should not be used for general purpose DMA. Use address_space_map
1679 * or address_space_rw instead. For local memory (e.g. video ram) that the
1680 * device owns, use memory_region_get_ram_ptr.
0dc3f44a
MD
1681 *
1682 * By the time this function returns, the returned pointer is not protected
1683 * by RCU anymore. If the caller is not within an RCU critical section and
1684 * does not hold the iothread lock, it must have other means of protecting the
1685 * pointer, such as a reference to the region that includes the incoming
1686 * ram_addr_t.
1b5ec234
PB
1687 */
1688void *qemu_get_ram_ptr(ram_addr_t addr)
1689{
ae3a7047
MD
1690 RAMBlock *block;
1691 void *ptr;
1b5ec234 1692
0dc3f44a 1693 rcu_read_lock();
ae3a7047
MD
1694 block = qemu_get_ram_block(addr);
1695
1696 if (xen_enabled() && block->host == NULL) {
0d6d3c87
PB
1697 /* We need to check if the requested address is in the RAM
1698 * because we don't want to map the entire memory in QEMU.
1699 * In that case just map until the end of the page.
1700 */
1701 if (block->offset == 0) {
ae3a7047 1702 ptr = xen_map_cache(addr, 0, 0);
0dc3f44a 1703 goto unlock;
0d6d3c87 1704 }
ae3a7047
MD
1705
1706 block->host = xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87 1707 }
ae3a7047
MD
1708 ptr = ramblock_ptr(block, addr - block->offset);
1709
0dc3f44a
MD
1710unlock:
1711 rcu_read_unlock();
ae3a7047 1712 return ptr;
dc828ca1
PB
1713}
1714
38bee5dc 1715/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1716 * but takes a size argument.
0dc3f44a
MD
1717 *
1718 * By the time this function returns, the returned pointer is not protected
1719 * by RCU anymore. If the caller is not within an RCU critical section and
1720 * does not hold the iothread lock, it must have other means of protecting the
1721 * pointer, such as a reference to the region that includes the incoming
1722 * ram_addr_t.
ae3a7047 1723 */
cb85f7ab 1724static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1725{
ae3a7047 1726 void *ptr;
8ab934f9
SS
1727 if (*size == 0) {
1728 return NULL;
1729 }
868bb33f 1730 if (xen_enabled()) {
e41d7c69 1731 return xen_map_cache(addr, *size, 1);
868bb33f 1732 } else {
38bee5dc 1733 RAMBlock *block;
0dc3f44a
MD
1734 rcu_read_lock();
1735 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5
MT
1736 if (addr - block->offset < block->max_length) {
1737 if (addr - block->offset + *size > block->max_length)
1738 *size = block->max_length - addr + block->offset;
ae3a7047 1739 ptr = ramblock_ptr(block, addr - block->offset);
0dc3f44a 1740 rcu_read_unlock();
ae3a7047 1741 return ptr;
38bee5dc
SS
1742 }
1743 }
1744
1745 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1746 abort();
38bee5dc
SS
1747 }
1748}
1749
7443b437 1750/* Some of the softmmu routines need to translate from a host pointer
ae3a7047
MD
1751 * (typically a TLB entry) back to a ram offset.
1752 *
1753 * By the time this function returns, the returned pointer is not protected
1754 * by RCU anymore. If the caller is not within an RCU critical section and
1755 * does not hold the iothread lock, it must have other means of protecting the
1756 * pointer, such as a reference to the region that includes the incoming
1757 * ram_addr_t.
1758 */
1b5ec234 1759MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1760{
94a6b54f
PB
1761 RAMBlock *block;
1762 uint8_t *host = ptr;
ae3a7047 1763 MemoryRegion *mr;
94a6b54f 1764
868bb33f 1765 if (xen_enabled()) {
0dc3f44a 1766 rcu_read_lock();
e41d7c69 1767 *ram_addr = xen_ram_addr_from_mapcache(ptr);
ae3a7047 1768 mr = qemu_get_ram_block(*ram_addr)->mr;
0dc3f44a 1769 rcu_read_unlock();
ae3a7047 1770 return mr;
712c2b41
SS
1771 }
1772
0dc3f44a
MD
1773 rcu_read_lock();
1774 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1775 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1776 goto found;
1777 }
1778
0dc3f44a 1779 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c
JN
1780 /* This case appears when the block is not mapped. */
1781 if (block->host == NULL) {
1782 continue;
1783 }
9b8424d5 1784 if (host - block->host < block->max_length) {
23887b79 1785 goto found;
f471a17e 1786 }
94a6b54f 1787 }
432d268c 1788
0dc3f44a 1789 rcu_read_unlock();
1b5ec234 1790 return NULL;
23887b79
PB
1791
1792found:
1793 *ram_addr = block->offset + (host - block->host);
ae3a7047 1794 mr = block->mr;
0dc3f44a 1795 rcu_read_unlock();
ae3a7047 1796 return mr;
e890261f 1797}
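/*
 * Illustrative sketch (not part of exec.c): translating a host pointer, e.g.
 * one taken from a TLB entry, back to its RAM offset with the function
 * above.  The helper name is an assumption for the example only.
 */
static inline bool example_host_ptr_to_ram_addr(void *host, ram_addr_t *offset)
{
    MemoryRegion *mr = qemu_ram_addr_from_host(host, offset);

    /* NULL means the pointer does not belong to any registered RAMBlock. */
    return mr != NULL;
}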
f471a17e 1798
a8170e5e 1799static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1800 uint64_t val, unsigned size)
9fa3e853 1801{
52159192 1802 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1803 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1804 }
0e0df1e2
AK
1805 switch (size) {
1806 case 1:
1807 stb_p(qemu_get_ram_ptr(ram_addr), val);
1808 break;
1809 case 2:
1810 stw_p(qemu_get_ram_ptr(ram_addr), val);
1811 break;
1812 case 4:
1813 stl_p(qemu_get_ram_ptr(ram_addr), val);
1814 break;
1815 default:
1816 abort();
3a7d929e 1817 }
58d2707e
PB
1818 /* Set both VGA and migration bits for simplicity and to remove
1819 * the notdirty callback faster.
1820 */
1821 cpu_physical_memory_set_dirty_range(ram_addr, size,
1822 DIRTY_CLIENTS_NOCODE);
f23db169
FB
1823 /* we remove the notdirty callback only if the code has been
1824 flushed */
a2cd8c85 1825 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1826 CPUArchState *env = current_cpu->env_ptr;
93afeade 1827 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1828 }
9fa3e853
FB
1829}
1830
b018ddf6
PB
1831static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1832 unsigned size, bool is_write)
1833{
1834 return is_write;
1835}
1836
0e0df1e2 1837static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1838 .write = notdirty_mem_write,
b018ddf6 1839 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1840 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1841};
1842
0f459d16 1843/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 1844static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 1845{
93afeade
AF
1846 CPUState *cpu = current_cpu;
1847 CPUArchState *env = cpu->env_ptr;
06d55cc1 1848 target_ulong pc, cs_base;
0f459d16 1849 target_ulong vaddr;
a1d1bb31 1850 CPUWatchpoint *wp;
06d55cc1 1851 int cpu_flags;
0f459d16 1852
ff4700b0 1853 if (cpu->watchpoint_hit) {
06d55cc1
AL
1854 /* We re-entered the check after replacing the TB. Now raise
1855 * the debug interrupt so that it will trigger after the
1856 * current instruction. */
93afeade 1857 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1858 return;
1859 }
93afeade 1860 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1861 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1862 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1863 && (wp->flags & flags)) {
08225676
PM
1864 if (flags == BP_MEM_READ) {
1865 wp->flags |= BP_WATCHPOINT_HIT_READ;
1866 } else {
1867 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1868 }
1869 wp->hitaddr = vaddr;
66b9b43c 1870 wp->hitattrs = attrs;
ff4700b0
AF
1871 if (!cpu->watchpoint_hit) {
1872 cpu->watchpoint_hit = wp;
239c51a5 1873 tb_check_watchpoint(cpu);
6e140f28 1874 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1875 cpu->exception_index = EXCP_DEBUG;
5638d180 1876 cpu_loop_exit(cpu);
6e140f28
AL
1877 } else {
1878 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1879 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1880 cpu_resume_from_signal(cpu, NULL);
6e140f28 1881 }
06d55cc1 1882 }
6e140f28
AL
1883 } else {
1884 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1885 }
1886 }
1887}
1888
6658ffb8
PB
1889/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1890 so these check for a hit then pass through to the normal out-of-line
1891 phys routines. */
66b9b43c
PM
1892static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1893 unsigned size, MemTxAttrs attrs)
6658ffb8 1894{
66b9b43c
PM
1895 MemTxResult res;
1896 uint64_t data;
1897
1898 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 1899 switch (size) {
66b9b43c
PM
1900 case 1:
1901 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
1902 break;
1903 case 2:
1904 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
1905 break;
1906 case 4:
1907 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
1908 break;
1ec9b909
AK
1909 default: abort();
1910 }
66b9b43c
PM
1911 *pdata = data;
1912 return res;
6658ffb8
PB
1913}
1914
66b9b43c
PM
1915static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1916 uint64_t val, unsigned size,
1917 MemTxAttrs attrs)
6658ffb8 1918{
66b9b43c
PM
1919 MemTxResult res;
1920
1921 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 1922 switch (size) {
67364150 1923 case 1:
66b9b43c 1924 address_space_stb(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1925 break;
1926 case 2:
66b9b43c 1927 address_space_stw(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1928 break;
1929 case 4:
66b9b43c 1930 address_space_stl(&address_space_memory, addr, val, attrs, &res);
67364150 1931 break;
1ec9b909
AK
1932 default: abort();
1933 }
66b9b43c 1934 return res;
6658ffb8
PB
1935}
1936
1ec9b909 1937static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
1938 .read_with_attrs = watch_mem_read,
1939 .write_with_attrs = watch_mem_write,
1ec9b909 1940 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1941};
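/*
 * Illustrative sketch (not part of exec.c): watch_mem_ops only sees an
 * access after a watchpoint has been registered on the virtual address via
 * the TLB trick mentioned above.  A debugger-style caller would do something
 * like the following; the helper name is an assumption for the example,
 * while cpu_watchpoint_insert() and the BP_* flags are the existing API.
 */
static inline int example_watch_guest_word(CPUState *cpu, vaddr guest_addr)
{
    CPUWatchpoint *wp;

    /* Trap 4-byte writes; check_watchpoint() above then records the hit. */
    return cpu_watchpoint_insert(cpu, guest_addr, 4,
                                 BP_MEM_WRITE | BP_GDB, &wp);
}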
6658ffb8 1942
f25a49e0
PM
1943static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1944 unsigned len, MemTxAttrs attrs)
db7b5426 1945{
acc9d80b 1946 subpage_t *subpage = opaque;
ff6cff75 1947 uint8_t buf[8];
5c9eb028 1948 MemTxResult res;
791af8c8 1949
db7b5426 1950#if defined(DEBUG_SUBPAGE)
016e9d62 1951 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1952 subpage, len, addr);
db7b5426 1953#endif
5c9eb028
PM
1954 res = address_space_read(subpage->as, addr + subpage->base,
1955 attrs, buf, len);
1956 if (res) {
1957 return res;
f25a49e0 1958 }
acc9d80b
JK
1959 switch (len) {
1960 case 1:
f25a49e0
PM
1961 *data = ldub_p(buf);
1962 return MEMTX_OK;
acc9d80b 1963 case 2:
f25a49e0
PM
1964 *data = lduw_p(buf);
1965 return MEMTX_OK;
acc9d80b 1966 case 4:
f25a49e0
PM
1967 *data = ldl_p(buf);
1968 return MEMTX_OK;
ff6cff75 1969 case 8:
f25a49e0
PM
1970 *data = ldq_p(buf);
1971 return MEMTX_OK;
acc9d80b
JK
1972 default:
1973 abort();
1974 }
db7b5426
BS
1975}
1976
f25a49e0
PM
1977static MemTxResult subpage_write(void *opaque, hwaddr addr,
1978 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 1979{
acc9d80b 1980 subpage_t *subpage = opaque;
ff6cff75 1981 uint8_t buf[8];
acc9d80b 1982
db7b5426 1983#if defined(DEBUG_SUBPAGE)
016e9d62 1984 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1985 " value %"PRIx64"\n",
1986 __func__, subpage, len, addr, value);
db7b5426 1987#endif
acc9d80b
JK
1988 switch (len) {
1989 case 1:
1990 stb_p(buf, value);
1991 break;
1992 case 2:
1993 stw_p(buf, value);
1994 break;
1995 case 4:
1996 stl_p(buf, value);
1997 break;
ff6cff75
PB
1998 case 8:
1999 stq_p(buf, value);
2000 break;
acc9d80b
JK
2001 default:
2002 abort();
2003 }
5c9eb028
PM
2004 return address_space_write(subpage->as, addr + subpage->base,
2005 attrs, buf, len);
db7b5426
BS
2006}
2007
c353e4cc 2008static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2009 unsigned len, bool is_write)
c353e4cc 2010{
acc9d80b 2011 subpage_t *subpage = opaque;
c353e4cc 2012#if defined(DEBUG_SUBPAGE)
016e9d62 2013 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2014 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2015#endif
2016
acc9d80b 2017 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2018 len, is_write);
c353e4cc
PB
2019}
2020
70c68e44 2021static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2022 .read_with_attrs = subpage_read,
2023 .write_with_attrs = subpage_write,
ff6cff75
PB
2024 .impl.min_access_size = 1,
2025 .impl.max_access_size = 8,
2026 .valid.min_access_size = 1,
2027 .valid.max_access_size = 8,
c353e4cc 2028 .valid.accepts = subpage_accepts,
70c68e44 2029 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2030};
2031
c227f099 2032static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2033 uint16_t section)
db7b5426
BS
2034{
2035 int idx, eidx;
2036
2037 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2038 return -1;
2039 idx = SUBPAGE_IDX(start);
2040 eidx = SUBPAGE_IDX(end);
2041#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2042 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2043 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2044#endif
db7b5426 2045 for (; idx <= eidx; idx++) {
5312bd8b 2046 mmio->sub_section[idx] = section;
db7b5426
BS
2047 }
2048
2049 return 0;
2050}
2051
acc9d80b 2052static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2053{
c227f099 2054 subpage_t *mmio;
db7b5426 2055
7267c094 2056 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2057
acc9d80b 2058 mmio->as = as;
1eec614b 2059 mmio->base = base;
2c9b15ca 2060 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2061 NULL, TARGET_PAGE_SIZE);
b3b00c78 2062 mmio->iomem.subpage = true;
db7b5426 2063#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2064 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2065 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2066#endif
b41aac4f 2067 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2068
2069 return mmio;
2070}
2071
a656e22f
PC
2072static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2073 MemoryRegion *mr)
5312bd8b 2074{
a656e22f 2075 assert(as);
5312bd8b 2076 MemoryRegionSection section = {
a656e22f 2077 .address_space = as,
5312bd8b
AK
2078 .mr = mr,
2079 .offset_within_address_space = 0,
2080 .offset_within_region = 0,
052e87b0 2081 .size = int128_2_64(),
5312bd8b
AK
2082 };
2083
53cb28cb 2084 return phys_section_add(map, &section);
5312bd8b
AK
2085}
2086
9d82b5a7 2087MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 2088{
79e2b9ae
PB
2089 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2090 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2091
2092 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2093}
2094
e9179ce1
AK
2095static void io_mem_init(void)
2096{
1f6245e5 2097 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2098 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2099 NULL, UINT64_MAX);
2c9b15ca 2100 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2101 NULL, UINT64_MAX);
2c9b15ca 2102 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2103 NULL, UINT64_MAX);
e9179ce1
AK
2104}
2105
ac1970fb 2106static void mem_begin(MemoryListener *listener)
00752703
PB
2107{
2108 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2109 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2110 uint16_t n;
2111
a656e22f 2112 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2113 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2114 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2115 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2116 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2117 assert(n == PHYS_SECTION_ROM);
a656e22f 2118 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2119 assert(n == PHYS_SECTION_WATCH);
00752703 2120
9736e55b 2121 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2122 d->as = as;
2123 as->next_dispatch = d;
2124}
2125
79e2b9ae
PB
2126static void address_space_dispatch_free(AddressSpaceDispatch *d)
2127{
2128 phys_sections_free(&d->map);
2129 g_free(d);
2130}
2131
00752703 2132static void mem_commit(MemoryListener *listener)
ac1970fb 2133{
89ae337a 2134 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2135 AddressSpaceDispatch *cur = as->dispatch;
2136 AddressSpaceDispatch *next = as->next_dispatch;
2137
53cb28cb 2138 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2139
79e2b9ae 2140 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2141 if (cur) {
79e2b9ae 2142 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2143 }
9affd6fc
PB
2144}
2145
1d71148e 2146static void tcg_commit(MemoryListener *listener)
50c1e149 2147{
182735ef 2148 CPUState *cpu;
117712c3
AK
2149
2150 /* since each CPU stores ram addresses in its TLB cache, we must
2151 reset the modified entries */
2152 /* XXX: slow ! */
bdc44640 2153 CPU_FOREACH(cpu) {
33bde2e1
EI
2154 /* FIXME: Disentangle the cpu.h circular file deps so we can
2155 directly get the right CPU from listener. */
2156 if (cpu->tcg_as_listener != listener) {
2157 continue;
2158 }
76e5c76f 2159 cpu_reload_memory_map(cpu);
117712c3 2160 }
50c1e149
AK
2161}
2162
ac1970fb
AK
2163void address_space_init_dispatch(AddressSpace *as)
2164{
00752703 2165 as->dispatch = NULL;
89ae337a 2166 as->dispatch_listener = (MemoryListener) {
ac1970fb 2167 .begin = mem_begin,
00752703 2168 .commit = mem_commit,
ac1970fb
AK
2169 .region_add = mem_add,
2170 .region_nop = mem_add,
2171 .priority = 0,
2172 };
89ae337a 2173 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2174}
2175
6e48e8f9
PB
2176void address_space_unregister(AddressSpace *as)
2177{
2178 memory_listener_unregister(&as->dispatch_listener);
2179}
2180
83f3c251
AK
2181void address_space_destroy_dispatch(AddressSpace *as)
2182{
2183 AddressSpaceDispatch *d = as->dispatch;
2184
79e2b9ae
PB
2185 atomic_rcu_set(&as->dispatch, NULL);
2186 if (d) {
2187 call_rcu(d, address_space_dispatch_free, rcu);
2188 }
83f3c251
AK
2189}
2190
62152b8a
AK
2191static void memory_map_init(void)
2192{
7267c094 2193 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2194
57271d63 2195 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2196 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2197
7267c094 2198 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2199 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2200 65536);
7dca8043 2201 address_space_init(&address_space_io, system_io, "I/O");
62152b8a
AK
2202}
2203
2204MemoryRegion *get_system_memory(void)
2205{
2206 return system_memory;
2207}
2208
309cb471
AK
2209MemoryRegion *get_system_io(void)
2210{
2211 return system_io;
2212}
2213
e2eef170
PB
2214#endif /* !defined(CONFIG_USER_ONLY) */
2215
13eb76e0
FB
2216/* physical memory access (slow version, mainly for debug) */
2217#if defined(CONFIG_USER_ONLY)
f17ec444 2218int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2219 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2220{
2221 int l, flags;
2222 target_ulong page;
53a5960a 2223 void * p;
13eb76e0
FB
2224
2225 while (len > 0) {
2226 page = addr & TARGET_PAGE_MASK;
2227 l = (page + TARGET_PAGE_SIZE) - addr;
2228 if (l > len)
2229 l = len;
2230 flags = page_get_flags(page);
2231 if (!(flags & PAGE_VALID))
a68fe89c 2232 return -1;
13eb76e0
FB
2233 if (is_write) {
2234 if (!(flags & PAGE_WRITE))
a68fe89c 2235 return -1;
579a97f7 2236 /* XXX: this code should not depend on lock_user */
72fb7daa 2237 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2238 return -1;
72fb7daa
AJ
2239 memcpy(p, buf, l);
2240 unlock_user(p, addr, l);
13eb76e0
FB
2241 } else {
2242 if (!(flags & PAGE_READ))
a68fe89c 2243 return -1;
579a97f7 2244 /* XXX: this code should not depend on lock_user */
72fb7daa 2245 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2246 return -1;
72fb7daa 2247 memcpy(buf, p, l);
5b257578 2248 unlock_user(p, addr, 0);
13eb76e0
FB
2249 }
2250 len -= l;
2251 buf += l;
2252 addr += l;
2253 }
a68fe89c 2254 return 0;
13eb76e0 2255}
8df1cd07 2256
13eb76e0 2257#else
51d7a9eb 2258
845b6214 2259static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
a8170e5e 2260 hwaddr length)
51d7a9eb 2261{
f874bf90 2262 if (cpu_physical_memory_range_includes_clean(addr, length)) {
845b6214
PB
2263 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2264 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
35865339 2265 tb_invalidate_phys_range(addr, addr + length);
845b6214
PB
2266 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2267 }
58d2707e 2268 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
49dfcec4
PB
2269 } else {
2270 xen_modified_memory(addr, length);
51d7a9eb
AP
2271 }
2272}
2273
23326164 2274static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2275{
e1622f4b 2276 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2277
2278 /* Regions are assumed to support 1-4 byte accesses unless
2279 otherwise specified. */
23326164
RH
2280 if (access_size_max == 0) {
2281 access_size_max = 4;
2282 }
2283
2284 /* Bound the maximum access by the alignment of the address. */
2285 if (!mr->ops->impl.unaligned) {
2286 unsigned align_size_max = addr & -addr;
2287 if (align_size_max != 0 && align_size_max < access_size_max) {
2288 access_size_max = align_size_max;
2289 }
82f2563f 2290 }
23326164
RH
2291
2292 /* Don't attempt accesses larger than the maximum. */
2293 if (l > access_size_max) {
2294 l = access_size_max;
82f2563f 2295 }
098178f2
PB
2296 if (l & (l - 1)) {
2297 l = 1 << (qemu_fls(l) - 1);
2298 }
23326164
RH
2299
2300 return l;
82f2563f
PB
2301}
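/*
 * Worked example for memory_access_size() above (illustrative, not part of
 * exec.c): for a region declaring valid.max_access_size == 8 and no
 * unaligned support, an 8-byte request at address 0x1002 is bounded by the
 * address alignment (0x1002 & -0x1002 == 2), so it is issued as 2-byte
 * accesses; a 6-byte request at an 8-aligned address is rounded down to the
 * next power of two (4 bytes), and the caller's loop picks up the remainder
 * on the following iteration.
 */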
2302
5c9eb028
PM
2303MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2304 uint8_t *buf, int len, bool is_write)
13eb76e0 2305{
149f54b5 2306 hwaddr l;
13eb76e0 2307 uint8_t *ptr;
791af8c8 2308 uint64_t val;
149f54b5 2309 hwaddr addr1;
5c8a00ce 2310 MemoryRegion *mr;
3b643495 2311 MemTxResult result = MEMTX_OK;
3b46e624 2312
41063e1e 2313 rcu_read_lock();
13eb76e0 2314 while (len > 0) {
149f54b5 2315 l = len;
5c8a00ce 2316 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2317
13eb76e0 2318 if (is_write) {
5c8a00ce
PB
2319 if (!memory_access_is_direct(mr, is_write)) {
2320 l = memory_access_size(mr, l, addr1);
4917cf44 2321 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2322 potential bugs */
23326164
RH
2323 switch (l) {
2324 case 8:
2325 /* 64 bit write access */
2326 val = ldq_p(buf);
3b643495
PM
2327 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2328 attrs);
23326164
RH
2329 break;
2330 case 4:
1c213d19 2331 /* 32 bit write access */
c27004ec 2332 val = ldl_p(buf);
3b643495
PM
2333 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2334 attrs);
23326164
RH
2335 break;
2336 case 2:
1c213d19 2337 /* 16 bit write access */
c27004ec 2338 val = lduw_p(buf);
3b643495
PM
2339 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2340 attrs);
23326164
RH
2341 break;
2342 case 1:
1c213d19 2343 /* 8 bit write access */
c27004ec 2344 val = ldub_p(buf);
3b643495
PM
2345 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2346 attrs);
23326164
RH
2347 break;
2348 default:
2349 abort();
13eb76e0 2350 }
2bbfa05d 2351 } else {
5c8a00ce 2352 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2353 /* RAM case */
5579c7f3 2354 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2355 memcpy(ptr, buf, l);
845b6214 2356 invalidate_and_set_dirty(mr, addr1, l);
13eb76e0
FB
2357 }
2358 } else {
5c8a00ce 2359 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2360 /* I/O case */
5c8a00ce 2361 l = memory_access_size(mr, l, addr1);
23326164
RH
2362 switch (l) {
2363 case 8:
2364 /* 64 bit read access */
3b643495
PM
2365 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2366 attrs);
23326164
RH
2367 stq_p(buf, val);
2368 break;
2369 case 4:
13eb76e0 2370 /* 32 bit read access */
3b643495
PM
2371 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2372 attrs);
c27004ec 2373 stl_p(buf, val);
23326164
RH
2374 break;
2375 case 2:
13eb76e0 2376 /* 16 bit read access */
3b643495
PM
2377 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2378 attrs);
c27004ec 2379 stw_p(buf, val);
23326164
RH
2380 break;
2381 case 1:
1c213d19 2382 /* 8 bit read access */
3b643495
PM
2383 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2384 attrs);
c27004ec 2385 stb_p(buf, val);
23326164
RH
2386 break;
2387 default:
2388 abort();
13eb76e0
FB
2389 }
2390 } else {
2391 /* RAM case */
5c8a00ce 2392 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2393 memcpy(buf, ptr, l);
13eb76e0
FB
2394 }
2395 }
2396 len -= l;
2397 buf += l;
2398 addr += l;
2399 }
41063e1e 2400 rcu_read_unlock();
fd8aaa76 2401
3b643495 2402 return result;
13eb76e0 2403}
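/*
 * Illustrative sketch (not part of exec.c): a device-model style user of
 * address_space_rw().  The helper name, buffer handling and error message
 * are assumptions for the example; address_space_rw(), MEMTXATTRS_UNSPECIFIED
 * and MEMTX_OK are the interfaces defined here and in the memory API headers.
 */
static inline void example_dma_write(AddressSpace *as, hwaddr dst,
                                     const uint8_t *data, int len)
{
    MemTxResult res;

    res = address_space_rw(as, dst, MEMTXATTRS_UNSPECIFIED,
                           (uint8_t *)data, len, true);
    if (res != MEMTX_OK) {
        error_report("example DMA write to " TARGET_FMT_plx " failed", dst);
    }
}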
8df1cd07 2404
5c9eb028
PM
2405MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2406 const uint8_t *buf, int len)
ac1970fb 2407{
5c9eb028 2408 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
ac1970fb
AK
2409}
2410
5c9eb028
PM
2411MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2412 uint8_t *buf, int len)
ac1970fb 2413{
5c9eb028 2414 return address_space_rw(as, addr, attrs, buf, len, false);
ac1970fb
AK
2415}
2416
2417
a8170e5e 2418void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2419 int len, int is_write)
2420{
5c9eb028
PM
2421 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2422 buf, len, is_write);
ac1970fb
AK
2423}
2424
582b55a9
AG
2425enum write_rom_type {
2426 WRITE_DATA,
2427 FLUSH_CACHE,
2428};
2429
2a221651 2430static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2431 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2432{
149f54b5 2433 hwaddr l;
d0ecd2aa 2434 uint8_t *ptr;
149f54b5 2435 hwaddr addr1;
5c8a00ce 2436 MemoryRegion *mr;
3b46e624 2437
41063e1e 2438 rcu_read_lock();
d0ecd2aa 2439 while (len > 0) {
149f54b5 2440 l = len;
2a221651 2441 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2442
5c8a00ce
PB
2443 if (!(memory_region_is_ram(mr) ||
2444 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2445 /* do nothing */
2446 } else {
5c8a00ce 2447 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2448 /* ROM/RAM case */
5579c7f3 2449 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2450 switch (type) {
2451 case WRITE_DATA:
2452 memcpy(ptr, buf, l);
845b6214 2453 invalidate_and_set_dirty(mr, addr1, l);
582b55a9
AG
2454 break;
2455 case FLUSH_CACHE:
2456 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2457 break;
2458 }
d0ecd2aa
FB
2459 }
2460 len -= l;
2461 buf += l;
2462 addr += l;
2463 }
41063e1e 2464 rcu_read_unlock();
d0ecd2aa
FB
2465}
2466
582b55a9 2467/* used for ROM loading : can write in RAM and ROM */
2a221651 2468void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2469 const uint8_t *buf, int len)
2470{
2a221651 2471 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2472}
2473
2474void cpu_flush_icache_range(hwaddr start, int len)
2475{
2476 /*
2477 * This function should do the same thing as an icache flush that was
2478 * triggered from within the guest. For TCG we are always cache coherent,
2479 * so there is no need to flush anything. For KVM / Xen we need to flush
2480 * the host's instruction cache at least.
2481 */
2482 if (tcg_enabled()) {
2483 return;
2484 }
2485
2a221651
EI
2486 cpu_physical_memory_write_rom_internal(&address_space_memory,
2487 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2488}
2489
6d16c2f8 2490typedef struct {
d3e71559 2491 MemoryRegion *mr;
6d16c2f8 2492 void *buffer;
a8170e5e
AK
2493 hwaddr addr;
2494 hwaddr len;
c2cba0ff 2495 bool in_use;
6d16c2f8
AL
2496} BounceBuffer;
2497
2498static BounceBuffer bounce;
2499
ba223c29 2500typedef struct MapClient {
e95205e1 2501 QEMUBH *bh;
72cf2d4f 2502 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2503} MapClient;
2504
38e047b5 2505QemuMutex map_client_list_lock;
72cf2d4f
BS
2506static QLIST_HEAD(map_client_list, MapClient) map_client_list
2507 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2508
e95205e1
FZ
2509static void cpu_unregister_map_client_do(MapClient *client)
2510{
2511 QLIST_REMOVE(client, link);
2512 g_free(client);
2513}
2514
33b6c2ed
FZ
2515static void cpu_notify_map_clients_locked(void)
2516{
2517 MapClient *client;
2518
2519 while (!QLIST_EMPTY(&map_client_list)) {
2520 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2521 qemu_bh_schedule(client->bh);
2522 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2523 }
2524}
2525
e95205e1 2526void cpu_register_map_client(QEMUBH *bh)
ba223c29 2527{
7267c094 2528 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2529
38e047b5 2530 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2531 client->bh = bh;
72cf2d4f 2532 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2533 if (!atomic_read(&bounce.in_use)) {
2534 cpu_notify_map_clients_locked();
2535 }
38e047b5 2536 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2537}
2538
38e047b5 2539void cpu_exec_init_all(void)
ba223c29 2540{
38e047b5
FZ
2541 qemu_mutex_init(&ram_list.mutex);
2542 memory_map_init();
2543 io_mem_init();
2544 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2545}
2546
e95205e1 2547void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2548{
2549 MapClient *client;
2550
e95205e1
FZ
2551 qemu_mutex_lock(&map_client_list_lock);
2552 QLIST_FOREACH(client, &map_client_list, link) {
2553 if (client->bh == bh) {
2554 cpu_unregister_map_client_do(client);
2555 break;
2556 }
ba223c29 2557 }
e95205e1 2558 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2559}
2560
2561static void cpu_notify_map_clients(void)
2562{
38e047b5 2563 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2564 cpu_notify_map_clients_locked();
38e047b5 2565 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2566}
2567
51644ab7
PB
2568bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2569{
5c8a00ce 2570 MemoryRegion *mr;
51644ab7
PB
2571 hwaddr l, xlat;
2572
41063e1e 2573 rcu_read_lock();
51644ab7
PB
2574 while (len > 0) {
2575 l = len;
5c8a00ce
PB
2576 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2577 if (!memory_access_is_direct(mr, is_write)) {
2578 l = memory_access_size(mr, l, addr);
2579 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2580 return false;
2581 }
2582 }
2583
2584 len -= l;
2585 addr += l;
2586 }
41063e1e 2587 rcu_read_unlock();
51644ab7
PB
2588 return true;
2589}
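/*
 * Illustrative sketch (not part of exec.c): probing a range with
 * address_space_access_valid() before committing to an access, e.g. so a
 * device model can fail a request early.  The helper name is an assumption
 * for the example only.
 */
static inline bool example_can_dma(AddressSpace *as, hwaddr addr, int len)
{
    /* true means every byte of [addr, addr + len) accepts writes */
    return address_space_access_valid(as, addr, len, true);
}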
2590
6d16c2f8
AL
2591/* Map a physical memory region into a host virtual address.
2592 * May map a subset of the requested range, given by and returned in *plen.
2593 * May return NULL if resources needed to perform the mapping are exhausted.
2594 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2595 * Use cpu_register_map_client() to know when retrying the map operation is
2596 * likely to succeed.
6d16c2f8 2597 */
ac1970fb 2598void *address_space_map(AddressSpace *as,
a8170e5e
AK
2599 hwaddr addr,
2600 hwaddr *plen,
ac1970fb 2601 bool is_write)
6d16c2f8 2602{
a8170e5e 2603 hwaddr len = *plen;
e3127ae0
PB
2604 hwaddr done = 0;
2605 hwaddr l, xlat, base;
2606 MemoryRegion *mr, *this_mr;
2607 ram_addr_t raddr;
6d16c2f8 2608
e3127ae0
PB
2609 if (len == 0) {
2610 return NULL;
2611 }
38bee5dc 2612
e3127ae0 2613 l = len;
41063e1e 2614 rcu_read_lock();
e3127ae0 2615 mr = address_space_translate(as, addr, &xlat, &l, is_write);
41063e1e 2616
e3127ae0 2617 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2618 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 2619 rcu_read_unlock();
e3127ae0 2620 return NULL;
6d16c2f8 2621 }
e85d9db5
KW
2622 /* Avoid unbounded allocations */
2623 l = MIN(l, TARGET_PAGE_SIZE);
2624 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2625 bounce.addr = addr;
2626 bounce.len = l;
d3e71559
PB
2627
2628 memory_region_ref(mr);
2629 bounce.mr = mr;
e3127ae0 2630 if (!is_write) {
5c9eb028
PM
2631 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2632 bounce.buffer, l);
8ab934f9 2633 }
6d16c2f8 2634
41063e1e 2635 rcu_read_unlock();
e3127ae0
PB
2636 *plen = l;
2637 return bounce.buffer;
2638 }
2639
2640 base = xlat;
2641 raddr = memory_region_get_ram_addr(mr);
2642
2643 for (;;) {
6d16c2f8
AL
2644 len -= l;
2645 addr += l;
e3127ae0
PB
2646 done += l;
2647 if (len == 0) {
2648 break;
2649 }
2650
2651 l = len;
2652 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2653 if (this_mr != mr || xlat != base + done) {
2654 break;
2655 }
6d16c2f8 2656 }
e3127ae0 2657
d3e71559 2658 memory_region_ref(mr);
41063e1e 2659 rcu_read_unlock();
e3127ae0
PB
2660 *plen = done;
2661 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2662}
2663
ac1970fb 2664/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2665 * Will also mark the memory as dirty if is_write == 1. access_len gives
2666 * the amount of memory that was actually read or written by the caller.
2667 */
a8170e5e
AK
2668void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2669 int is_write, hwaddr access_len)
6d16c2f8
AL
2670{
2671 if (buffer != bounce.buffer) {
d3e71559
PB
2672 MemoryRegion *mr;
2673 ram_addr_t addr1;
2674
2675 mr = qemu_ram_addr_from_host(buffer, &addr1);
2676 assert(mr != NULL);
6d16c2f8 2677 if (is_write) {
845b6214 2678 invalidate_and_set_dirty(mr, addr1, access_len);
6d16c2f8 2679 }
868bb33f 2680 if (xen_enabled()) {
e41d7c69 2681 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2682 }
d3e71559 2683 memory_region_unref(mr);
6d16c2f8
AL
2684 return;
2685 }
2686 if (is_write) {
5c9eb028
PM
2687 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2688 bounce.buffer, access_len);
6d16c2f8 2689 }
f8a83245 2690 qemu_vfree(bounce.buffer);
6d16c2f8 2691 bounce.buffer = NULL;
d3e71559 2692 memory_region_unref(bounce.mr);
c2cba0ff 2693 atomic_mb_set(&bounce.in_use, false);
ba223c29 2694 cpu_notify_map_clients();
6d16c2f8 2695}
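/*
 * Illustrative sketch (not part of exec.c): the map/unmap pairing described
 * above, including the retry path taken when the single bounce buffer is in
 * use.  The helper name and the retry bottom half are assumptions for the
 * example; address_space_map/unmap and cpu_register_map_client are the
 * existing APIs.
 */
static void example_map_and_fill(AddressSpace *as, hwaddr addr, hwaddr size,
                                 QEMUBH *retry_bh)
{
    hwaddr plen = size;
    void *p = address_space_map(as, addr, &plen, true);

    if (!p) {
        /* Resources exhausted (bounce buffer busy): retry from the BH. */
        cpu_register_map_client(retry_bh);
        return;
    }
    memset(p, 0, plen);                      /* plen may be less than size */
    address_space_unmap(as, p, plen, true, plen);
}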
d0ecd2aa 2696
a8170e5e
AK
2697void *cpu_physical_memory_map(hwaddr addr,
2698 hwaddr *plen,
ac1970fb
AK
2699 int is_write)
2700{
2701 return address_space_map(&address_space_memory, addr, plen, is_write);
2702}
2703
a8170e5e
AK
2704void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2705 int is_write, hwaddr access_len)
ac1970fb
AK
2706{
2707 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2708}
2709
8df1cd07 2710/* warning: addr must be aligned */
50013115
PM
2711static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2712 MemTxAttrs attrs,
2713 MemTxResult *result,
2714 enum device_endian endian)
8df1cd07 2715{
8df1cd07 2716 uint8_t *ptr;
791af8c8 2717 uint64_t val;
5c8a00ce 2718 MemoryRegion *mr;
149f54b5
PB
2719 hwaddr l = 4;
2720 hwaddr addr1;
50013115 2721 MemTxResult r;
8df1cd07 2722
41063e1e 2723 rcu_read_lock();
fdfba1a2 2724 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2725 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2726 /* I/O case */
50013115 2727 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
2728#if defined(TARGET_WORDS_BIGENDIAN)
2729 if (endian == DEVICE_LITTLE_ENDIAN) {
2730 val = bswap32(val);
2731 }
2732#else
2733 if (endian == DEVICE_BIG_ENDIAN) {
2734 val = bswap32(val);
2735 }
2736#endif
8df1cd07
FB
2737 } else {
2738 /* RAM case */
5c8a00ce 2739 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2740 & TARGET_PAGE_MASK)
149f54b5 2741 + addr1);
1e78bcc1
AG
2742 switch (endian) {
2743 case DEVICE_LITTLE_ENDIAN:
2744 val = ldl_le_p(ptr);
2745 break;
2746 case DEVICE_BIG_ENDIAN:
2747 val = ldl_be_p(ptr);
2748 break;
2749 default:
2750 val = ldl_p(ptr);
2751 break;
2752 }
50013115
PM
2753 r = MEMTX_OK;
2754 }
2755 if (result) {
2756 *result = r;
8df1cd07 2757 }
41063e1e 2758 rcu_read_unlock();
8df1cd07
FB
2759 return val;
2760}
2761
50013115
PM
2762uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2763 MemTxAttrs attrs, MemTxResult *result)
2764{
2765 return address_space_ldl_internal(as, addr, attrs, result,
2766 DEVICE_NATIVE_ENDIAN);
2767}
2768
2769uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2770 MemTxAttrs attrs, MemTxResult *result)
2771{
2772 return address_space_ldl_internal(as, addr, attrs, result,
2773 DEVICE_LITTLE_ENDIAN);
2774}
2775
2776uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2777 MemTxAttrs attrs, MemTxResult *result)
2778{
2779 return address_space_ldl_internal(as, addr, attrs, result,
2780 DEVICE_BIG_ENDIAN);
2781}
2782
fdfba1a2 2783uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2784{
50013115 2785 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2786}
2787
fdfba1a2 2788uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2789{
50013115 2790 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2791}
2792
fdfba1a2 2793uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2794{
50013115 2795 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2796}
2797
84b7b8e7 2798/* warning: addr must be aligned */
50013115
PM
2799static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2800 MemTxAttrs attrs,
2801 MemTxResult *result,
2802 enum device_endian endian)
84b7b8e7 2803{
84b7b8e7
FB
2804 uint8_t *ptr;
2805 uint64_t val;
5c8a00ce 2806 MemoryRegion *mr;
149f54b5
PB
2807 hwaddr l = 8;
2808 hwaddr addr1;
50013115 2809 MemTxResult r;
84b7b8e7 2810
41063e1e 2811 rcu_read_lock();
2c17449b 2812 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2813 false);
2814 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2815 /* I/O case */
50013115 2816 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
2817#if defined(TARGET_WORDS_BIGENDIAN)
2818 if (endian == DEVICE_LITTLE_ENDIAN) {
2819 val = bswap64(val);
2820 }
2821#else
2822 if (endian == DEVICE_BIG_ENDIAN) {
2823 val = bswap64(val);
2824 }
84b7b8e7
FB
2825#endif
2826 } else {
2827 /* RAM case */
5c8a00ce 2828 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2829 & TARGET_PAGE_MASK)
149f54b5 2830 + addr1);
1e78bcc1
AG
2831 switch (endian) {
2832 case DEVICE_LITTLE_ENDIAN:
2833 val = ldq_le_p(ptr);
2834 break;
2835 case DEVICE_BIG_ENDIAN:
2836 val = ldq_be_p(ptr);
2837 break;
2838 default:
2839 val = ldq_p(ptr);
2840 break;
2841 }
50013115
PM
2842 r = MEMTX_OK;
2843 }
2844 if (result) {
2845 *result = r;
84b7b8e7 2846 }
41063e1e 2847 rcu_read_unlock();
84b7b8e7
FB
2848 return val;
2849}
2850
50013115
PM
2851uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2852 MemTxAttrs attrs, MemTxResult *result)
2853{
2854 return address_space_ldq_internal(as, addr, attrs, result,
2855 DEVICE_NATIVE_ENDIAN);
2856}
2857
2858uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2859 MemTxAttrs attrs, MemTxResult *result)
2860{
2861 return address_space_ldq_internal(as, addr, attrs, result,
2862 DEVICE_LITTLE_ENDIAN);
2863}
2864
2865uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2866 MemTxAttrs attrs, MemTxResult *result)
2867{
2868 return address_space_ldq_internal(as, addr, attrs, result,
2869 DEVICE_BIG_ENDIAN);
2870}
2871
2c17449b 2872uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2873{
50013115 2874 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2875}
2876
2c17449b 2877uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2878{
50013115 2879 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2880}
2881
2c17449b 2882uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2883{
50013115 2884 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2885}
2886
aab33094 2887/* XXX: optimize */
50013115
PM
2888uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2889 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
2890{
2891 uint8_t val;
50013115
PM
2892 MemTxResult r;
2893
2894 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2895 if (result) {
2896 *result = r;
2897 }
aab33094
FB
2898 return val;
2899}
2900
50013115
PM
2901uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2902{
2903 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2904}
2905
733f0b02 2906/* warning: addr must be aligned */
50013115
PM
2907static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2908 hwaddr addr,
2909 MemTxAttrs attrs,
2910 MemTxResult *result,
2911 enum device_endian endian)
aab33094 2912{
733f0b02
MT
2913 uint8_t *ptr;
2914 uint64_t val;
5c8a00ce 2915 MemoryRegion *mr;
149f54b5
PB
2916 hwaddr l = 2;
2917 hwaddr addr1;
50013115 2918 MemTxResult r;
733f0b02 2919
41063e1e 2920 rcu_read_lock();
41701aa4 2921 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2922 false);
2923 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2924 /* I/O case */
50013115 2925 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
2926#if defined(TARGET_WORDS_BIGENDIAN)
2927 if (endian == DEVICE_LITTLE_ENDIAN) {
2928 val = bswap16(val);
2929 }
2930#else
2931 if (endian == DEVICE_BIG_ENDIAN) {
2932 val = bswap16(val);
2933 }
2934#endif
733f0b02
MT
2935 } else {
2936 /* RAM case */
5c8a00ce 2937 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2938 & TARGET_PAGE_MASK)
149f54b5 2939 + addr1);
1e78bcc1
AG
2940 switch (endian) {
2941 case DEVICE_LITTLE_ENDIAN:
2942 val = lduw_le_p(ptr);
2943 break;
2944 case DEVICE_BIG_ENDIAN:
2945 val = lduw_be_p(ptr);
2946 break;
2947 default:
2948 val = lduw_p(ptr);
2949 break;
2950 }
50013115
PM
2951 r = MEMTX_OK;
2952 }
2953 if (result) {
2954 *result = r;
733f0b02 2955 }
41063e1e 2956 rcu_read_unlock();
733f0b02 2957 return val;
aab33094
FB
2958}
2959
50013115
PM
2960uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
2961 MemTxAttrs attrs, MemTxResult *result)
2962{
2963 return address_space_lduw_internal(as, addr, attrs, result,
2964 DEVICE_NATIVE_ENDIAN);
2965}
2966
2967uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
2968 MemTxAttrs attrs, MemTxResult *result)
2969{
2970 return address_space_lduw_internal(as, addr, attrs, result,
2971 DEVICE_LITTLE_ENDIAN);
2972}
2973
2974uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
2975 MemTxAttrs attrs, MemTxResult *result)
2976{
2977 return address_space_lduw_internal(as, addr, attrs, result,
2978 DEVICE_BIG_ENDIAN);
2979}
2980
41701aa4 2981uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2982{
50013115 2983 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2984}
2985
41701aa4 2986uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2987{
50013115 2988 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2989}
2990
41701aa4 2991uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2992{
50013115 2993 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2994}
2995
8df1cd07
FB
2996/* warning: addr must be aligned. The ram page is not masked as dirty
2997 and the code inside is not invalidated. It is useful if the dirty
2998 bits are used to track modified PTEs */
50013115
PM
2999void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3000 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 3001{
8df1cd07 3002 uint8_t *ptr;
5c8a00ce 3003 MemoryRegion *mr;
149f54b5
PB
3004 hwaddr l = 4;
3005 hwaddr addr1;
50013115 3006 MemTxResult r;
845b6214 3007 uint8_t dirty_log_mask;
8df1cd07 3008
41063e1e 3009 rcu_read_lock();
2198a121 3010 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3011 true);
3012 if (l < 4 || !memory_access_is_direct(mr, true)) {
50013115 3013 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3014 } else {
5c8a00ce 3015 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3016 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3017 stl_p(ptr, val);
74576198 3018
845b6214
PB
3019 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3020 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
58d2707e 3021 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
50013115
PM
3022 r = MEMTX_OK;
3023 }
3024 if (result) {
3025 *result = r;
8df1cd07 3026 }
41063e1e 3027 rcu_read_unlock();
8df1cd07
FB
3028}
3029
50013115
PM
3030void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3031{
3032 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3033}
3034
8df1cd07 3035/* warning: addr must be aligned */
50013115
PM
3036static inline void address_space_stl_internal(AddressSpace *as,
3037 hwaddr addr, uint32_t val,
3038 MemTxAttrs attrs,
3039 MemTxResult *result,
3040 enum device_endian endian)
8df1cd07 3041{
8df1cd07 3042 uint8_t *ptr;
5c8a00ce 3043 MemoryRegion *mr;
149f54b5
PB
3044 hwaddr l = 4;
3045 hwaddr addr1;
50013115 3046 MemTxResult r;
8df1cd07 3047
41063e1e 3048 rcu_read_lock();
ab1da857 3049 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3050 true);
3051 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3052#if defined(TARGET_WORDS_BIGENDIAN)
3053 if (endian == DEVICE_LITTLE_ENDIAN) {
3054 val = bswap32(val);
3055 }
3056#else
3057 if (endian == DEVICE_BIG_ENDIAN) {
3058 val = bswap32(val);
3059 }
3060#endif
50013115 3061 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3062 } else {
8df1cd07 3063 /* RAM case */
5c8a00ce 3064 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3065 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3066 switch (endian) {
3067 case DEVICE_LITTLE_ENDIAN:
3068 stl_le_p(ptr, val);
3069 break;
3070 case DEVICE_BIG_ENDIAN:
3071 stl_be_p(ptr, val);
3072 break;
3073 default:
3074 stl_p(ptr, val);
3075 break;
3076 }
845b6214 3077 invalidate_and_set_dirty(mr, addr1, 4);
50013115
PM
3078 r = MEMTX_OK;
3079 }
3080 if (result) {
3081 *result = r;
8df1cd07 3082 }
41063e1e 3083 rcu_read_unlock();
8df1cd07
FB
3084}
3085
50013115
PM
3086void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3087 MemTxAttrs attrs, MemTxResult *result)
3088{
3089 address_space_stl_internal(as, addr, val, attrs, result,
3090 DEVICE_NATIVE_ENDIAN);
3091}
3092
3093void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3094 MemTxAttrs attrs, MemTxResult *result)
3095{
3096 address_space_stl_internal(as, addr, val, attrs, result,
3097 DEVICE_LITTLE_ENDIAN);
3098}
3099
3100void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3101 MemTxAttrs attrs, MemTxResult *result)
3102{
3103 address_space_stl_internal(as, addr, val, attrs, result,
3104 DEVICE_BIG_ENDIAN);
3105}
3106
ab1da857 3107void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3108{
50013115 3109 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3110}
3111
ab1da857 3112void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3113{
50013115 3114 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3115}
3116
ab1da857 3117void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3118{
50013115 3119 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3120}
3121
aab33094 3122/* XXX: optimize */
50013115
PM
3123void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3124 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3125{
3126 uint8_t v = val;
50013115
PM
3127 MemTxResult r;
3128
3129 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3130 if (result) {
3131 *result = r;
3132 }
3133}
3134
3135void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3136{
3137 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094
FB
3138}
3139
733f0b02 3140/* warning: addr must be aligned */
50013115
PM
3141static inline void address_space_stw_internal(AddressSpace *as,
3142 hwaddr addr, uint32_t val,
3143 MemTxAttrs attrs,
3144 MemTxResult *result,
3145 enum device_endian endian)
aab33094 3146{
733f0b02 3147 uint8_t *ptr;
5c8a00ce 3148 MemoryRegion *mr;
149f54b5
PB
3149 hwaddr l = 2;
3150 hwaddr addr1;
50013115 3151 MemTxResult r;
733f0b02 3152
41063e1e 3153 rcu_read_lock();
5ce5944d 3154 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3155 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3156#if defined(TARGET_WORDS_BIGENDIAN)
3157 if (endian == DEVICE_LITTLE_ENDIAN) {
3158 val = bswap16(val);
3159 }
3160#else
3161 if (endian == DEVICE_BIG_ENDIAN) {
3162 val = bswap16(val);
3163 }
3164#endif
50013115 3165 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3166 } else {
733f0b02 3167 /* RAM case */
5c8a00ce 3168 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 3169 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3170 switch (endian) {
3171 case DEVICE_LITTLE_ENDIAN:
3172 stw_le_p(ptr, val);
3173 break;
3174 case DEVICE_BIG_ENDIAN:
3175 stw_be_p(ptr, val);
3176 break;
3177 default:
3178 stw_p(ptr, val);
3179 break;
3180 }
845b6214 3181 invalidate_and_set_dirty(mr, addr1, 2);
50013115
PM
3182 r = MEMTX_OK;
3183 }
3184 if (result) {
3185 *result = r;
733f0b02 3186 }
41063e1e 3187 rcu_read_unlock();
aab33094
FB
3188}
3189
50013115
PM
3190void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3191 MemTxAttrs attrs, MemTxResult *result)
3192{
3193 address_space_stw_internal(as, addr, val, attrs, result,
3194 DEVICE_NATIVE_ENDIAN);
3195}
3196
3197void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3198 MemTxAttrs attrs, MemTxResult *result)
3199{
3200 address_space_stw_internal(as, addr, val, attrs, result,
3201 DEVICE_LITTLE_ENDIAN);
3202}
3203
3204void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3205 MemTxAttrs attrs, MemTxResult *result)
3206{
3207 address_space_stw_internal(as, addr, val, attrs, result,
3208 DEVICE_BIG_ENDIAN);
3209}
3210
5ce5944d 3211void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3212{
50013115 3213 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3214}
3215
5ce5944d 3216void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3217{
50013115 3218 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3219}
3220
5ce5944d 3221void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3222{
50013115 3223 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3224}
3225
aab33094 3226/* XXX: optimize */
50013115
PM
3227void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3228 MemTxAttrs attrs, MemTxResult *result)
aab33094 3229{
50013115 3230 MemTxResult r;
aab33094 3231 val = tswap64(val);
50013115
PM
3232 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3233 if (result) {
3234 *result = r;
3235 }
aab33094
FB
3236}
3237
50013115
PM
3238void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3239 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3240{
50013115 3241 MemTxResult r;
1e78bcc1 3242 val = cpu_to_le64(val);
50013115
PM
3243 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3244 if (result) {
3245 *result = r;
3246 }
3247}
3248void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3249 MemTxAttrs attrs, MemTxResult *result)
3250{
3251 MemTxResult r;
3252 val = cpu_to_be64(val);
3253 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3254 if (result) {
3255 *result = r;
3256 }
3257}
3258
3259void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3260{
3261 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3262}
3263
3264void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3265{
3266 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3267}
3268
f606604f 3269void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3270{
50013115 3271 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3272}
3273
5e2972fd 3274/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3275int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3276 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3277{
3278 int l;
a8170e5e 3279 hwaddr phys_addr;
9b3c35e0 3280 target_ulong page;
13eb76e0
FB
3281
3282 while (len > 0) {
3283 page = addr & TARGET_PAGE_MASK;
f17ec444 3284 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
3285 /* if no physical page mapped, return an error */
3286 if (phys_addr == -1)
3287 return -1;
3288 l = (page + TARGET_PAGE_SIZE) - addr;
3289 if (l > len)
3290 l = len;
5e2972fd 3291 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
3292 if (is_write) {
3293 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3294 } else {
5c9eb028
PM
3295 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3296 buf, l, 0);
2e38847b 3297 }
13eb76e0
FB
3298 len -= l;
3299 buf += l;
3300 addr += l;
3301 }
3302 return 0;
3303}
a68fe89c 3304#endif
13eb76e0 3305
8e4a424b
BS
3306/*
3307 * A helper function for the _utterly broken_ virtio device model to find out if
3308 * it's running on a big endian machine. Don't do this at home kids!
3309 */
98ed8ecf
GK
3310bool target_words_bigendian(void);
3311bool target_words_bigendian(void)
8e4a424b
BS
3312{
3313#if defined(TARGET_WORDS_BIGENDIAN)
3314 return true;
3315#else
3316 return false;
3317#endif
3318}
3319
76f35538 3320#ifndef CONFIG_USER_ONLY
a8170e5e 3321bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3322{
5c8a00ce 3323 MemoryRegion *mr;
149f54b5 3324 hwaddr l = 1;
41063e1e 3325 bool res;
76f35538 3326
41063e1e 3327 rcu_read_lock();
5c8a00ce
PB
3328 mr = address_space_translate(&address_space_memory,
3329 phys_addr, &phys_addr, &l, false);
76f35538 3330
41063e1e
PB
3331 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3332 rcu_read_unlock();
3333 return res;
76f35538 3334}
bd2fa51f
MH
3335
3336void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3337{
3338 RAMBlock *block;
3339
0dc3f44a
MD
3340 rcu_read_lock();
3341 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 3342 func(block->host, block->offset, block->used_length, opaque);
bd2fa51f 3343 }
0dc3f44a 3344 rcu_read_unlock();
bd2fa51f 3345}
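/*
 * Illustrative sketch (not part of exec.c): a RAMBlockIterFunc callback for
 * qemu_ram_foreach_block() above.  The callback name and the printed text
 * are assumptions for the example; the parameter list matches the call made
 * in the loop above (host pointer, offset, used length, opaque).
 */
static void example_dump_block(void *host_addr, ram_addr_t offset,
                               ram_addr_t length, void *opaque)
{
    fprintf(stderr, "ram block at offset " RAM_ADDR_FMT
            ", %" PRIu64 " bytes, host %p\n",
            offset, (uint64_t)length, host_addr);
}
/* Typical use: qemu_ram_foreach_block(example_dump_block, NULL); */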
ec3f8c99 3346#endif