54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004    4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c   22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
4485bd26 29#if !defined(CONFIG_USER_ONLY)
47c8ca53 30#include "hw/boards.h"
4485bd26 31#endif
cc9e98cb 32#include "hw/qdev.h"
1de7afc9 33#include "qemu/osdep.h"
9c17d615 34#include "sysemu/kvm.h"
2ff3de68 35#include "sysemu/sysemu.h"
0d09e41a 36#include "hw/xen/xen.h"
1de7afc9   37#include "qemu/timer.h"
38#include "qemu/config-file.h"
75a34036 39#include "qemu/error-report.h"
022c62cb 40#include "exec/memory.h"
9c17d615 41#include "sysemu/dma.h"
022c62cb 42#include "exec/address-spaces.h"
53a5960a   43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
432d268c 45#else /* !CONFIG_USER_ONLY */
9c17d615 46#include "sysemu/xen-mapcache.h"
6506e4f9 47#include "trace.h"
53a5960a 48#endif
0d6d3c87 49#include "exec/cpu-all.h"
0dc3f44a 50#include "qemu/rcu_queue.h"
022c62cb 51#include "exec/cputlb.h"
5b6dd868 52#include "translate-all.h"
0cac1b66 53
022c62cb 54#include "exec/memory-internal.h"
220c3ebd 55#include "exec/ram_addr.h"
67d95c15 56
b35ba30f   57#include "qemu/range.h"
58
db7b5426 59//#define DEBUG_SUBPAGE
1196be37 60
e2eef170 61#if !defined(CONFIG_USER_ONLY)
0dc3f44a   62/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
63 * are protected by the ramlist lock.
64 */
0d53d9fe 65RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a   66
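/* Illustrative sketch (not part of the original file): the canonical read-side
 * pattern for ram_list under the locking rule stated above.  Readers only need
 * an RCU critical section; writers must additionally hold the ramlist lock.
 * The helper name is hypothetical.
 */
#if 0
static int example_count_ram_blocks(void)
{
    RAMBlock *block;
    int count = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        count++;
    }
    rcu_read_unlock();
    return count;
}
#endif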
67static MemoryRegion *system_memory;
309cb471 68static MemoryRegion *system_io;
62152b8a 69
f6790af6   70AddressSpace address_space_io;
71AddressSpace address_space_memory;
2673a5da 72
0844e007 73MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 74static MemoryRegion io_mem_unassigned;
0e0df1e2 75
7bd4f430   76/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
77#define RAM_PREALLOC (1 << 0)
78
dbcb8981   79/* RAM is mmap-ed with MAP_SHARED */
80#define RAM_SHARED (1 << 1)
81
62be4e3a   82/* Only a portion of RAM (used_length) is actually used, and migrated.
83 * This used_length size can change across reboots.
84 */
85#define RAM_RESIZEABLE (1 << 2)
86
e2eef170 87#endif
9fa3e853 88
bdc44640 89struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601   90/* current CPU in the current thread. It is only valid inside
91 cpu_exec() */
4917cf44 92DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 93/* 0 = Do not count executed instructions.
bf20dc07 94 1 = Precise instruction counting.
2e70f6ef 95 2 = Adaptive rate instruction counting. */
5708fc66 96int use_icount;
6a00d601 97
e2eef170 98#if !defined(CONFIG_USER_ONLY)
4346ae3e 99
1db8abb1  100typedef struct PhysPageEntry PhysPageEntry;
101
102struct PhysPageEntry {
9736e55b  103    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 104 uint32_t skip : 6;
9736e55b 105 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 106 uint32_t ptr : 26;
1db8abb1  107};
108
8b795765  109#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
110
03f49957 111/* Size of the L2 (and L3, etc) page tables. */
57271d63 112#define ADDR_SPACE_BITS 64
03f49957 113
026736ce 114#define P_L2_BITS 9
03f49957  115#define P_L2_SIZE (1 << P_L2_BITS)
116
117#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
118
119typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 120
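/* Worked example (not part of the original file): with ADDR_SPACE_BITS == 64,
 * P_L2_BITS == 9 and, say, TARGET_PAGE_BITS == 12 (4 KiB target pages),
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = (51 / 9) + 1 = 6, i.e. six
 * radix-tree levels of 512 PhysPageEntry slots each cover the 52-bit page
 * frame number space.
 */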
53cb28cb 121typedef struct PhysPageMap {
79e2b9ae  122    struct rcu_head rcu;
123
53cb28cb  124    unsigned sections_nb;
125 unsigned sections_nb_alloc;
126 unsigned nodes_nb;
127 unsigned nodes_nb_alloc;
128 Node *nodes;
129 MemoryRegionSection *sections;
130} PhysPageMap;
131
1db8abb1 132struct AddressSpaceDispatch {
79e2b9ae  133    struct rcu_head rcu;
134
1db8abb1  135    /* This is a multi-level map on the physical address space.
136 * The bottom level has pointers to MemoryRegionSections.
137 */
138 PhysPageEntry phys_map;
53cb28cb 139 PhysPageMap map;
acc9d80b 140 AddressSpace *as;
1db8abb1  141};
142
90260c6c  143#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
144typedef struct subpage_t {
145 MemoryRegion iomem;
acc9d80b 146 AddressSpace *as;
90260c6c  147    hwaddr base;
148 uint16_t sub_section[TARGET_PAGE_SIZE];
149} subpage_t;
150
b41aac4f  151#define PHYS_SECTION_UNASSIGNED 0
152#define PHYS_SECTION_NOTDIRTY 1
153#define PHYS_SECTION_ROM 2
154#define PHYS_SECTION_WATCH 3
5312bd8b 155
e2eef170 156static void io_mem_init(void);
62152b8a 157static void memory_map_init(void);
09daed84 158static void tcg_commit(MemoryListener *listener);
e2eef170 159
1ec9b909 160static MemoryRegion io_mem_watch;
6658ffb8 161#endif
fd6ce8f6 162
6d9a1304 163#if !defined(CONFIG_USER_ONLY)
d6f2ea22 164
53cb28cb 165static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 166{
53cb28cb
MA
167 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
168 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
169 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
170 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 171 }
f7bf5461
AK
172}
173
db94604b 174static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
f7bf5461
AK
175{
176 unsigned i;
8b795765 177 uint32_t ret;
db94604b
PB
178 PhysPageEntry e;
179 PhysPageEntry *p;
f7bf5461 180
53cb28cb 181 ret = map->nodes_nb++;
db94604b 182 p = map->nodes[ret];
f7bf5461 183 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 184 assert(ret != map->nodes_nb_alloc);
db94604b
PB
185
186 e.skip = leaf ? 0 : 1;
187 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
03f49957 188 for (i = 0; i < P_L2_SIZE; ++i) {
db94604b 189 memcpy(&p[i], &e, sizeof(e));
d6f2ea22 190 }
f7bf5461 191 return ret;
d6f2ea22
AK
192}
193
53cb28cb
MA
194static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
195 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 196 int level)
f7bf5461
AK
197{
198 PhysPageEntry *p;
03f49957 199 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 200
9736e55b 201 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
db94604b 202 lp->ptr = phys_map_node_alloc(map, level == 0);
92e873b9 203 }
db94604b 204 p = map->nodes[lp->ptr];
03f49957 205 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 206
03f49957 207 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 208 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 209 lp->skip = 0;
c19e8800 210 lp->ptr = leaf;
07f07b31
AK
211 *index += step;
212 *nb -= step;
2999097b 213 } else {
53cb28cb 214 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
215 }
216 ++lp;
f7bf5461
AK
217 }
218}
219
ac1970fb 220static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 221 hwaddr index, hwaddr nb,
2999097b 222 uint16_t leaf)
f7bf5461 223{
2999097b 224 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 225 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 226
53cb28cb 227 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
228}
229
b35ba30f  230/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
231 * and update our entry so we can skip it and go directly to the destination.
232 */
233static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
234{
235 unsigned valid_ptr = P_L2_SIZE;
236 int valid = 0;
237 PhysPageEntry *p;
238 int i;
239
240 if (lp->ptr == PHYS_MAP_NODE_NIL) {
241 return;
242 }
243
244 p = nodes[lp->ptr];
245 for (i = 0; i < P_L2_SIZE; i++) {
246 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
247 continue;
248 }
249
250 valid_ptr = i;
251 valid++;
252 if (p[i].skip) {
253 phys_page_compact(&p[i], nodes, compacted);
254 }
255 }
256
257 /* We can only compress if there's only one child. */
258 if (valid != 1) {
259 return;
260 }
261
262 assert(valid_ptr < P_L2_SIZE);
263
264 /* Don't compress if it won't fit in the # of bits we have. */
265 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
266 return;
267 }
268
269 lp->ptr = p[valid_ptr].ptr;
270 if (!p[valid_ptr].skip) {
271 /* If our only child is a leaf, make this a leaf. */
272 /* By design, we should have made this node a leaf to begin with so we
273 * should never reach here.
274 * But since it's so simple to handle this, let's do it just in case we
275 * change this rule.
276 */
277 lp->skip = 0;
278 } else {
279 lp->skip += p[valid_ptr].skip;
280 }
281}
282
283static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
284{
285 DECLARE_BITMAP(compacted, nodes_nb);
286
287 if (d->phys_map.skip) {
53cb28cb 288 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
289 }
290}
291
97115a8d 292static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 293 Node *nodes, MemoryRegionSection *sections)
92e873b9 294{
31ab2b4a 295 PhysPageEntry *p;
97115a8d 296 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 297 int i;
f1f6e3b8 298
9736e55b 299 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 300 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 301 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 302 }
9affd6fc 303 p = nodes[lp.ptr];
03f49957 304 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 305 }
b35ba30f
MT
306
307 if (sections[lp.ptr].size.hi ||
308 range_covers_byte(sections[lp.ptr].offset_within_address_space,
309 sections[lp.ptr].size.lo, addr)) {
310 return &sections[lp.ptr];
311 } else {
312 return &sections[PHYS_SECTION_UNASSIGNED];
313 }
f3705d53
AK
314}
315
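/* Illustrative note (not part of the original file): phys_page_find() walks
 * the tree top-down on index = addr >> TARGET_PAGE_BITS.  With the defaults
 * above (six 9-bit levels) and no compaction (every skip == 1), successive
 * hops consume bits [45..53], [36..44], ..., [0..8] of that index.  After
 * phys_page_compact() a node may carry skip > 1, and a single hop then
 * bypasses that many levels at once.
 */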
e5548617  316bool memory_region_is_unassigned(MemoryRegion *mr)
317{
2a8e7499 318 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 319 && mr != &io_mem_watch;
fd6ce8f6 320}
149f54b5 321
79e2b9ae 322/* Called from RCU critical section */
c7086b4a 323static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
324 hwaddr addr,
325 bool resolve_subpage)
9f029603 326{
90260c6c
JK
327 MemoryRegionSection *section;
328 subpage_t *subpage;
329
53cb28cb 330 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
331 if (resolve_subpage && section->mr->subpage) {
332 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 333 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
334 }
335 return section;
9f029603
JK
336}
337
79e2b9ae 338/* Called from RCU critical section */
90260c6c 339static MemoryRegionSection *
c7086b4a 340address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 341 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
342{
343 MemoryRegionSection *section;
a87f3954 344 Int128 diff;
149f54b5 345
c7086b4a 346 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
347 /* Compute offset within MemoryRegionSection */
348 addr -= section->offset_within_address_space;
349
350 /* Compute offset within MemoryRegion */
351 *xlat = addr + section->offset_within_region;
352
353 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 354 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
355 return section;
356}
90260c6c 357
a87f3954  358static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
359{
360 if (memory_region_is_ram(mr)) {
361 return !(is_write && mr->readonly);
362 }
363 if (memory_region_is_romd(mr)) {
364 return !is_write;
365 }
366
367 return false;
368}
369
41063e1e 370/* Called from RCU critical section */
5c8a00ce  371MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
372 hwaddr *xlat, hwaddr *plen,
373 bool is_write)
90260c6c 374{
30951157
AK
375 IOMMUTLBEntry iotlb;
376 MemoryRegionSection *section;
377 MemoryRegion *mr;
30951157
AK
378
379 for (;;) {
79e2b9ae
PB
380 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
381 section = address_space_translate_internal(d, addr, &addr, plen, true);
30951157
AK
382 mr = section->mr;
383
384 if (!mr->iommu_ops) {
385 break;
386 }
387
8d7b8cb9 388 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
389 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
390 | (addr & iotlb.addr_mask));
23820dbf 391 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
30951157
AK
392 if (!(iotlb.perm & (1 << is_write))) {
393 mr = &io_mem_unassigned;
394 break;
395 }
396
397 as = iotlb.target_as;
398 }
399
fe680d0d 400 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954 401 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
23820dbf 402 *plen = MIN(page, *plen);
a87f3954
PB
403 }
404
30951157
AK
405 *xlat = addr;
406 return mr;
90260c6c
JK
407}
408
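/* Usage sketch (not part of the original file): a typical caller resolves an
 * address inside an RCU critical section and only then decides between direct
 * RAM access and MemoryRegion ops.  The helper below is hypothetical.
 */
#if 0
static bool example_peek_byte(AddressSpace *as, hwaddr addr, uint8_t *val)
{
    hwaddr xlat, len = 1;
    MemoryRegion *mr;
    bool direct;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &len, false);
    direct = memory_access_is_direct(mr, false);
    if (direct) {
        *val = ldub_p(qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat));
    }
    rcu_read_unlock();
    return direct;
}
#endif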
79e2b9ae 409/* Called from RCU critical section */
90260c6c 410MemoryRegionSection *
9d82b5a7  411address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
412 hwaddr *xlat, hwaddr *plen)
90260c6c 413{
30951157 414 MemoryRegionSection *section;
9d82b5a7
PB
415 section = address_space_translate_internal(cpu->memory_dispatch,
416 addr, xlat, plen, false);
30951157
AK
417
418 assert(!section->mr->iommu_ops);
419 return section;
90260c6c 420}
5b6dd868 421#endif
fd6ce8f6 422
b170fce3 423#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
424
425static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 426{
259186a7 427 CPUState *cpu = opaque;
a513fe19 428
5b6dd868
BS
429 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
430 version_id is increased. */
259186a7 431 cpu->interrupt_request &= ~0x01;
c01a71c1 432 tlb_flush(cpu, 1);
5b6dd868
BS
433
434 return 0;
a513fe19 435}
7501267e 436
6c3bff0e  437static int cpu_common_pre_load(void *opaque)
438{
439 CPUState *cpu = opaque;
440
adee6424 441 cpu->exception_index = -1;
6c3bff0e
PD
442
443 return 0;
444}
445
446static bool cpu_common_exception_index_needed(void *opaque)
447{
448 CPUState *cpu = opaque;
449
adee6424 450 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
451}
452
453static const VMStateDescription vmstate_cpu_common_exception_index = {
454 .name = "cpu_common/exception_index",
455 .version_id = 1,
456 .minimum_version_id = 1,
457 .fields = (VMStateField[]) {
458 VMSTATE_INT32(exception_index, CPUState),
459 VMSTATE_END_OF_LIST()
460 }
461};
462
1a1562f5 463const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
464 .name = "cpu_common",
465 .version_id = 1,
466 .minimum_version_id = 1,
6c3bff0e 467 .pre_load = cpu_common_pre_load,
5b6dd868 468 .post_load = cpu_common_post_load,
35d08458 469 .fields = (VMStateField[]) {
259186a7
AF
470 VMSTATE_UINT32(halted, CPUState),
471 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 472 VMSTATE_END_OF_LIST()
6c3bff0e
PD
473 },
474 .subsections = (VMStateSubsection[]) {
475 {
476 .vmsd = &vmstate_cpu_common_exception_index,
477 .needed = cpu_common_exception_index_needed,
478 } , {
479 /* empty */
480 }
5b6dd868
BS
481 }
482};
1a1562f5 483
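/* Illustrative note (not part of the original file): the subsection above is
 * only transmitted when its .needed callback returns true on the source, so a
 * stream from a QEMU without "cpu_common/exception_index" still loads; in that
 * case cpu_common_pre_load() has already reset cpu->exception_index to -1.
 */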
5b6dd868 484#endif
ea041c0e 485
38d8f5c8 486CPUState *qemu_get_cpu(int index)
ea041c0e 487{
bdc44640 488 CPUState *cpu;
ea041c0e 489
bdc44640 490 CPU_FOREACH(cpu) {
55e5c285 491 if (cpu->cpu_index == index) {
bdc44640 492 return cpu;
55e5c285 493 }
ea041c0e 494 }
5b6dd868 495
bdc44640 496 return NULL;
ea041c0e
FB
497}
498
09daed84
EI
499#if !defined(CONFIG_USER_ONLY)
500void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
501{
502 /* We only support one address space per cpu at the moment. */
503 assert(cpu->as == as);
504
505 if (cpu->tcg_as_listener) {
506 memory_listener_unregister(cpu->tcg_as_listener);
507 } else {
508 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
509 }
510 cpu->tcg_as_listener->commit = tcg_commit;
511 memory_listener_register(cpu->tcg_as_listener, as);
512}
513#endif
514
5b6dd868 515void cpu_exec_init(CPUArchState *env)
ea041c0e 516{
5b6dd868 517 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 518 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 519 CPUState *some_cpu;
5b6dd868
BS
520 int cpu_index;
521
522#if defined(CONFIG_USER_ONLY)
523 cpu_list_lock();
524#endif
5b6dd868 525 cpu_index = 0;
bdc44640 526 CPU_FOREACH(some_cpu) {
5b6dd868
BS
527 cpu_index++;
528 }
55e5c285 529 cpu->cpu_index = cpu_index;
1b1ed8dc 530 cpu->numa_node = 0;
f0c3c505 531 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 532 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 533#ifndef CONFIG_USER_ONLY
09daed84 534 cpu->as = &address_space_memory;
5b6dd868 535 cpu->thread_id = qemu_get_thread_id();
cba70549 536 cpu_reload_memory_map(cpu);
5b6dd868 537#endif
bdc44640 538 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
539#if defined(CONFIG_USER_ONLY)
540 cpu_list_unlock();
541#endif
e0d47944
AF
542 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
543 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
544 }
5b6dd868 545#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
546 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
547 cpu_save, cpu_load, env);
b170fce3 548 assert(cc->vmsd == NULL);
e0d47944 549 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 550#endif
b170fce3
AF
551 if (cc->vmsd != NULL) {
552 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
553 }
ea041c0e
FB
554}
555
94df27fd 556#if defined(CONFIG_USER_ONLY)
00b941e5 557static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
558{
559 tb_invalidate_phys_page_range(pc, pc + 1, 0);
560}
561#else
00b941e5 562static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 563{
e8262a1b
MF
564 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
565 if (phys != -1) {
09daed84 566 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 567 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 568 }
1e7855a5 569}
c27004ec 570#endif
d720b93d 571
c527ee8f 572#if defined(CONFIG_USER_ONLY)
75a34036 573void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
574
575{
576}
577
3ee887e8  578int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
579 int flags)
580{
581 return -ENOSYS;
582}
583
584void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
585{
586}
587
75a34036 588int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
589 int flags, CPUWatchpoint **watchpoint)
590{
591 return -ENOSYS;
592}
593#else
6658ffb8 594/* Add a watchpoint. */
75a34036 595int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 596 int flags, CPUWatchpoint **watchpoint)
6658ffb8 597{
c0ce998e 598 CPUWatchpoint *wp;
6658ffb8 599
05068c0d 600 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 601 if (len == 0 || (addr + len - 1) < addr) {
75a34036  602        error_report("tried to set invalid watchpoint at %"
603 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
604 return -EINVAL;
605 }
7267c094 606 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
607
608 wp->vaddr = addr;
05068c0d 609 wp->len = len;
a1d1bb31
AL
610 wp->flags = flags;
611
2dc9f411 612 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
613 if (flags & BP_GDB) {
614 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
615 } else {
616 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
617 }
6658ffb8 618
31b030d4 619 tlb_flush_page(cpu, addr);
a1d1bb31
AL
620
621 if (watchpoint)
622 *watchpoint = wp;
623 return 0;
6658ffb8
PB
624}
625
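/* Worked example (not part of the original file) for the range check in
 * cpu_watchpoint_insert() above: with a 64-bit vaddr, addr = 0xfffffffffffffffc
 * and len = 8 give addr + len - 1 = 0x3 < addr, so the range wraps around the
 * top of the address space and is rejected with -EINVAL, while
 * addr = 0xfffffffffffffff8 with len = 8 ends exactly at ~0 and is accepted.
 */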
a1d1bb31 626/* Remove a specific watchpoint. */
75a34036 627int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 628 int flags)
6658ffb8 629{
a1d1bb31 630 CPUWatchpoint *wp;
6658ffb8 631
ff4700b0 632 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 633 if (addr == wp->vaddr && len == wp->len
6e140f28 634 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 635 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
636 return 0;
637 }
638 }
a1d1bb31 639 return -ENOENT;
6658ffb8
PB
640}
641
a1d1bb31 642/* Remove a specific watchpoint by reference. */
75a34036 643void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 644{
ff4700b0 645 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 646
31b030d4 647 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 648
7267c094 649 g_free(watchpoint);
a1d1bb31
AL
650}
651
652/* Remove all matching watchpoints. */
75a34036 653void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 654{
c0ce998e 655 CPUWatchpoint *wp, *next;
a1d1bb31 656
ff4700b0 657 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
658 if (wp->flags & mask) {
659 cpu_watchpoint_remove_by_ref(cpu, wp);
660 }
c0ce998e 661 }
7d03f82f 662}
05068c0d
PM
663
664/* Return true if this watchpoint address matches the specified
665 * access (ie the address range covered by the watchpoint overlaps
666 * partially or completely with the address range covered by the
667 * access).
668 */
669static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
670 vaddr addr,
671 vaddr len)
672{
673 /* We know the lengths are non-zero, but a little caution is
674 * required to avoid errors in the case where the range ends
675 * exactly at the top of the address space and so addr + len
676 * wraps round to zero.
677 */
678 vaddr wpend = wp->vaddr + wp->len - 1;
679 vaddr addrend = addr + len - 1;
680
681 return !(addr > wpend || wp->vaddr > addrend);
682}
683
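/* Worked example (not part of the original file): a watchpoint with
 * vaddr = 0x1000 and len = 4 has wpend = 0x1003.  An access at addr = 0x1002
 * with len = 2 has addrend = 0x1003; neither addr > wpend nor
 * wp->vaddr > addrend holds, so the ranges overlap and the function returns
 * true.  An access starting at 0x1004 gives addr > wpend and returns false.
 */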
c527ee8f 684#endif
7d03f82f 685
a1d1bb31 686/* Add a breakpoint. */
b3310ab3 687int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 688 CPUBreakpoint **breakpoint)
4c3a88a2 689{
c0ce998e 690 CPUBreakpoint *bp;
3b46e624 691
7267c094 692 bp = g_malloc(sizeof(*bp));
4c3a88a2 693
a1d1bb31
AL
694 bp->pc = pc;
695 bp->flags = flags;
696
2dc9f411 697 /* keep all GDB-injected breakpoints in front */
00b941e5 698 if (flags & BP_GDB) {
f0c3c505 699 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 700 } else {
f0c3c505 701 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 702 }
3b46e624 703
f0c3c505 704 breakpoint_invalidate(cpu, pc);
a1d1bb31 705
00b941e5 706 if (breakpoint) {
a1d1bb31 707 *breakpoint = bp;
00b941e5 708 }
4c3a88a2 709 return 0;
4c3a88a2
FB
710}
711
a1d1bb31 712/* Remove a specific breakpoint. */
b3310ab3 713int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 714{
a1d1bb31
AL
715 CPUBreakpoint *bp;
716
f0c3c505 717 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 718 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 719 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
720 return 0;
721 }
7d03f82f 722 }
a1d1bb31 723 return -ENOENT;
7d03f82f
EI
724}
725
a1d1bb31 726/* Remove a specific breakpoint by reference. */
b3310ab3 727void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 728{
f0c3c505
AF
729 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
730
731 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 732
7267c094 733 g_free(breakpoint);
a1d1bb31
AL
734}
735
736/* Remove all matching breakpoints. */
b3310ab3 737void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 738{
c0ce998e 739 CPUBreakpoint *bp, *next;
a1d1bb31 740
f0c3c505 741 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
742 if (bp->flags & mask) {
743 cpu_breakpoint_remove_by_ref(cpu, bp);
744 }
c0ce998e 745 }
4c3a88a2
FB
746}
747
c33a346e
FB
748/* enable or disable single step mode. EXCP_DEBUG is returned by the
749 CPU loop after each instruction */
3825b28f 750void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 751{
ed2803da
AF
752 if (cpu->singlestep_enabled != enabled) {
753 cpu->singlestep_enabled = enabled;
754 if (kvm_enabled()) {
38e478ec 755 kvm_update_guest_debug(cpu, 0);
ed2803da 756 } else {
ccbb4d44 757 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 758 /* XXX: only flush what is necessary */
38e478ec 759 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
760 tb_flush(env);
761 }
c33a346e 762 }
c33a346e
FB
763}
764
a47dddd7 765void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
766{
767 va_list ap;
493ae1f0 768 va_list ap2;
7501267e
FB
769
770 va_start(ap, fmt);
493ae1f0 771 va_copy(ap2, ap);
7501267e
FB
772 fprintf(stderr, "qemu: fatal: ");
773 vfprintf(stderr, fmt, ap);
774 fprintf(stderr, "\n");
878096ee 775 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
776 if (qemu_log_enabled()) {
777 qemu_log("qemu: fatal: ");
778 qemu_log_vprintf(fmt, ap2);
779 qemu_log("\n");
a0762859 780 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 781 qemu_log_flush();
93fcfe39 782 qemu_log_close();
924edcae 783 }
493ae1f0 784 va_end(ap2);
f9373291 785 va_end(ap);
fd052bf6
RV
786#if defined(CONFIG_USER_ONLY)
787 {
788 struct sigaction act;
789 sigfillset(&act.sa_mask);
790 act.sa_handler = SIG_DFL;
791 sigaction(SIGABRT, &act, NULL);
792 }
793#endif
7501267e
FB
794 abort();
795}
796
0124311e 797#if !defined(CONFIG_USER_ONLY)
0dc3f44a 798/* Called from RCU critical section */
041603fe  799static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
800{
801 RAMBlock *block;
802
43771539 803 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 804 if (block && addr - block->offset < block->max_length) {
041603fe
PB
805 goto found;
806 }
0dc3f44a 807 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 808 if (addr - block->offset < block->max_length) {
041603fe
PB
809 goto found;
810 }
811 }
812
813 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
814 abort();
815
816found:
43771539  817    /* It is safe to write mru_block outside the iothread lock.  This
818 * is what happens:
819 *
820 * mru_block = xxx
821 * rcu_read_unlock()
822 * xxx removed from list
823 * rcu_read_lock()
824 * read mru_block
825 * mru_block = NULL;
826 * call_rcu(reclaim_ramblock, xxx);
827 * rcu_read_unlock()
828 *
829 * atomic_rcu_set is not needed here. The block was already published
830 * when it was placed into the list. Here we're just making an extra
831 * copy of the pointer.
832 */
041603fe
PB
833 ram_list.mru_block = block;
834 return block;
835}
836
a2f4d5be 837static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 838{
041603fe 839 ram_addr_t start1;
a2f4d5be
JQ
840 RAMBlock *block;
841 ram_addr_t end;
842
843 end = TARGET_PAGE_ALIGN(start + length);
844 start &= TARGET_PAGE_MASK;
d24981d3 845
0dc3f44a 846 rcu_read_lock();
041603fe
PB
847 block = qemu_get_ram_block(start);
848 assert(block == qemu_get_ram_block(end - 1));
1240be24 849 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
041603fe 850 cpu_tlb_reset_dirty_all(start1, length);
0dc3f44a 851 rcu_read_unlock();
d24981d3
JQ
852}
853
5579c7f3 854/* Note: start and end must be within the same ram block. */
a2f4d5be 855void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 856 unsigned client)
1ccde1cb 857{
1ccde1cb
FB
858 if (length == 0)
859 return;
c8d6f66a 860 cpu_physical_memory_clear_dirty_range_type(start, length, client);
f23db169 861
d24981d3 862 if (tcg_enabled()) {
a2f4d5be 863 tlb_reset_dirty_range_all(start, length);
5579c7f3 864 }
1ccde1cb
FB
865}
866
79e2b9ae 867/* Called from RCU critical section */
bb0e627a 868hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
869 MemoryRegionSection *section,
870 target_ulong vaddr,
871 hwaddr paddr, hwaddr xlat,
872 int prot,
873 target_ulong *address)
e5548617 874{
a8170e5e 875 hwaddr iotlb;
e5548617
BS
876 CPUWatchpoint *wp;
877
cc5bea60 878 if (memory_region_is_ram(section->mr)) {
e5548617
BS
879 /* Normal RAM. */
880 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 881 + xlat;
e5548617 882 if (!section->readonly) {
b41aac4f 883 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 884 } else {
b41aac4f 885 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
886 }
887 } else {
1b3fb98f 888 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 889 iotlb += xlat;
e5548617
BS
890 }
891
892 /* Make accesses to pages with watchpoints go via the
893 watchpoint trap routines. */
ff4700b0 894 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 895 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
896 /* Avoid trapping reads of pages with a write breakpoint. */
897 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 898 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
899 *address |= TLB_MMIO;
900 break;
901 }
902 }
903 }
904
905 return iotlb;
906}
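/* Illustrative note (not part of the original file): for RAM the value built
 * above is a page-aligned ram_addr_t with a pseudo-section index ORed into the
 * low bits, e.g. a writable page at ram_addr 0x1234000 yields
 * 0x1234000 | PHYS_SECTION_NOTDIRTY.  For MMIO the value is instead the
 * section's index in the dispatch map plus the offset, which the TLB code
 * later uses to recover the MemoryRegion.
 */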
9fa3e853
FB
907#endif /* defined(CONFIG_USER_ONLY) */
908
e2eef170 909#if !defined(CONFIG_USER_ONLY)
8da3ff18 910
c227f099 911static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 912 uint16_t section);
acc9d80b 913static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 914
a2b257d6  915static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
916 qemu_anon_ram_alloc;
91138037
MA
917
918/*
919 * Set a custom physical guest memory allocator.
920 * Accelerators with unusual needs may need this. Hopefully, we can
921 * get rid of it eventually.
922 */
a2b257d6 923void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
924{
925 phys_mem_alloc = alloc;
926}
927
53cb28cb  928static uint16_t phys_section_add(PhysPageMap *map,
929 MemoryRegionSection *section)
5312bd8b 930{
68f3f65b  931    /* The physical section number is ORed with a page-aligned
932 * pointer to produce the iotlb entries. Thus it should
933 * never overflow into the page-aligned value.
934 */
53cb28cb 935 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 936
53cb28cb
MA
937 if (map->sections_nb == map->sections_nb_alloc) {
938 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
939 map->sections = g_renew(MemoryRegionSection, map->sections,
940 map->sections_nb_alloc);
5312bd8b 941 }
53cb28cb 942 map->sections[map->sections_nb] = *section;
dfde4e6e 943 memory_region_ref(section->mr);
53cb28cb 944 return map->sections_nb++;
5312bd8b
AK
945}
946
058bc4b5  947static void phys_section_destroy(MemoryRegion *mr)
948{
dfde4e6e
PB
949 memory_region_unref(mr);
950
058bc4b5
PB
951 if (mr->subpage) {
952 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 953 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
954 g_free(subpage);
955 }
956}
957
6092666e 958static void phys_sections_free(PhysPageMap *map)
5312bd8b 959{
9affd6fc
PB
960 while (map->sections_nb > 0) {
961 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
962 phys_section_destroy(section->mr);
963 }
9affd6fc
PB
964 g_free(map->sections);
965 g_free(map->nodes);
5312bd8b
AK
966}
967
ac1970fb 968static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
969{
970 subpage_t *subpage;
a8170e5e 971 hwaddr base = section->offset_within_address_space
0f0cb164 972 & TARGET_PAGE_MASK;
97115a8d 973 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 974 d->map.nodes, d->map.sections);
0f0cb164
AK
975 MemoryRegionSection subsection = {
976 .offset_within_address_space = base,
052e87b0 977 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 978 };
a8170e5e 979 hwaddr start, end;
0f0cb164 980
f3705d53 981 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 982
f3705d53 983 if (!(existing->mr->subpage)) {
acc9d80b 984 subpage = subpage_init(d->as, base);
3be91e86 985 subsection.address_space = d->as;
0f0cb164 986 subsection.mr = &subpage->iomem;
ac1970fb 987 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 988 phys_section_add(&d->map, &subsection));
0f0cb164 989 } else {
f3705d53 990 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
991 }
992 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 993 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
994 subpage_register(subpage, start, end,
995 phys_section_add(&d->map, section));
0f0cb164
AK
996}
997
998
052e87b0  999static void register_multipage(AddressSpaceDispatch *d,
1000 MemoryRegionSection *section)
33417e70 1001{
a8170e5e 1002 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 1003 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
1004 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1005 TARGET_PAGE_BITS));
dd81124b 1006
733d5ef5
PB
1007 assert(num_pages);
1008 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
1009}
1010
ac1970fb 1011static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 1012{
89ae337a 1013 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1014 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1015 MemoryRegionSection now = *section, remain = *section;
052e87b0 1016 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1017
733d5ef5
PB
1018 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1019 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1020 - now.offset_within_address_space;
1021
052e87b0 1022 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1023 register_subpage(d, &now);
733d5ef5 1024 } else {
052e87b0 1025 now.size = int128_zero();
733d5ef5 1026 }
052e87b0
PB
1027 while (int128_ne(remain.size, now.size)) {
1028 remain.size = int128_sub(remain.size, now.size);
1029 remain.offset_within_address_space += int128_get64(now.size);
1030 remain.offset_within_region += int128_get64(now.size);
69b67646 1031 now = remain;
052e87b0 1032 if (int128_lt(remain.size, page_size)) {
733d5ef5 1033 register_subpage(d, &now);
88266249 1034 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1035 now.size = page_size;
ac1970fb 1036 register_subpage(d, &now);
69b67646 1037 } else {
052e87b0 1038 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1039 register_multipage(d, &now);
69b67646 1040 }
0f0cb164
AK
1041 }
1042}
1043
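/* Worked example (not part of the original file), assuming 4 KiB target pages:
 * a MemoryRegionSection starting at 0x1800 with size 0x5000 is registered by
 * the loop above as three pieces:
 *   - a head subpage covering [0x1800, 0x2000),
 *   - one multipage run covering the four full pages [0x2000, 0x6000),
 *   - a tail subpage covering [0x6000, 0x6800).
 */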
62a2744c 1044void qemu_flush_coalesced_mmio_buffer(void)
1045{
1046 if (kvm_enabled())
1047 kvm_flush_coalesced_mmio_buffer();
1048}
1049
b2a8658e 1050void qemu_mutex_lock_ramlist(void)
1051{
1052 qemu_mutex_lock(&ram_list.mutex);
1053}
1054
1055void qemu_mutex_unlock_ramlist(void)
1056{
1057 qemu_mutex_unlock(&ram_list.mutex);
1058}
1059
e1e84ba0 1060#ifdef __linux__
c902760f
MT
1061
1062#include <sys/vfs.h>
1063
1064#define HUGETLBFS_MAGIC 0x958458f6
1065
fc7a5800 1066static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1067{
1068 struct statfs fs;
1069 int ret;
1070
1071 do {
9742bf26 1072 ret = statfs(path, &fs);
c902760f
MT
1073 } while (ret != 0 && errno == EINTR);
1074
1075 if (ret != 0) {
fc7a5800
HT
1076 error_setg_errno(errp, errno, "failed to get page size of file %s",
1077 path);
9742bf26 1078 return 0;
c902760f
MT
1079 }
1080
1081 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1082 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1083
1084 return fs.f_bsize;
1085}
1086
04b16653 1087static void *file_ram_alloc(RAMBlock *block,
1088 ram_addr_t memory,
7f56e740
PB
1089 const char *path,
1090 Error **errp)
c902760f
MT
1091{
1092 char *filename;
8ca761f6
PF
1093 char *sanitized_name;
1094 char *c;
557529dd 1095 void *area = NULL;
c902760f 1096 int fd;
557529dd 1097 uint64_t hpagesize;
fc7a5800 1098 Error *local_err = NULL;
c902760f 1099
fc7a5800
HT
1100 hpagesize = gethugepagesize(path, &local_err);
1101 if (local_err) {
1102 error_propagate(errp, local_err);
f9a49dfa 1103 goto error;
c902760f 1104 }
a2b257d6 1105 block->mr->align = hpagesize;
c902760f
MT
1106
1107 if (memory < hpagesize) {
557529dd
HT
1108 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1109 "or larger than huge page size 0x%" PRIx64,
1110 memory, hpagesize);
1111 goto error;
c902760f
MT
1112 }
1113
1114 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1115 error_setg(errp,
1116 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1117 goto error;
c902760f
MT
1118 }
1119
8ca761f6 1120 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1121 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1122 for (c = sanitized_name; *c != '\0'; c++) {
1123 if (*c == '/')
1124 *c = '_';
1125 }
1126
1127 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1128 sanitized_name);
1129 g_free(sanitized_name);
c902760f
MT
1130
1131 fd = mkstemp(filename);
1132 if (fd < 0) {
7f56e740
PB
1133 error_setg_errno(errp, errno,
1134 "unable to create backing store for hugepages");
e4ada482 1135 g_free(filename);
f9a49dfa 1136 goto error;
c902760f
MT
1137 }
1138 unlink(filename);
e4ada482 1139 g_free(filename);
c902760f
MT
1140
1141 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1142
1143 /*
1144 * ftruncate is not supported by hugetlbfs in older
1145 * hosts, so don't bother bailing out on errors.
1146 * If anything goes wrong with it under other filesystems,
1147 * mmap will fail.
1148 */
7f56e740 1149 if (ftruncate(fd, memory)) {
9742bf26 1150 perror("ftruncate");
7f56e740 1151 }
c902760f 1152
dbcb8981
PB
1153 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1154 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1155 fd, 0);
c902760f 1156 if (area == MAP_FAILED) {
7f56e740
PB
1157 error_setg_errno(errp, errno,
1158 "unable to map backing store for hugepages");
9742bf26 1159 close(fd);
f9a49dfa 1160 goto error;
c902760f 1161 }
ef36fa14
MT
1162
1163 if (mem_prealloc) {
38183310 1164 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1165 }
1166
04b16653 1167 block->fd = fd;
c902760f 1168 return area;
f9a49dfa
MT
1169
1170error:
1171 if (mem_prealloc) {
81b07353 1172 error_report("%s", error_get_pretty(*errp));
f9a49dfa
MT
1173 exit(1);
1174 }
1175 return NULL;
c902760f
MT
1176}
1177#endif
1178
0dc3f44a 1179/* Called with the ramlist lock held. */
d17b5288 1180static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1181{
1182 RAMBlock *block, *next_block;
3e837b2c 1183 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1184
49cd9ac6 1185    assert(size != 0); /* it would hand out same offset multiple times */
1186
0dc3f44a 1187 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
04b16653 1188 return 0;
0d53d9fe 1189 }
04b16653 1190
0dc3f44a 1191 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
f15fbc4b 1192 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1193
62be4e3a 1194 end = block->offset + block->max_length;
04b16653 1195
0dc3f44a 1196 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
04b16653
AW
1197 if (next_block->offset >= end) {
1198 next = MIN(next, next_block->offset);
1199 }
1200 }
1201 if (next - end >= size && next - end < mingap) {
3e837b2c 1202 offset = end;
04b16653
AW
1203 mingap = next - end;
1204 }
1205 }
3e837b2c
AW
1206
1207 if (offset == RAM_ADDR_MAX) {
1208 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1209 (uint64_t)size);
1210 abort();
1211 }
1212
04b16653
AW
1213 return offset;
1214}
1215
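/* Worked example (not part of the original file): with existing blocks at
 * [0x0, 0x100000) and [0x300000, 0x400000), a request for 0x80000 bytes sees
 * two candidate gaps: [0x100000, 0x300000) of size 0x200000, and the unbounded
 * space after 0x400000.  The loop keeps the smallest gap that still fits, so
 * the new block is placed at offset 0x100000.
 */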
652d7ec2 1216ram_addr_t last_ram_offset(void)
d17b5288 1217{
1218 RAMBlock *block;
1219 ram_addr_t last = 0;
1220
0dc3f44a
MD
1221 rcu_read_lock();
1222 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
62be4e3a 1223 last = MAX(last, block->offset + block->max_length);
0d53d9fe 1224 }
0dc3f44a 1225 rcu_read_unlock();
d17b5288
AW
1226 return last;
1227}
1228
ddb97f1d 1229static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1230{
1231 int ret;
ddb97f1d
JB
1232
1233 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
47c8ca53 1234 if (!machine_dump_guest_core(current_machine)) {
ddb97f1d
JB
1235 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1236 if (ret) {
1237 perror("qemu_madvise");
1238 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1239 "but dump_guest_core=off specified\n");
1240 }
1241 }
1242}
1243
0dc3f44a 1244/* Called within an RCU critical section, or while the ramlist lock
1245 * is held.
1246 */
20cfe881 1247static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1248{
20cfe881 1249 RAMBlock *block;
84b89d78 1250
0dc3f44a 1251 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1252 if (block->offset == addr) {
20cfe881 1253 return block;
c5705a77
AK
1254 }
1255 }
20cfe881
HT
1256
1257 return NULL;
1258}
1259
ae3a7047 1260/* Called with iothread lock held. */
20cfe881 1261void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1262{
ae3a7047 1263 RAMBlock *new_block, *block;
20cfe881 1264
0dc3f44a 1265 rcu_read_lock();
ae3a7047 1266 new_block = find_ram_block(addr);
c5705a77
AK
1267 assert(new_block);
1268 assert(!new_block->idstr[0]);
84b89d78 1269
09e5ab63
AL
1270 if (dev) {
1271 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1272 if (id) {
1273 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1274 g_free(id);
84b89d78
CM
1275 }
1276 }
1277 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1278
0dc3f44a 1279 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1280 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1281 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1282 new_block->idstr);
1283 abort();
1284 }
1285 }
0dc3f44a 1286 rcu_read_unlock();
c5705a77
AK
1287}
1288
ae3a7047 1289/* Called with iothread lock held. */
20cfe881 1290void qemu_ram_unset_idstr(ram_addr_t addr)
1291{
ae3a7047 1292 RAMBlock *block;
20cfe881 1293
ae3a7047 1294    /* FIXME: arch_init.c assumes that this is not called throughout
1295 * migration. Ignore the problem since hot-unplug during migration
1296 * does not work anyway.
1297 */
1298
0dc3f44a 1299 rcu_read_lock();
ae3a7047 1300 block = find_ram_block(addr);
20cfe881
HT
1301 if (block) {
1302 memset(block->idstr, 0, sizeof(block->idstr));
1303 }
0dc3f44a 1304 rcu_read_unlock();
20cfe881
HT
1305}
1306
8490fc78 1307static int memory_try_enable_merging(void *addr, size_t len)
1308{
75cc7f01 1309 if (!machine_mem_merge(current_machine)) {
8490fc78
LC
1310 /* disabled by the user */
1311 return 0;
1312 }
1313
1314 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1315}
1316
62be4e3a 1317/* Only legal before guest might have detected the memory size: e.g. on
1318 * incoming migration, or right after reset.
1319 *
1320 * As the memory core doesn't know how memory is accessed, it is up to the
1321 * resize callback to update device state and/or add assertions to detect
1322 * misuse, if necessary.
1323 */
1324int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1325{
1326 RAMBlock *block = find_ram_block(base);
1327
1328 assert(block);
1329
129ddaf3
MT
1330 newsize = TARGET_PAGE_ALIGN(newsize);
1331
62be4e3a
MT
1332 if (block->used_length == newsize) {
1333 return 0;
1334 }
1335
1336 if (!(block->flags & RAM_RESIZEABLE)) {
1337 error_setg_errno(errp, EINVAL,
1338 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1339 " in != 0x" RAM_ADDR_FMT, block->idstr,
1340 newsize, block->used_length);
1341 return -EINVAL;
1342 }
1343
1344 if (block->max_length < newsize) {
1345 error_setg_errno(errp, EINVAL,
1346 "Length too large: %s: 0x" RAM_ADDR_FMT
1347 " > 0x" RAM_ADDR_FMT, block->idstr,
1348 newsize, block->max_length);
1349 return -EINVAL;
1350 }
1351
1352 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1353 block->used_length = newsize;
1354 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1355 memory_region_set_size(block->mr, newsize);
1356 if (block->resized) {
1357 block->resized(block->idstr, newsize, block->host);
1358 }
1359 return 0;
1360}
1361
ef701d7b 1362static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1363{
e1c57ab8 1364 RAMBlock *block;
0d53d9fe 1365 RAMBlock *last_block = NULL;
2152f5ca
JQ
1366 ram_addr_t old_ram_size, new_ram_size;
1367
1368 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1369
b2a8658e 1370 qemu_mutex_lock_ramlist();
9b8424d5 1371 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1372
1373 if (!new_block->host) {
1374 if (xen_enabled()) {
9b8424d5
MT
1375 xen_ram_alloc(new_block->offset, new_block->max_length,
1376 new_block->mr);
e1c57ab8 1377 } else {
9b8424d5 1378 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1379 &new_block->mr->align);
39228250 1380 if (!new_block->host) {
ef701d7b
HT
1381 error_setg_errno(errp, errno,
1382 "cannot set up guest memory '%s'",
1383 memory_region_name(new_block->mr));
1384 qemu_mutex_unlock_ramlist();
1385 return -1;
39228250 1386 }
9b8424d5 1387 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1388 }
c902760f 1389 }
94a6b54f 1390
0d53d9fe 1391    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
1392 * QLIST (which has an RCU-friendly variant) does not have insertion at
1393 * tail, so save the last element in last_block.
1394 */
0dc3f44a 1395 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
0d53d9fe 1396 last_block = block;
9b8424d5 1397 if (block->max_length < new_block->max_length) {
abb26d63
PB
1398 break;
1399 }
1400 }
1401 if (block) {
0dc3f44a 1402 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
0d53d9fe 1403 } else if (last_block) {
0dc3f44a 1404 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
0d53d9fe 1405 } else { /* list is empty */
0dc3f44a 1406 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
abb26d63 1407 }
0d6d3c87 1408 ram_list.mru_block = NULL;
94a6b54f 1409
0dc3f44a
MD
1410 /* Write list before version */
1411 smp_wmb();
f798b07f 1412 ram_list.version++;
b2a8658e 1413 qemu_mutex_unlock_ramlist();
f798b07f 1414
2152f5ca
JQ
1415 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1416
1417 if (new_ram_size > old_ram_size) {
1ab4c8ce 1418 int i;
ae3a7047
MD
1419
1420 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1ab4c8ce
JQ
1421 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1422 ram_list.dirty_memory[i] =
1423 bitmap_zero_extend(ram_list.dirty_memory[i],
1424 old_ram_size, new_ram_size);
1425 }
2152f5ca 1426 }
9b8424d5
MT
1427 cpu_physical_memory_set_dirty_range(new_block->offset,
1428 new_block->used_length);
94a6b54f 1429
a904c911
PB
1430 if (new_block->host) {
1431 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1432 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1433 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1434 if (kvm_enabled()) {
1435 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1436 }
e1c57ab8 1437 }
6f0437e8 1438
94a6b54f
PB
1439 return new_block->offset;
1440}
e9a1ab19 1441
0b183fc8 1442#ifdef __linux__
e1c57ab8 1443ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1444 bool share, const char *mem_path,
7f56e740 1445 Error **errp)
e1c57ab8
PB
1446{
1447 RAMBlock *new_block;
ef701d7b
HT
1448 ram_addr_t addr;
1449 Error *local_err = NULL;
e1c57ab8
PB
1450
1451 if (xen_enabled()) {
7f56e740
PB
1452 error_setg(errp, "-mem-path not supported with Xen");
1453 return -1;
e1c57ab8
PB
1454 }
1455
1456 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1457 /*
1458 * file_ram_alloc() needs to allocate just like
1459 * phys_mem_alloc, but we haven't bothered to provide
1460 * a hook there.
1461 */
7f56e740
PB
1462 error_setg(errp,
1463 "-mem-path not supported with this accelerator");
1464 return -1;
e1c57ab8
PB
1465 }
1466
1467 size = TARGET_PAGE_ALIGN(size);
1468 new_block = g_malloc0(sizeof(*new_block));
1469 new_block->mr = mr;
9b8424d5
MT
1470 new_block->used_length = size;
1471 new_block->max_length = size;
dbcb8981 1472 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1473 new_block->host = file_ram_alloc(new_block, size,
1474 mem_path, errp);
1475 if (!new_block->host) {
1476 g_free(new_block);
1477 return -1;
1478 }
1479
ef701d7b
HT
1480 addr = ram_block_add(new_block, &local_err);
1481 if (local_err) {
1482 g_free(new_block);
1483 error_propagate(errp, local_err);
1484 return -1;
1485 }
1486 return addr;
e1c57ab8 1487}
0b183fc8 1488#endif
e1c57ab8 1489
62be4e3a 1490static
1491ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1492 void (*resized)(const char*,
1493 uint64_t length,
1494 void *host),
1495 void *host, bool resizeable,
ef701d7b 1496 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1497{
1498 RAMBlock *new_block;
ef701d7b
HT
1499 ram_addr_t addr;
1500 Error *local_err = NULL;
e1c57ab8
PB
1501
1502 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1503 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1504 new_block = g_malloc0(sizeof(*new_block));
1505 new_block->mr = mr;
62be4e3a 1506 new_block->resized = resized;
9b8424d5
MT
1507 new_block->used_length = size;
1508 new_block->max_length = max_size;
62be4e3a 1509 assert(max_size >= size);
e1c57ab8
PB
1510 new_block->fd = -1;
1511 new_block->host = host;
1512 if (host) {
7bd4f430 1513 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1514 }
62be4e3a
MT
1515 if (resizeable) {
1516 new_block->flags |= RAM_RESIZEABLE;
1517 }
ef701d7b
HT
1518 addr = ram_block_add(new_block, &local_err);
1519 if (local_err) {
1520 g_free(new_block);
1521 error_propagate(errp, local_err);
1522 return -1;
1523 }
1524 return addr;
e1c57ab8
PB
1525}
1526
62be4e3a 1527ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1528 MemoryRegion *mr, Error **errp)
1529{
1530 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1531}
1532
ef701d7b 1533ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1534{
62be4e3a
MT
1535 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1536}
1537
1538ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1539 void (*resized)(const char*,
1540 uint64_t length,
1541 void *host),
1542 MemoryRegion *mr, Error **errp)
1543{
1544 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1545}
1546
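/* Usage sketch (not part of the original file): how a caller might combine
 * qemu_ram_alloc_resizeable() with qemu_ram_resize().  The callback, sizes and
 * helper names are hypothetical; real callers live in the memory core.
 */
#if 0
static void example_ram_resized(const char *idstr, uint64_t new_size, void *host)
{
    /* e.g. update a firmware-visible size descriptor */
}

static void example_setup(MemoryRegion *mr, Error **errp)
{
    Error *err = NULL;
    ram_addr_t addr;

    addr = qemu_ram_alloc_resizeable(16 * 1024 * 1024,   /* initial used_length */
                                     64 * 1024 * 1024,   /* max_length */
                                     example_ram_resized, mr, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* later, e.g. while loading incoming migration state: */
    qemu_ram_resize(addr, 32 * 1024 * 1024, errp);
}
#endif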
1f2e98b6 1547void qemu_ram_free_from_ptr(ram_addr_t addr)
1548{
1549 RAMBlock *block;
1550
b2a8658e 1551 qemu_mutex_lock_ramlist();
0dc3f44a 1552 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1f2e98b6 1553 if (addr == block->offset) {
0dc3f44a 1554 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1555 ram_list.mru_block = NULL;
0dc3f44a
MD
1556 /* Write list before version */
1557 smp_wmb();
f798b07f 1558 ram_list.version++;
43771539 1559 g_free_rcu(block, rcu);
b2a8658e 1560 break;
1f2e98b6
AW
1561 }
1562 }
b2a8658e 1563 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1564}
1565
43771539 1566static void reclaim_ramblock(RAMBlock *block)
1567{
1568 if (block->flags & RAM_PREALLOC) {
1569 ;
1570 } else if (xen_enabled()) {
1571 xen_invalidate_map_cache_entry(block->host);
1572#ifndef _WIN32
1573 } else if (block->fd >= 0) {
1574 munmap(block->host, block->max_length);
1575 close(block->fd);
1576#endif
1577 } else {
1578 qemu_anon_ram_free(block->host, block->max_length);
1579 }
1580 g_free(block);
1581}
1582
c227f099 1583void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1584{
04b16653
AW
1585 RAMBlock *block;
1586
b2a8658e 1587 qemu_mutex_lock_ramlist();
0dc3f44a 1588 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
04b16653 1589 if (addr == block->offset) {
0dc3f44a 1590 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1591 ram_list.mru_block = NULL;
0dc3f44a
MD
1592 /* Write list before version */
1593 smp_wmb();
f798b07f 1594 ram_list.version++;
43771539 1595 call_rcu(block, reclaim_ramblock, rcu);
b2a8658e 1596 break;
04b16653
AW
1597 }
1598 }
b2a8658e 1599 qemu_mutex_unlock_ramlist();
e9a1ab19
FB
1600}
1601
cd19cfa2 1602#ifndef _WIN32
1603void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1604{
1605 RAMBlock *block;
1606 ram_addr_t offset;
1607 int flags;
1608 void *area, *vaddr;
1609
0dc3f44a 1610 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
cd19cfa2 1611 offset = addr - block->offset;
9b8424d5 1612 if (offset < block->max_length) {
1240be24 1613 vaddr = ramblock_ptr(block, offset);
7bd4f430 1614 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1615 ;
dfeaf2ab
MA
1616 } else if (xen_enabled()) {
1617 abort();
cd19cfa2
HY
1618 } else {
1619 flags = MAP_FIXED;
3435f395 1620 if (block->fd >= 0) {
dbcb8981
PB
1621 flags |= (block->flags & RAM_SHARED ?
1622 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1623 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1624 flags, block->fd, offset);
cd19cfa2 1625 } else {
2eb9fbaa
MA
1626 /*
1627 * Remap needs to match alloc. Accelerators that
1628 * set phys_mem_alloc never remap. If they did,
1629 * we'd need a remap hook here.
1630 */
1631 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1632
cd19cfa2
HY
1633 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1634 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1635 flags, -1, 0);
cd19cfa2
HY
1636 }
1637 if (area != vaddr) {
f15fbc4b
AP
1638 fprintf(stderr, "Could not remap addr: "
1639 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1640 length, addr);
1641 exit(1);
1642 }
8490fc78 1643 memory_try_enable_merging(vaddr, length);
ddb97f1d 1644 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 1645 }
cd19cfa2
HY
1646 }
1647 }
1648}
1649#endif /* !_WIN32 */
1650
a35ba7be 1651int qemu_get_ram_fd(ram_addr_t addr)
1652{
ae3a7047
MD
1653 RAMBlock *block;
1654 int fd;
a35ba7be 1655
0dc3f44a 1656 rcu_read_lock();
ae3a7047
MD
1657 block = qemu_get_ram_block(addr);
1658 fd = block->fd;
0dc3f44a 1659 rcu_read_unlock();
ae3a7047 1660 return fd;
a35ba7be
PB
1661}
1662
3fd74b84 1663void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1664{
ae3a7047
MD
1665 RAMBlock *block;
1666 void *ptr;
3fd74b84 1667
0dc3f44a 1668 rcu_read_lock();
ae3a7047
MD
1669 block = qemu_get_ram_block(addr);
1670 ptr = ramblock_ptr(block, 0);
0dc3f44a 1671 rcu_read_unlock();
ae3a7047 1672 return ptr;
3fd74b84
DM
1673}
1674
1b5ec234 1675/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047 1676 * This should not be used for general purpose DMA.  Use address_space_map
1677 * or address_space_rw instead. For local memory (e.g. video ram) that the
1678 * device owns, use memory_region_get_ram_ptr.
0dc3f44a
MD
1679 *
1680 * By the time this function returns, the returned pointer is not protected
1681 * by RCU anymore. If the caller is not within an RCU critical section and
1682 * does not hold the iothread lock, it must have other means of protecting the
1683 * pointer, such as a reference to the region that includes the incoming
1684 * ram_addr_t.
1b5ec234
PB
1685 */
1686void *qemu_get_ram_ptr(ram_addr_t addr)
1687{
ae3a7047
MD
1688 RAMBlock *block;
1689 void *ptr;
1b5ec234 1690
0dc3f44a 1691 rcu_read_lock();
ae3a7047
MD
1692 block = qemu_get_ram_block(addr);
1693
1694 if (xen_enabled() && block->host == NULL) {
0d6d3c87 1695        /* We need to check if the requested address is in the RAM
1696 * because we don't want to map the entire memory in QEMU.
1697 * In that case just map until the end of the page.
1698 */
1699 if (block->offset == 0) {
ae3a7047 1700 ptr = xen_map_cache(addr, 0, 0);
0dc3f44a 1701 goto unlock;
0d6d3c87 1702 }
ae3a7047
MD
1703
1704 block->host = xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87 1705 }
ae3a7047
MD
1706 ptr = ramblock_ptr(block, addr - block->offset);
1707
0dc3f44a 1708unlock:
1709 rcu_read_unlock();
ae3a7047 1710 return ptr;
dc828ca1
PB
1711}
1712
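/* Usage sketch (not part of the original file): per the comment above
 * qemu_get_ram_ptr(), a caller that holds neither the iothread lock nor a
 * reference on the owning MemoryRegion should bracket the pointer use in an
 * RCU critical section.  The helper name is hypothetical.
 */
#if 0
static void example_copy_from_guest(ram_addr_t ram_addr, void *buf, size_t len)
{
    void *ptr;

    rcu_read_lock();
    ptr = qemu_get_ram_ptr(ram_addr);
    memcpy(buf, ptr, len);    /* the pointer is only guaranteed valid here */
    rcu_read_unlock();
}
#endif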
38bee5dc 1713/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1714 * but takes a size argument.
0dc3f44a
MD
1715 *
1716 * By the time this function returns, the returned pointer is not protected
1717 * by RCU anymore. If the caller is not within an RCU critical section and
1718 * does not hold the iothread lock, it must have other means of protecting the
1719 * pointer, such as a reference to the region that includes the incoming
1720 * ram_addr_t.
ae3a7047 1721 */
cb85f7ab 1722static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1723{
ae3a7047 1724 void *ptr;
8ab934f9
SS
1725 if (*size == 0) {
1726 return NULL;
1727 }
868bb33f 1728 if (xen_enabled()) {
e41d7c69 1729 return xen_map_cache(addr, *size, 1);
868bb33f 1730 } else {
38bee5dc 1731 RAMBlock *block;
0dc3f44a
MD
1732 rcu_read_lock();
1733 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5
MT
1734 if (addr - block->offset < block->max_length) {
1735 if (addr - block->offset + *size > block->max_length)
1736 *size = block->max_length - addr + block->offset;
ae3a7047 1737 ptr = ramblock_ptr(block, addr - block->offset);
0dc3f44a 1738 rcu_read_unlock();
ae3a7047 1739 return ptr;
38bee5dc
SS
1740 }
1741 }
1742
1743 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1744 abort();
38bee5dc
SS
1745 }
1746}
1747
7443b437 1748/* Some of the softmmu routines need to translate from a host pointer
ae3a7047
MD
1749 * (typically a TLB entry) back to a ram offset.
1750 *
1751 * By the time this function returns, the returned pointer is not protected
1752 * by RCU anymore. If the caller is not within an RCU critical section and
1753 * does not hold the iothread lock, it must have other means of protecting the
1754 * pointer, such as a reference to the region that includes the incoming
1755 * ram_addr_t.
1756 */
1b5ec234 1757MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1758{
94a6b54f
PB
1759 RAMBlock *block;
1760 uint8_t *host = ptr;
ae3a7047 1761 MemoryRegion *mr;
94a6b54f 1762
868bb33f 1763 if (xen_enabled()) {
0dc3f44a 1764 rcu_read_lock();
e41d7c69 1765 *ram_addr = xen_ram_addr_from_mapcache(ptr);
ae3a7047 1766 mr = qemu_get_ram_block(*ram_addr)->mr;
0dc3f44a 1767 rcu_read_unlock();
ae3a7047 1768 return mr;
712c2b41
SS
1769 }
1770
0dc3f44a
MD
1771 rcu_read_lock();
1772 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1773 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1774 goto found;
1775 }
1776
0dc3f44a 1777 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c
JN
1778         /* This case happens when the block is not mapped. */
1779 if (block->host == NULL) {
1780 continue;
1781 }
9b8424d5 1782 if (host - block->host < block->max_length) {
23887b79 1783 goto found;
f471a17e 1784 }
94a6b54f 1785 }
432d268c 1786
0dc3f44a 1787 rcu_read_unlock();
1b5ec234 1788 return NULL;
23887b79
PB
1789
1790found:
1791 *ram_addr = block->offset + (host - block->host);
ae3a7047 1792 mr = block->mr;
0dc3f44a 1793 rcu_read_unlock();
ae3a7047 1794 return mr;
e890261f 1795}
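/* [Editorial sketch, not part of exec.c.]  Round-tripping a host pointer
 * (for instance one previously obtained via qemu_get_ram_ptr()) back to its
 * ram_addr_t and owning MemoryRegion with the function above.  The helper
 * name is invented for the example.
 */
static bool host_ptr_to_ram_addr(void *host, ram_addr_t *out)
{
    ram_addr_t addr;
    MemoryRegion *mr = qemu_ram_addr_from_host(host, &addr);

    if (mr == NULL) {
        return false;   /* pointer does not fall inside any RAMBlock */
    }
    *out = addr;
    return true;
}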
f471a17e 1796
a8170e5e 1797static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1798 uint64_t val, unsigned size)
9fa3e853 1799{
52159192 1800 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1801 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1802 }
0e0df1e2
AK
1803 switch (size) {
1804 case 1:
1805 stb_p(qemu_get_ram_ptr(ram_addr), val);
1806 break;
1807 case 2:
1808 stw_p(qemu_get_ram_ptr(ram_addr), val);
1809 break;
1810 case 4:
1811 stl_p(qemu_get_ram_ptr(ram_addr), val);
1812 break;
1813 default:
1814 abort();
3a7d929e 1815 }
6886867e 1816 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1817 /* we remove the notdirty callback only if the code has been
1818 flushed */
a2cd8c85 1819 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1820 CPUArchState *env = current_cpu->env_ptr;
93afeade 1821 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1822 }
9fa3e853
FB
1823}
1824
b018ddf6
PB
1825static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1826 unsigned size, bool is_write)
1827{
1828 return is_write;
1829}
1830
0e0df1e2 1831static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1832 .write = notdirty_mem_write,
b018ddf6 1833 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1834 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1835};
1836
0f459d16 1837/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 1838static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 1839{
93afeade
AF
1840 CPUState *cpu = current_cpu;
1841 CPUArchState *env = cpu->env_ptr;
06d55cc1 1842 target_ulong pc, cs_base;
0f459d16 1843 target_ulong vaddr;
a1d1bb31 1844 CPUWatchpoint *wp;
06d55cc1 1845 int cpu_flags;
0f459d16 1846
ff4700b0 1847 if (cpu->watchpoint_hit) {
06d55cc1
AL
1848 /* We re-entered the check after replacing the TB. Now raise
1849          * the debug interrupt so that it will trigger after the
1850 * current instruction. */
93afeade 1851 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1852 return;
1853 }
93afeade 1854 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1855 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1856 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1857 && (wp->flags & flags)) {
08225676
PM
1858 if (flags == BP_MEM_READ) {
1859 wp->flags |= BP_WATCHPOINT_HIT_READ;
1860 } else {
1861 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1862 }
1863 wp->hitaddr = vaddr;
66b9b43c 1864 wp->hitattrs = attrs;
ff4700b0
AF
1865 if (!cpu->watchpoint_hit) {
1866 cpu->watchpoint_hit = wp;
239c51a5 1867 tb_check_watchpoint(cpu);
6e140f28 1868 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1869 cpu->exception_index = EXCP_DEBUG;
5638d180 1870 cpu_loop_exit(cpu);
6e140f28
AL
1871 } else {
1872 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1873 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1874 cpu_resume_from_signal(cpu, NULL);
6e140f28 1875 }
06d55cc1 1876 }
6e140f28
AL
1877 } else {
1878 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1879 }
1880 }
1881}
1882
6658ffb8
PB
1883/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1884 so these check for a hit then pass through to the normal out-of-line
1885 phys routines. */
66b9b43c
PM
1886static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1887 unsigned size, MemTxAttrs attrs)
6658ffb8 1888{
66b9b43c
PM
1889 MemTxResult res;
1890 uint64_t data;
1891
1892 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 1893 switch (size) {
66b9b43c
PM
1894 case 1:
1895 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
1896 break;
1897 case 2:
1898 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
1899 break;
1900 case 4:
1901 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
1902 break;
1ec9b909
AK
1903 default: abort();
1904 }
66b9b43c
PM
1905 *pdata = data;
1906 return res;
6658ffb8
PB
1907}
1908
66b9b43c
PM
1909static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1910 uint64_t val, unsigned size,
1911 MemTxAttrs attrs)
6658ffb8 1912{
66b9b43c
PM
1913 MemTxResult res;
1914
1915 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 1916 switch (size) {
67364150 1917 case 1:
66b9b43c 1918 address_space_stb(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1919 break;
1920 case 2:
66b9b43c 1921 address_space_stw(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1922 break;
1923 case 4:
66b9b43c 1924 address_space_stl(&address_space_memory, addr, val, attrs, &res);
67364150 1925 break;
1ec9b909
AK
1926 default: abort();
1927 }
66b9b43c 1928 return res;
6658ffb8
PB
1929}
1930
1ec9b909 1931static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
1932 .read_with_attrs = watch_mem_read,
1933 .write_with_attrs = watch_mem_write,
1ec9b909 1934 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1935};
6658ffb8 1936
f25a49e0
PM
1937static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1938 unsigned len, MemTxAttrs attrs)
db7b5426 1939{
acc9d80b 1940 subpage_t *subpage = opaque;
ff6cff75 1941 uint8_t buf[8];
5c9eb028 1942 MemTxResult res;
791af8c8 1943
db7b5426 1944#if defined(DEBUG_SUBPAGE)
016e9d62 1945 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1946 subpage, len, addr);
db7b5426 1947#endif
5c9eb028
PM
1948 res = address_space_read(subpage->as, addr + subpage->base,
1949 attrs, buf, len);
1950 if (res) {
1951 return res;
f25a49e0 1952 }
acc9d80b
JK
1953 switch (len) {
1954 case 1:
f25a49e0
PM
1955 *data = ldub_p(buf);
1956 return MEMTX_OK;
acc9d80b 1957 case 2:
f25a49e0
PM
1958 *data = lduw_p(buf);
1959 return MEMTX_OK;
acc9d80b 1960 case 4:
f25a49e0
PM
1961 *data = ldl_p(buf);
1962 return MEMTX_OK;
ff6cff75 1963 case 8:
f25a49e0
PM
1964 *data = ldq_p(buf);
1965 return MEMTX_OK;
acc9d80b
JK
1966 default:
1967 abort();
1968 }
db7b5426
BS
1969}
1970
f25a49e0
PM
1971static MemTxResult subpage_write(void *opaque, hwaddr addr,
1972 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 1973{
acc9d80b 1974 subpage_t *subpage = opaque;
ff6cff75 1975 uint8_t buf[8];
acc9d80b 1976
db7b5426 1977#if defined(DEBUG_SUBPAGE)
016e9d62 1978 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1979 " value %"PRIx64"\n",
1980 __func__, subpage, len, addr, value);
db7b5426 1981#endif
acc9d80b
JK
1982 switch (len) {
1983 case 1:
1984 stb_p(buf, value);
1985 break;
1986 case 2:
1987 stw_p(buf, value);
1988 break;
1989 case 4:
1990 stl_p(buf, value);
1991 break;
ff6cff75
PB
1992 case 8:
1993 stq_p(buf, value);
1994 break;
acc9d80b
JK
1995 default:
1996 abort();
1997 }
5c9eb028
PM
1998 return address_space_write(subpage->as, addr + subpage->base,
1999 attrs, buf, len);
db7b5426
BS
2000}
2001
c353e4cc 2002static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2003 unsigned len, bool is_write)
c353e4cc 2004{
acc9d80b 2005 subpage_t *subpage = opaque;
c353e4cc 2006#if defined(DEBUG_SUBPAGE)
016e9d62 2007 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2008 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2009#endif
2010
acc9d80b 2011 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2012 len, is_write);
c353e4cc
PB
2013}
2014
70c68e44 2015static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2016 .read_with_attrs = subpage_read,
2017 .write_with_attrs = subpage_write,
ff6cff75
PB
2018 .impl.min_access_size = 1,
2019 .impl.max_access_size = 8,
2020 .valid.min_access_size = 1,
2021 .valid.max_access_size = 8,
c353e4cc 2022 .valid.accepts = subpage_accepts,
70c68e44 2023 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2024};
2025
c227f099 2026static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2027 uint16_t section)
db7b5426
BS
2028{
2029 int idx, eidx;
2030
2031 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2032 return -1;
2033 idx = SUBPAGE_IDX(start);
2034 eidx = SUBPAGE_IDX(end);
2035#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2036 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2037 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2038#endif
db7b5426 2039 for (; idx <= eidx; idx++) {
5312bd8b 2040 mmio->sub_section[idx] = section;
db7b5426
BS
2041 }
2042
2043 return 0;
2044}
2045
acc9d80b 2046static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2047{
c227f099 2048 subpage_t *mmio;
db7b5426 2049
7267c094 2050 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2051
acc9d80b 2052 mmio->as = as;
1eec614b 2053 mmio->base = base;
2c9b15ca 2054 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2055 NULL, TARGET_PAGE_SIZE);
b3b00c78 2056 mmio->iomem.subpage = true;
db7b5426 2057#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2058 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2059 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2060#endif
b41aac4f 2061 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2062
2063 return mmio;
2064}
2065
a656e22f
PC
2066static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2067 MemoryRegion *mr)
5312bd8b 2068{
a656e22f 2069 assert(as);
5312bd8b 2070 MemoryRegionSection section = {
a656e22f 2071 .address_space = as,
5312bd8b
AK
2072 .mr = mr,
2073 .offset_within_address_space = 0,
2074 .offset_within_region = 0,
052e87b0 2075 .size = int128_2_64(),
5312bd8b
AK
2076 };
2077
53cb28cb 2078 return phys_section_add(map, &section);
5312bd8b
AK
2079}
2080
9d82b5a7 2081MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 2082{
79e2b9ae
PB
2083 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2084 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2085
2086 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2087}
2088
e9179ce1
AK
2089static void io_mem_init(void)
2090{
1f6245e5 2091 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2092 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2093 NULL, UINT64_MAX);
2c9b15ca 2094 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2095 NULL, UINT64_MAX);
2c9b15ca 2096 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2097 NULL, UINT64_MAX);
e9179ce1
AK
2098}
2099
ac1970fb 2100static void mem_begin(MemoryListener *listener)
00752703
PB
2101{
2102 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2103 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2104 uint16_t n;
2105
a656e22f 2106 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2107 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2108 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2109 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2110 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2111 assert(n == PHYS_SECTION_ROM);
a656e22f 2112 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2113 assert(n == PHYS_SECTION_WATCH);
00752703 2114
9736e55b 2115 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2116 d->as = as;
2117 as->next_dispatch = d;
2118}
2119
79e2b9ae
PB
2120static void address_space_dispatch_free(AddressSpaceDispatch *d)
2121{
2122 phys_sections_free(&d->map);
2123 g_free(d);
2124}
2125
00752703 2126static void mem_commit(MemoryListener *listener)
ac1970fb 2127{
89ae337a 2128 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2129 AddressSpaceDispatch *cur = as->dispatch;
2130 AddressSpaceDispatch *next = as->next_dispatch;
2131
53cb28cb 2132 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2133
79e2b9ae 2134 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2135 if (cur) {
79e2b9ae 2136 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2137 }
9affd6fc
PB
2138}
2139
1d71148e 2140static void tcg_commit(MemoryListener *listener)
50c1e149 2141{
182735ef 2142 CPUState *cpu;
117712c3
AK
2143
2144 /* since each CPU stores ram addresses in its TLB cache, we must
2145 reset the modified entries */
2146 /* XXX: slow ! */
bdc44640 2147 CPU_FOREACH(cpu) {
33bde2e1
EI
2148 /* FIXME: Disentangle the cpu.h circular files deps so we can
2149 directly get the right CPU from listener. */
2150 if (cpu->tcg_as_listener != listener) {
2151 continue;
2152 }
76e5c76f 2153 cpu_reload_memory_map(cpu);
117712c3 2154 }
50c1e149
AK
2155}
2156
ac1970fb
AK
2157void address_space_init_dispatch(AddressSpace *as)
2158{
00752703 2159 as->dispatch = NULL;
89ae337a 2160 as->dispatch_listener = (MemoryListener) {
ac1970fb 2161 .begin = mem_begin,
00752703 2162 .commit = mem_commit,
ac1970fb
AK
2163 .region_add = mem_add,
2164 .region_nop = mem_add,
2165 .priority = 0,
2166 };
89ae337a 2167 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2168}
2169
6e48e8f9
PB
2170void address_space_unregister(AddressSpace *as)
2171{
2172 memory_listener_unregister(&as->dispatch_listener);
2173}
2174
83f3c251
AK
2175void address_space_destroy_dispatch(AddressSpace *as)
2176{
2177 AddressSpaceDispatch *d = as->dispatch;
2178
79e2b9ae
PB
2179 atomic_rcu_set(&as->dispatch, NULL);
2180 if (d) {
2181 call_rcu(d, address_space_dispatch_free, rcu);
2182 }
83f3c251
AK
2183}
2184
62152b8a
AK
2185static void memory_map_init(void)
2186{
7267c094 2187 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2188
57271d63 2189 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2190 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2191
7267c094 2192 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2193 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2194 65536);
7dca8043 2195 address_space_init(&address_space_io, system_io, "I/O");
62152b8a
AK
2196}
2197
2198MemoryRegion *get_system_memory(void)
2199{
2200 return system_memory;
2201}
2202
309cb471
AK
2203MemoryRegion *get_system_io(void)
2204{
2205 return system_io;
2206}
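/* [Editorial sketch, not part of exec.c.]  How board or device code typically
 * hooks an MMIO region into the root region returned by get_system_memory().
 * memory_region_init_io() is used with the same parameter order as in
 * io_mem_init() above; memory_region_add_subregion() is assumed to take the
 * usual (parent, offset, child) arguments.  The ops table, base address and
 * region name are invented for the example.
 */
static MemoryRegion example_mmio;

static void example_map_mmio(const MemoryRegionOps *ops, void *opaque,
                             hwaddr base, uint64_t size)
{
    memory_region_init_io(&example_mmio, NULL, ops, opaque,
                          "example-mmio", size);
    memory_region_add_subregion(get_system_memory(), base, &example_mmio);
}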
2207
e2eef170
PB
2208#endif /* !defined(CONFIG_USER_ONLY) */
2209
13eb76e0
FB
2210/* physical memory access (slow version, mainly for debug) */
2211#if defined(CONFIG_USER_ONLY)
f17ec444 2212int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2213 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2214{
2215 int l, flags;
2216 target_ulong page;
53a5960a 2217 void * p;
13eb76e0
FB
2218
2219 while (len > 0) {
2220 page = addr & TARGET_PAGE_MASK;
2221 l = (page + TARGET_PAGE_SIZE) - addr;
2222 if (l > len)
2223 l = len;
2224 flags = page_get_flags(page);
2225 if (!(flags & PAGE_VALID))
a68fe89c 2226 return -1;
13eb76e0
FB
2227 if (is_write) {
2228 if (!(flags & PAGE_WRITE))
a68fe89c 2229 return -1;
579a97f7 2230 /* XXX: this code should not depend on lock_user */
72fb7daa 2231 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2232 return -1;
72fb7daa
AJ
2233 memcpy(p, buf, l);
2234 unlock_user(p, addr, l);
13eb76e0
FB
2235 } else {
2236 if (!(flags & PAGE_READ))
a68fe89c 2237 return -1;
579a97f7 2238 /* XXX: this code should not depend on lock_user */
72fb7daa 2239 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2240 return -1;
72fb7daa 2241 memcpy(buf, p, l);
5b257578 2242 unlock_user(p, addr, 0);
13eb76e0
FB
2243 }
2244 len -= l;
2245 buf += l;
2246 addr += l;
2247 }
a68fe89c 2248 return 0;
13eb76e0 2249}
8df1cd07 2250
13eb76e0 2251#else
51d7a9eb 2252
845b6214 2253static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
a8170e5e 2254 hwaddr length)
51d7a9eb 2255{
f874bf90 2256 if (cpu_physical_memory_range_includes_clean(addr, length)) {
845b6214
PB
2257 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2258 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
35865339 2259 tb_invalidate_phys_range(addr, addr + length);
845b6214
PB
2260 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2261 }
2262 if (dirty_log_mask) {
2263 cpu_physical_memory_set_dirty_range_nocode(addr, length);
2264 }
49dfcec4
PB
2265 } else {
2266 xen_modified_memory(addr, length);
51d7a9eb
AP
2267 }
2268}
2269
23326164 2270static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2271{
e1622f4b 2272 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2273
2274 /* Regions are assumed to support 1-4 byte accesses unless
2275 otherwise specified. */
23326164
RH
2276 if (access_size_max == 0) {
2277 access_size_max = 4;
2278 }
2279
2280 /* Bound the maximum access by the alignment of the address. */
2281 if (!mr->ops->impl.unaligned) {
2282 unsigned align_size_max = addr & -addr;
2283 if (align_size_max != 0 && align_size_max < access_size_max) {
2284 access_size_max = align_size_max;
2285 }
82f2563f 2286 }
23326164
RH
2287
2288 /* Don't attempt accesses larger than the maximum. */
2289 if (l > access_size_max) {
2290 l = access_size_max;
82f2563f 2291 }
098178f2
PB
2292 if (l & (l - 1)) {
2293 l = 1 << (qemu_fls(l) - 1);
2294 }
23326164
RH
2295
2296 return l;
82f2563f
PB
2297}
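/* [Editorial worked example, not part of exec.c.]  Tracing the clamping above
 * for a region with valid.max_access_size == 0 (treated as 4) and
 * impl.unaligned == false:
 *
 *   addr = 0x1006, l = 8:  addr & -addr == 2, so alignment caps the access
 *                          at 2 bytes; 8 > 2, result is 2.
 *   addr = 0x1000, l = 3:  alignment allows 4 and 3 <= 4, but 3 is not a
 *                          power of two, so it is rounded down to 2.
 */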
2298
5c9eb028
PM
2299MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2300 uint8_t *buf, int len, bool is_write)
13eb76e0 2301{
149f54b5 2302 hwaddr l;
13eb76e0 2303 uint8_t *ptr;
791af8c8 2304 uint64_t val;
149f54b5 2305 hwaddr addr1;
5c8a00ce 2306 MemoryRegion *mr;
3b643495 2307 MemTxResult result = MEMTX_OK;
3b46e624 2308
41063e1e 2309 rcu_read_lock();
13eb76e0 2310 while (len > 0) {
149f54b5 2311 l = len;
5c8a00ce 2312 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2313
13eb76e0 2314 if (is_write) {
5c8a00ce
PB
2315 if (!memory_access_is_direct(mr, is_write)) {
2316 l = memory_access_size(mr, l, addr1);
4917cf44 2317 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2318 potential bugs */
23326164
RH
2319 switch (l) {
2320 case 8:
2321 /* 64 bit write access */
2322 val = ldq_p(buf);
3b643495
PM
2323 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2324 attrs);
23326164
RH
2325 break;
2326 case 4:
1c213d19 2327 /* 32 bit write access */
c27004ec 2328 val = ldl_p(buf);
3b643495
PM
2329 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2330 attrs);
23326164
RH
2331 break;
2332 case 2:
1c213d19 2333 /* 16 bit write access */
c27004ec 2334 val = lduw_p(buf);
3b643495
PM
2335 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2336 attrs);
23326164
RH
2337 break;
2338 case 1:
1c213d19 2339 /* 8 bit write access */
c27004ec 2340 val = ldub_p(buf);
3b643495
PM
2341 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2342 attrs);
23326164
RH
2343 break;
2344 default:
2345 abort();
13eb76e0 2346 }
2bbfa05d 2347 } else {
5c8a00ce 2348 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2349 /* RAM case */
5579c7f3 2350 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2351 memcpy(ptr, buf, l);
845b6214 2352 invalidate_and_set_dirty(mr, addr1, l);
13eb76e0
FB
2353 }
2354 } else {
5c8a00ce 2355 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2356 /* I/O case */
5c8a00ce 2357 l = memory_access_size(mr, l, addr1);
23326164
RH
2358 switch (l) {
2359 case 8:
2360 /* 64 bit read access */
3b643495
PM
2361 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2362 attrs);
23326164
RH
2363 stq_p(buf, val);
2364 break;
2365 case 4:
13eb76e0 2366 /* 32 bit read access */
3b643495
PM
2367 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2368 attrs);
c27004ec 2369 stl_p(buf, val);
23326164
RH
2370 break;
2371 case 2:
13eb76e0 2372 /* 16 bit read access */
3b643495
PM
2373 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2374 attrs);
c27004ec 2375 stw_p(buf, val);
23326164
RH
2376 break;
2377 case 1:
1c213d19 2378 /* 8 bit read access */
3b643495
PM
2379 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2380 attrs);
c27004ec 2381 stb_p(buf, val);
23326164
RH
2382 break;
2383 default:
2384 abort();
13eb76e0
FB
2385 }
2386 } else {
2387 /* RAM case */
5c8a00ce 2388 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2389 memcpy(buf, ptr, l);
13eb76e0
FB
2390 }
2391 }
2392 len -= l;
2393 buf += l;
2394 addr += l;
2395 }
41063e1e 2396 rcu_read_unlock();
fd8aaa76 2397
3b643495 2398 return result;
13eb76e0 2399}
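/* [Editorial sketch, not part of exec.c.]  A typical caller of
 * address_space_rw(): write a small buffer into guest memory with default
 * transaction attributes and check the accumulated MemTxResult.  The payload
 * and function name are invented for the example.
 */
static bool example_write_guest(AddressSpace *as, hwaddr addr)
{
    uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };
    MemTxResult res;

    res = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
                           payload, sizeof(payload), true);
    return res == MEMTX_OK;
}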
8df1cd07 2400
5c9eb028
PM
2401MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2402 const uint8_t *buf, int len)
ac1970fb 2403{
5c9eb028 2404 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
ac1970fb
AK
2405}
2406
5c9eb028
PM
2407MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2408 uint8_t *buf, int len)
ac1970fb 2409{
5c9eb028 2410 return address_space_rw(as, addr, attrs, buf, len, false);
ac1970fb
AK
2411}
2412
2413
a8170e5e 2414void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2415 int len, int is_write)
2416{
5c9eb028
PM
2417 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2418 buf, len, is_write);
ac1970fb
AK
2419}
2420
582b55a9
AG
2421enum write_rom_type {
2422 WRITE_DATA,
2423 FLUSH_CACHE,
2424};
2425
2a221651 2426static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2427 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2428{
149f54b5 2429 hwaddr l;
d0ecd2aa 2430 uint8_t *ptr;
149f54b5 2431 hwaddr addr1;
5c8a00ce 2432 MemoryRegion *mr;
3b46e624 2433
41063e1e 2434 rcu_read_lock();
d0ecd2aa 2435 while (len > 0) {
149f54b5 2436 l = len;
2a221651 2437 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2438
5c8a00ce
PB
2439 if (!(memory_region_is_ram(mr) ||
2440 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2441 /* do nothing */
2442 } else {
5c8a00ce 2443 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2444 /* ROM/RAM case */
5579c7f3 2445 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2446 switch (type) {
2447 case WRITE_DATA:
2448 memcpy(ptr, buf, l);
845b6214 2449 invalidate_and_set_dirty(mr, addr1, l);
582b55a9
AG
2450 break;
2451 case FLUSH_CACHE:
2452 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2453 break;
2454 }
d0ecd2aa
FB
2455 }
2456 len -= l;
2457 buf += l;
2458 addr += l;
2459 }
41063e1e 2460 rcu_read_unlock();
d0ecd2aa
FB
2461}
2462
582b55a9 2463/* used for ROM loading : can write in RAM and ROM */
2a221651 2464void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2465 const uint8_t *buf, int len)
2466{
2a221651 2467 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2468}
2469
2470void cpu_flush_icache_range(hwaddr start, int len)
2471{
2472 /*
2473 * This function should do the same thing as an icache flush that was
2474 * triggered from within the guest. For TCG we are always cache coherent,
2475 * so there is no need to flush anything. For KVM / Xen we need to flush
2476 * the host's instruction cache at least.
2477 */
2478 if (tcg_enabled()) {
2479 return;
2480 }
2481
2a221651
EI
2482 cpu_physical_memory_write_rom_internal(&address_space_memory,
2483 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2484}
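/* [Editorial sketch, not part of exec.c.]  The ROM-loading pattern the two
 * helpers above exist for: copy a blob into guest memory even if the target
 * is ROM, then flush the host instruction cache for the KVM/Xen case.  The
 * blob and load address are invented for the example.
 */
static void example_load_firmware(AddressSpace *as, hwaddr load_addr,
                                  const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(as, load_addr, blob, blob_len);
    cpu_flush_icache_range(load_addr, blob_len);
}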
2485
6d16c2f8 2486typedef struct {
d3e71559 2487 MemoryRegion *mr;
6d16c2f8 2488 void *buffer;
a8170e5e
AK
2489 hwaddr addr;
2490 hwaddr len;
c2cba0ff 2491 bool in_use;
6d16c2f8
AL
2492} BounceBuffer;
2493
2494static BounceBuffer bounce;
2495
ba223c29 2496typedef struct MapClient {
e95205e1 2497 QEMUBH *bh;
72cf2d4f 2498 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2499} MapClient;
2500
38e047b5 2501QemuMutex map_client_list_lock;
72cf2d4f
BS
2502static QLIST_HEAD(map_client_list, MapClient) map_client_list
2503 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2504
e95205e1
FZ
2505static void cpu_unregister_map_client_do(MapClient *client)
2506{
2507 QLIST_REMOVE(client, link);
2508 g_free(client);
2509}
2510
33b6c2ed
FZ
2511static void cpu_notify_map_clients_locked(void)
2512{
2513 MapClient *client;
2514
2515 while (!QLIST_EMPTY(&map_client_list)) {
2516 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2517 qemu_bh_schedule(client->bh);
2518 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2519 }
2520}
2521
e95205e1 2522void cpu_register_map_client(QEMUBH *bh)
ba223c29 2523{
7267c094 2524 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2525
38e047b5 2526 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2527 client->bh = bh;
72cf2d4f 2528 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2529 if (!atomic_read(&bounce.in_use)) {
2530 cpu_notify_map_clients_locked();
2531 }
38e047b5 2532 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2533}
2534
38e047b5 2535void cpu_exec_init_all(void)
ba223c29 2536{
38e047b5
FZ
2537 qemu_mutex_init(&ram_list.mutex);
2538 memory_map_init();
2539 io_mem_init();
2540 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2541}
2542
e95205e1 2543void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2544{
2545 MapClient *client;
2546
e95205e1
FZ
2547 qemu_mutex_lock(&map_client_list_lock);
2548 QLIST_FOREACH(client, &map_client_list, link) {
2549 if (client->bh == bh) {
2550 cpu_unregister_map_client_do(client);
2551 break;
2552 }
ba223c29 2553 }
e95205e1 2554 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2555}
2556
2557static void cpu_notify_map_clients(void)
2558{
38e047b5 2559 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2560 cpu_notify_map_clients_locked();
38e047b5 2561 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2562}
2563
51644ab7
PB
2564bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2565{
5c8a00ce 2566 MemoryRegion *mr;
51644ab7
PB
2567 hwaddr l, xlat;
2568
41063e1e 2569 rcu_read_lock();
51644ab7
PB
2570 while (len > 0) {
2571 l = len;
5c8a00ce
PB
2572 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2573 if (!memory_access_is_direct(mr, is_write)) {
2574 l = memory_access_size(mr, l, addr);
2575 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2576 return false;
2577 }
2578 }
2579
2580 len -= l;
2581 addr += l;
2582 }
41063e1e 2583 rcu_read_unlock();
51644ab7
PB
2584 return true;
2585}
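/* [Editorial sketch, not part of exec.c.]  Probing whether a window can be
 * both read and written before using it for a read-modify-write style
 * transfer, so a request can be failed cleanly instead of touching
 * unassigned memory.  The helper name is invented for the example.
 */
static bool example_dma_window_ok(AddressSpace *as, hwaddr addr, int len)
{
    return address_space_access_valid(as, addr, len, false) &&
           address_space_access_valid(as, addr, len, true);
}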
2586
6d16c2f8
AL
2587/* Map a physical memory region into a host virtual address.
2588 * May map a subset of the requested range, given by and returned in *plen.
2589 * May return NULL if resources needed to perform the mapping are exhausted.
2590 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2591 * Use cpu_register_map_client() to know when retrying the map operation is
2592 * likely to succeed.
6d16c2f8 2593 */
ac1970fb 2594void *address_space_map(AddressSpace *as,
a8170e5e
AK
2595 hwaddr addr,
2596 hwaddr *plen,
ac1970fb 2597 bool is_write)
6d16c2f8 2598{
a8170e5e 2599 hwaddr len = *plen;
e3127ae0
PB
2600 hwaddr done = 0;
2601 hwaddr l, xlat, base;
2602 MemoryRegion *mr, *this_mr;
2603 ram_addr_t raddr;
6d16c2f8 2604
e3127ae0
PB
2605 if (len == 0) {
2606 return NULL;
2607 }
38bee5dc 2608
e3127ae0 2609 l = len;
41063e1e 2610 rcu_read_lock();
e3127ae0 2611 mr = address_space_translate(as, addr, &xlat, &l, is_write);
41063e1e 2612
e3127ae0 2613 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2614 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 2615 rcu_read_unlock();
e3127ae0 2616 return NULL;
6d16c2f8 2617 }
e85d9db5
KW
2618 /* Avoid unbounded allocations */
2619 l = MIN(l, TARGET_PAGE_SIZE);
2620 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2621 bounce.addr = addr;
2622 bounce.len = l;
d3e71559
PB
2623
2624 memory_region_ref(mr);
2625 bounce.mr = mr;
e3127ae0 2626 if (!is_write) {
5c9eb028
PM
2627 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2628 bounce.buffer, l);
8ab934f9 2629 }
6d16c2f8 2630
41063e1e 2631 rcu_read_unlock();
e3127ae0
PB
2632 *plen = l;
2633 return bounce.buffer;
2634 }
2635
2636 base = xlat;
2637 raddr = memory_region_get_ram_addr(mr);
2638
2639 for (;;) {
6d16c2f8
AL
2640 len -= l;
2641 addr += l;
e3127ae0
PB
2642 done += l;
2643 if (len == 0) {
2644 break;
2645 }
2646
2647 l = len;
2648 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2649 if (this_mr != mr || xlat != base + done) {
2650 break;
2651 }
6d16c2f8 2652 }
e3127ae0 2653
d3e71559 2654 memory_region_ref(mr);
41063e1e 2655 rcu_read_unlock();
e3127ae0
PB
2656 *plen = done;
2657 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2658}
2659
ac1970fb 2660/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2661 * Will also mark the memory as dirty if is_write == 1. access_len gives
2662 * the amount of memory that was actually read or written by the caller.
2663 */
a8170e5e
AK
2664void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2665 int is_write, hwaddr access_len)
6d16c2f8
AL
2666{
2667 if (buffer != bounce.buffer) {
d3e71559
PB
2668 MemoryRegion *mr;
2669 ram_addr_t addr1;
2670
2671 mr = qemu_ram_addr_from_host(buffer, &addr1);
2672 assert(mr != NULL);
6d16c2f8 2673 if (is_write) {
845b6214 2674 invalidate_and_set_dirty(mr, addr1, access_len);
6d16c2f8 2675 }
868bb33f 2676 if (xen_enabled()) {
e41d7c69 2677 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2678 }
d3e71559 2679 memory_region_unref(mr);
6d16c2f8
AL
2680 return;
2681 }
2682 if (is_write) {
5c9eb028
PM
2683 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2684 bounce.buffer, access_len);
6d16c2f8 2685 }
f8a83245 2686 qemu_vfree(bounce.buffer);
6d16c2f8 2687 bounce.buffer = NULL;
d3e71559 2688 memory_region_unref(bounce.mr);
c2cba0ff 2689 atomic_mb_set(&bounce.in_use, false);
ba223c29 2690 cpu_notify_map_clients();
6d16c2f8 2691}
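/* [Editorial sketch, not part of exec.c.]  The map/access/unmap pattern
 * implemented by the two functions above, including the retry path through
 * cpu_register_map_client() when the single bounce buffer is busy.  The
 * callback wiring via qemu_bh_new() and all names here are assumptions made
 * for the example, not code taken from QEMU.
 */
static void example_retry_cb(void *opaque)
{
    /* Re-issue the deferred transfer here once a mapping can succeed. */
}

static bool example_fill_region(AddressSpace *as, hwaddr addr, hwaddr len,
                                uint8_t pattern)
{
    hwaddr mapped = len;
    void *host = address_space_map(as, addr, &mapped, true);

    if (!host) {
        /* Resources (e.g. the bounce buffer) are exhausted: ask to be
         * notified when a later attempt is likely to succeed. */
        cpu_register_map_client(qemu_bh_new(example_retry_cb, NULL));
        return false;
    }
    memset(host, pattern, mapped);            /* may be shorter than len */
    address_space_unmap(as, host, mapped, true, mapped);
    return mapped == len;
}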
d0ecd2aa 2692
a8170e5e
AK
2693void *cpu_physical_memory_map(hwaddr addr,
2694 hwaddr *plen,
ac1970fb
AK
2695 int is_write)
2696{
2697 return address_space_map(&address_space_memory, addr, plen, is_write);
2698}
2699
a8170e5e
AK
2700void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2701 int is_write, hwaddr access_len)
ac1970fb
AK
2702{
2703 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2704}
2705
8df1cd07 2706/* warning: addr must be aligned */
50013115
PM
2707static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2708 MemTxAttrs attrs,
2709 MemTxResult *result,
2710 enum device_endian endian)
8df1cd07 2711{
8df1cd07 2712 uint8_t *ptr;
791af8c8 2713 uint64_t val;
5c8a00ce 2714 MemoryRegion *mr;
149f54b5
PB
2715 hwaddr l = 4;
2716 hwaddr addr1;
50013115 2717 MemTxResult r;
8df1cd07 2718
41063e1e 2719 rcu_read_lock();
fdfba1a2 2720 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2721 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2722 /* I/O case */
50013115 2723 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
2724#if defined(TARGET_WORDS_BIGENDIAN)
2725 if (endian == DEVICE_LITTLE_ENDIAN) {
2726 val = bswap32(val);
2727 }
2728#else
2729 if (endian == DEVICE_BIG_ENDIAN) {
2730 val = bswap32(val);
2731 }
2732#endif
8df1cd07
FB
2733 } else {
2734 /* RAM case */
5c8a00ce 2735 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2736 & TARGET_PAGE_MASK)
149f54b5 2737 + addr1);
1e78bcc1
AG
2738 switch (endian) {
2739 case DEVICE_LITTLE_ENDIAN:
2740 val = ldl_le_p(ptr);
2741 break;
2742 case DEVICE_BIG_ENDIAN:
2743 val = ldl_be_p(ptr);
2744 break;
2745 default:
2746 val = ldl_p(ptr);
2747 break;
2748 }
50013115
PM
2749 r = MEMTX_OK;
2750 }
2751 if (result) {
2752 *result = r;
8df1cd07 2753 }
41063e1e 2754 rcu_read_unlock();
8df1cd07
FB
2755 return val;
2756}
2757
50013115
PM
2758uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2759 MemTxAttrs attrs, MemTxResult *result)
2760{
2761 return address_space_ldl_internal(as, addr, attrs, result,
2762 DEVICE_NATIVE_ENDIAN);
2763}
2764
2765uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2766 MemTxAttrs attrs, MemTxResult *result)
2767{
2768 return address_space_ldl_internal(as, addr, attrs, result,
2769 DEVICE_LITTLE_ENDIAN);
2770}
2771
2772uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2773 MemTxAttrs attrs, MemTxResult *result)
2774{
2775 return address_space_ldl_internal(as, addr, attrs, result,
2776 DEVICE_BIG_ENDIAN);
2777}
2778
fdfba1a2 2779uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2780{
50013115 2781 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2782}
2783
fdfba1a2 2784uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2785{
50013115 2786 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2787}
2788
fdfba1a2 2789uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2790{
50013115 2791 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2792}
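/* [Editorial sketch, not part of exec.c.]  Reading a 32-bit little-endian
 * guest word with an explicit MemTxResult check; callers that do not care
 * about bus errors can use the ldl_le_phys() wrapper above instead.  The
 * function name and error handling are invented for the example.
 */
static uint32_t example_read_desc_word(AddressSpace *as, hwaddr desc_addr)
{
    MemTxResult res;
    uint32_t v = address_space_ldl_le(as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                                      &res);

    return (res == MEMTX_OK) ? v : 0;   /* treat a failed access as zero */
}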
2793
84b7b8e7 2794/* warning: addr must be aligned */
50013115
PM
2795static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2796 MemTxAttrs attrs,
2797 MemTxResult *result,
2798 enum device_endian endian)
84b7b8e7 2799{
84b7b8e7
FB
2800 uint8_t *ptr;
2801 uint64_t val;
5c8a00ce 2802 MemoryRegion *mr;
149f54b5
PB
2803 hwaddr l = 8;
2804 hwaddr addr1;
50013115 2805 MemTxResult r;
84b7b8e7 2806
41063e1e 2807 rcu_read_lock();
2c17449b 2808 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2809 false);
2810 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2811 /* I/O case */
50013115 2812 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
2813#if defined(TARGET_WORDS_BIGENDIAN)
2814 if (endian == DEVICE_LITTLE_ENDIAN) {
2815 val = bswap64(val);
2816 }
2817#else
2818 if (endian == DEVICE_BIG_ENDIAN) {
2819 val = bswap64(val);
2820 }
84b7b8e7
FB
2821#endif
2822 } else {
2823 /* RAM case */
5c8a00ce 2824 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2825 & TARGET_PAGE_MASK)
149f54b5 2826 + addr1);
1e78bcc1
AG
2827 switch (endian) {
2828 case DEVICE_LITTLE_ENDIAN:
2829 val = ldq_le_p(ptr);
2830 break;
2831 case DEVICE_BIG_ENDIAN:
2832 val = ldq_be_p(ptr);
2833 break;
2834 default:
2835 val = ldq_p(ptr);
2836 break;
2837 }
50013115
PM
2838 r = MEMTX_OK;
2839 }
2840 if (result) {
2841 *result = r;
84b7b8e7 2842 }
41063e1e 2843 rcu_read_unlock();
84b7b8e7
FB
2844 return val;
2845}
2846
50013115
PM
2847uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2848 MemTxAttrs attrs, MemTxResult *result)
2849{
2850 return address_space_ldq_internal(as, addr, attrs, result,
2851 DEVICE_NATIVE_ENDIAN);
2852}
2853
2854uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2855 MemTxAttrs attrs, MemTxResult *result)
2856{
2857 return address_space_ldq_internal(as, addr, attrs, result,
2858 DEVICE_LITTLE_ENDIAN);
2859}
2860
2861uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2862 MemTxAttrs attrs, MemTxResult *result)
2863{
2864 return address_space_ldq_internal(as, addr, attrs, result,
2865 DEVICE_BIG_ENDIAN);
2866}
2867
2c17449b 2868uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2869{
50013115 2870 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2871}
2872
2c17449b 2873uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2874{
50013115 2875 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2876}
2877
2c17449b 2878uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2879{
50013115 2880 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2881}
2882
aab33094 2883/* XXX: optimize */
50013115
PM
2884uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2885 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
2886{
2887 uint8_t val;
50013115
PM
2888 MemTxResult r;
2889
2890 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2891 if (result) {
2892 *result = r;
2893 }
aab33094
FB
2894 return val;
2895}
2896
50013115
PM
2897uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2898{
2899 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2900}
2901
733f0b02 2902/* warning: addr must be aligned */
50013115
PM
2903static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2904 hwaddr addr,
2905 MemTxAttrs attrs,
2906 MemTxResult *result,
2907 enum device_endian endian)
aab33094 2908{
733f0b02
MT
2909 uint8_t *ptr;
2910 uint64_t val;
5c8a00ce 2911 MemoryRegion *mr;
149f54b5
PB
2912 hwaddr l = 2;
2913 hwaddr addr1;
50013115 2914 MemTxResult r;
733f0b02 2915
41063e1e 2916 rcu_read_lock();
41701aa4 2917 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2918 false);
2919 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2920 /* I/O case */
50013115 2921 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
2922#if defined(TARGET_WORDS_BIGENDIAN)
2923 if (endian == DEVICE_LITTLE_ENDIAN) {
2924 val = bswap16(val);
2925 }
2926#else
2927 if (endian == DEVICE_BIG_ENDIAN) {
2928 val = bswap16(val);
2929 }
2930#endif
733f0b02
MT
2931 } else {
2932 /* RAM case */
5c8a00ce 2933 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2934 & TARGET_PAGE_MASK)
149f54b5 2935 + addr1);
1e78bcc1
AG
2936 switch (endian) {
2937 case DEVICE_LITTLE_ENDIAN:
2938 val = lduw_le_p(ptr);
2939 break;
2940 case DEVICE_BIG_ENDIAN:
2941 val = lduw_be_p(ptr);
2942 break;
2943 default:
2944 val = lduw_p(ptr);
2945 break;
2946 }
50013115
PM
2947 r = MEMTX_OK;
2948 }
2949 if (result) {
2950 *result = r;
733f0b02 2951 }
41063e1e 2952 rcu_read_unlock();
733f0b02 2953 return val;
aab33094
FB
2954}
2955
50013115
PM
2956uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
2957 MemTxAttrs attrs, MemTxResult *result)
2958{
2959 return address_space_lduw_internal(as, addr, attrs, result,
2960 DEVICE_NATIVE_ENDIAN);
2961}
2962
2963uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
2964 MemTxAttrs attrs, MemTxResult *result)
2965{
2966 return address_space_lduw_internal(as, addr, attrs, result,
2967 DEVICE_LITTLE_ENDIAN);
2968}
2969
2970uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
2971 MemTxAttrs attrs, MemTxResult *result)
2972{
2973 return address_space_lduw_internal(as, addr, attrs, result,
2974 DEVICE_BIG_ENDIAN);
2975}
2976
41701aa4 2977uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2978{
50013115 2979 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2980}
2981
41701aa4 2982uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2983{
50013115 2984 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2985}
2986
41701aa4 2987uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2988{
50013115 2989 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2990}
2991
8df1cd07
FB
2992/* warning: addr must be aligned. The ram page is not masked as dirty
2993 and the code inside is not invalidated. It is useful if the dirty
2994 bits are used to track modified PTEs */
50013115
PM
2995void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
2996 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 2997{
8df1cd07 2998 uint8_t *ptr;
5c8a00ce 2999 MemoryRegion *mr;
149f54b5
PB
3000 hwaddr l = 4;
3001 hwaddr addr1;
50013115 3002 MemTxResult r;
845b6214 3003 uint8_t dirty_log_mask;
8df1cd07 3004
41063e1e 3005 rcu_read_lock();
2198a121 3006 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3007 true);
3008 if (l < 4 || !memory_access_is_direct(mr, true)) {
50013115 3009 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3010 } else {
5c8a00ce 3011 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3012 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3013 stl_p(ptr, val);
74576198 3014
845b6214
PB
3015 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3016 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
3017 if (dirty_log_mask) {
3018 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198 3019 }
50013115
PM
3020 r = MEMTX_OK;
3021 }
3022 if (result) {
3023 *result = r;
8df1cd07 3024 }
41063e1e 3025 rcu_read_unlock();
8df1cd07
FB
3026}
3027
50013115
PM
3028void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3029{
3030 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3031}
3032
8df1cd07 3033/* warning: addr must be aligned */
50013115
PM
3034static inline void address_space_stl_internal(AddressSpace *as,
3035 hwaddr addr, uint32_t val,
3036 MemTxAttrs attrs,
3037 MemTxResult *result,
3038 enum device_endian endian)
8df1cd07 3039{
8df1cd07 3040 uint8_t *ptr;
5c8a00ce 3041 MemoryRegion *mr;
149f54b5
PB
3042 hwaddr l = 4;
3043 hwaddr addr1;
50013115 3044 MemTxResult r;
8df1cd07 3045
41063e1e 3046 rcu_read_lock();
ab1da857 3047 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3048 true);
3049 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3050#if defined(TARGET_WORDS_BIGENDIAN)
3051 if (endian == DEVICE_LITTLE_ENDIAN) {
3052 val = bswap32(val);
3053 }
3054#else
3055 if (endian == DEVICE_BIG_ENDIAN) {
3056 val = bswap32(val);
3057 }
3058#endif
50013115 3059 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3060 } else {
8df1cd07 3061 /* RAM case */
5c8a00ce 3062 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3063 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3064 switch (endian) {
3065 case DEVICE_LITTLE_ENDIAN:
3066 stl_le_p(ptr, val);
3067 break;
3068 case DEVICE_BIG_ENDIAN:
3069 stl_be_p(ptr, val);
3070 break;
3071 default:
3072 stl_p(ptr, val);
3073 break;
3074 }
845b6214 3075 invalidate_and_set_dirty(mr, addr1, 4);
50013115
PM
3076 r = MEMTX_OK;
3077 }
3078 if (result) {
3079 *result = r;
8df1cd07 3080 }
41063e1e 3081 rcu_read_unlock();
8df1cd07
FB
3082}
3083
50013115
PM
3084void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3085 MemTxAttrs attrs, MemTxResult *result)
3086{
3087 address_space_stl_internal(as, addr, val, attrs, result,
3088 DEVICE_NATIVE_ENDIAN);
3089}
3090
3091void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3092 MemTxAttrs attrs, MemTxResult *result)
3093{
3094 address_space_stl_internal(as, addr, val, attrs, result,
3095 DEVICE_LITTLE_ENDIAN);
3096}
3097
3098void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3099 MemTxAttrs attrs, MemTxResult *result)
3100{
3101 address_space_stl_internal(as, addr, val, attrs, result,
3102 DEVICE_BIG_ENDIAN);
3103}
3104
ab1da857 3105void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3106{
50013115 3107 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3108}
3109
ab1da857 3110void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3111{
50013115 3112 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3113}
3114
ab1da857 3115void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3116{
50013115 3117 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3118}
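/* [Editorial sketch, not part of exec.c.]  The store-side counterpart:
 * writing a 32-bit value with an explicitly chosen byte order and noticing
 * when the target region signalled a transaction error.  Names are invented
 * for the example.
 */
static bool example_write_doorbell(AddressSpace *as, hwaddr reg, uint32_t val)
{
    MemTxResult res;

    address_space_stl_le(as, reg, val, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}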
3119
aab33094 3120/* XXX: optimize */
50013115
PM
3121void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3122 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3123{
3124 uint8_t v = val;
50013115
PM
3125 MemTxResult r;
3126
3127 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3128 if (result) {
3129 *result = r;
3130 }
3131}
3132
3133void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3134{
3135 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094
FB
3136}
3137
733f0b02 3138/* warning: addr must be aligned */
50013115
PM
3139static inline void address_space_stw_internal(AddressSpace *as,
3140 hwaddr addr, uint32_t val,
3141 MemTxAttrs attrs,
3142 MemTxResult *result,
3143 enum device_endian endian)
aab33094 3144{
733f0b02 3145 uint8_t *ptr;
5c8a00ce 3146 MemoryRegion *mr;
149f54b5
PB
3147 hwaddr l = 2;
3148 hwaddr addr1;
50013115 3149 MemTxResult r;
733f0b02 3150
41063e1e 3151 rcu_read_lock();
5ce5944d 3152 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3153 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3154#if defined(TARGET_WORDS_BIGENDIAN)
3155 if (endian == DEVICE_LITTLE_ENDIAN) {
3156 val = bswap16(val);
3157 }
3158#else
3159 if (endian == DEVICE_BIG_ENDIAN) {
3160 val = bswap16(val);
3161 }
3162#endif
50013115 3163 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3164 } else {
733f0b02 3165 /* RAM case */
5c8a00ce 3166 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 3167 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3168 switch (endian) {
3169 case DEVICE_LITTLE_ENDIAN:
3170 stw_le_p(ptr, val);
3171 break;
3172 case DEVICE_BIG_ENDIAN:
3173 stw_be_p(ptr, val);
3174 break;
3175 default:
3176 stw_p(ptr, val);
3177 break;
3178 }
845b6214 3179 invalidate_and_set_dirty(mr, addr1, 2);
50013115
PM
3180 r = MEMTX_OK;
3181 }
3182 if (result) {
3183 *result = r;
733f0b02 3184 }
41063e1e 3185 rcu_read_unlock();
aab33094
FB
3186}
3187
50013115
PM
3188void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3189 MemTxAttrs attrs, MemTxResult *result)
3190{
3191 address_space_stw_internal(as, addr, val, attrs, result,
3192 DEVICE_NATIVE_ENDIAN);
3193}
3194
3195void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3196 MemTxAttrs attrs, MemTxResult *result)
3197{
3198 address_space_stw_internal(as, addr, val, attrs, result,
3199 DEVICE_LITTLE_ENDIAN);
3200}
3201
3202void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3203 MemTxAttrs attrs, MemTxResult *result)
3204{
3205 address_space_stw_internal(as, addr, val, attrs, result,
3206 DEVICE_BIG_ENDIAN);
3207}
3208
5ce5944d 3209void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3210{
50013115 3211 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3212}
3213
5ce5944d 3214void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3215{
50013115 3216 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3217}
3218
5ce5944d 3219void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3220{
50013115 3221 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3222}
3223
aab33094 3224/* XXX: optimize */
50013115
PM
3225void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3226 MemTxAttrs attrs, MemTxResult *result)
aab33094 3227{
50013115 3228 MemTxResult r;
aab33094 3229 val = tswap64(val);
50013115
PM
3230 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3231 if (result) {
3232 *result = r;
3233 }
aab33094
FB
3234}
3235
50013115
PM
3236void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3237 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3238{
50013115 3239 MemTxResult r;
1e78bcc1 3240 val = cpu_to_le64(val);
50013115
PM
3241 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3242 if (result) {
3243 *result = r;
3244 }
3245}
3246void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3247 MemTxAttrs attrs, MemTxResult *result)
3248{
3249 MemTxResult r;
3250 val = cpu_to_be64(val);
3251 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3252 if (result) {
3253 *result = r;
3254 }
3255}
3256
3257void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3258{
3259 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3260}
3261
3262void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3263{
3264 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3265}
3266
f606604f 3267void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3268{
50013115 3269 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3270}
3271
5e2972fd 3272/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3273int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3274 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3275{
3276 int l;
a8170e5e 3277 hwaddr phys_addr;
9b3c35e0 3278 target_ulong page;
13eb76e0
FB
3279
3280 while (len > 0) {
3281 page = addr & TARGET_PAGE_MASK;
f17ec444 3282 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
3283 /* if no physical page mapped, return an error */
3284 if (phys_addr == -1)
3285 return -1;
3286 l = (page + TARGET_PAGE_SIZE) - addr;
3287 if (l > len)
3288 l = len;
5e2972fd 3289 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
3290 if (is_write) {
3291 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3292 } else {
5c9eb028
PM
3293 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3294 buf, l, 0);
2e38847b 3295 }
13eb76e0
FB
3296 len -= l;
3297 buf += l;
3298 addr += l;
3299 }
3300 return 0;
3301}
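/* [Editorial sketch, not part of exec.c.]  How a debug front end (gdbstub,
 * monitor) typically uses the helper above to peek at a vCPU's virtual
 * memory.  The function name and error handling are invented; the bytes are
 * interpreted with ldl_p(), the same way address_space_rw() does above.
 */
static bool example_peek_guest_u32(CPUState *cpu, target_ulong vaddr,
                                   uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;               /* no physical page mapped at vaddr */
    }
    *out = ldl_p(buf);
    return true;
}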
a68fe89c 3302#endif
13eb76e0 3303
8e4a424b
BS
3304/*
3305 * A helper function for the _utterly broken_ virtio device model to find out if
3306 * it's running on a big endian machine. Don't do this at home kids!
3307 */
98ed8ecf
GK
3308bool target_words_bigendian(void);
3309bool target_words_bigendian(void)
8e4a424b
BS
3310{
3311#if defined(TARGET_WORDS_BIGENDIAN)
3312 return true;
3313#else
3314 return false;
3315#endif
3316}
3317
76f35538 3318#ifndef CONFIG_USER_ONLY
a8170e5e 3319bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3320{
5c8a00ce 3321 MemoryRegion*mr;
149f54b5 3322 hwaddr l = 1;
41063e1e 3323 bool res;
76f35538 3324
41063e1e 3325 rcu_read_lock();
5c8a00ce
PB
3326 mr = address_space_translate(&address_space_memory,
3327 phys_addr, &phys_addr, &l, false);
76f35538 3328
41063e1e
PB
3329 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3330 rcu_read_unlock();
3331 return res;
76f35538 3332}
bd2fa51f
MH
3333
3334void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3335{
3336 RAMBlock *block;
3337
0dc3f44a
MD
3338 rcu_read_lock();
3339 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 3340 func(block->host, block->offset, block->used_length, opaque);
bd2fa51f 3341 }
0dc3f44a 3342 rcu_read_unlock();
bd2fa51f 3343}
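/* [Editorial sketch, not part of exec.c.]  Summing the used size of every
 * RAMBlock through the iterator above.  The callback signature is inferred
 * from the call site (host pointer, offset, used length, opaque); the names
 * are invented for the example.
 */
static void example_count_ram(void *host, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}

static ram_addr_t example_total_ram(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(example_count_ram, &total);
    return total;
}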
ec3f8c99 3344#endif