/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

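/* Take one node from the pool (the caller must already have reserved space
 * via phys_map_node_reserve) and initialise every entry as either an
 * unassigned leaf or a nil pointer to the next level.
 */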
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

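/* Recursively descend the radix tree, allocating intermediate nodes as
 * needed, and point whole aligned chunks of [*index, *index + *nb) at the
 * given leaf section.
 */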
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

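/* Walk the radix tree for addr, honouring each entry's skip count, and
 * return the matching section, or the unassigned section if nothing covers
 * the address.
 */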
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
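/* Resolve addr within as to a terminal MemoryRegion and offset, iterating
 * through any IOMMUs on the way; *plen is clamped so that the result does
 * not cross a translation boundary.
 */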
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        NULL
    }
};

#endif

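/* Return the CPU with the given index, or NULL if no such CPU exists. */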
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

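/* Register a freshly created CPU: give it the next free index, add it to
 * the global CPU list and wire up its savevm/VMState handlers.
 */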
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

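/* Re-arm dirty tracking in the CPU TLBs for [start, start + length) after
 * the dirty bitmap for that range has been cleared.
 */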
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

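/* MemoryListener callback: add a section to the address space's next
 * dispatch map, splitting off subpage pieces at an unaligned head or tail
 * and registering whole pages in bulk.
 */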
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

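/* Return the filesystem block size reported by statfs() for path (the huge
 * page size on hugetlbfs); warn if the path is not on hugetlbfs at all.
 */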
static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        error_report("%s", error_get_pretty(*errp));
        exit(1);
    }
    return NULL;
}
#endif

/* Called with the ramlist lock held.  */
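/* Pick the smallest gap between existing blocks that can hold "size" bytes
 * (best fit), so the RAM address space stays densely packed.
 */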
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}

static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}

#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

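/* Release the host memory backing a RAMBlock according to how it was
 * allocated, then free the block itself.  Called via call_rcu() from
 * qemu_ram_free().
 */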
43771539
PB
1576static void reclaim_ramblock(RAMBlock *block)
1577{
1578 if (block->flags & RAM_PREALLOC) {
1579 ;
1580 } else if (xen_enabled()) {
1581 xen_invalidate_map_cache_entry(block->host);
1582#ifndef _WIN32
1583 } else if (block->fd >= 0) {
1584 munmap(block->host, block->max_length);
1585 close(block->fd);
1586#endif
1587 } else {
1588 qemu_anon_ram_free(block->host, block->max_length);
1589 }
1590 g_free(block);
1591}
1592
c227f099 1593void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1594{
04b16653
AW
1595 RAMBlock *block;
1596
b2a8658e 1597 qemu_mutex_lock_ramlist();
0dc3f44a 1598 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
04b16653 1599 if (addr == block->offset) {
0dc3f44a 1600 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1601 ram_list.mru_block = NULL;
0dc3f44a
MD
1602 /* Write list before version */
1603 smp_wmb();
f798b07f 1604 ram_list.version++;
43771539 1605 call_rcu(block, reclaim_ramblock, rcu);
b2a8658e 1606 break;
04b16653
AW
1607 }
1608 }
b2a8658e 1609 qemu_mutex_unlock_ramlist();
e9a1ab19
FB
1610}
1611
cd19cfa2
HY
1612#ifndef _WIN32
1613void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1614{
1615 RAMBlock *block;
1616 ram_addr_t offset;
1617 int flags;
1618 void *area, *vaddr;
1619
0dc3f44a 1620 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
cd19cfa2 1621 offset = addr - block->offset;
9b8424d5 1622 if (offset < block->max_length) {
1240be24 1623 vaddr = ramblock_ptr(block, offset);
7bd4f430 1624 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1625 ;
dfeaf2ab
MA
1626 } else if (xen_enabled()) {
1627 abort();
cd19cfa2
HY
1628 } else {
1629 flags = MAP_FIXED;
3435f395 1630 if (block->fd >= 0) {
dbcb8981
PB
1631 flags |= (block->flags & RAM_SHARED ?
1632 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1633 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1634 flags, block->fd, offset);
cd19cfa2 1635 } else {
2eb9fbaa
MA
1636 /*
1637 * Remap needs to match alloc. Accelerators that
1638 * set phys_mem_alloc never remap. If they did,
1639 * we'd need a remap hook here.
1640 */
1641 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1642
cd19cfa2
HY
1643 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1644 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1645 flags, -1, 0);
cd19cfa2
HY
1646 }
1647 if (area != vaddr) {
f15fbc4b
AP
1648 fprintf(stderr, "Could not remap addr: "
1649 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1650 length, addr);
1651 exit(1);
1652 }
8490fc78 1653 memory_try_enable_merging(vaddr, length);
ddb97f1d 1654 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 1655 }
cd19cfa2
HY
1656 }
1657 }
1658}
1659#endif /* !_WIN32 */
1660
a35ba7be
PB
1661int qemu_get_ram_fd(ram_addr_t addr)
1662{
ae3a7047
MD
1663 RAMBlock *block;
1664 int fd;
a35ba7be 1665
0dc3f44a 1666 rcu_read_lock();
ae3a7047
MD
1667 block = qemu_get_ram_block(addr);
1668 fd = block->fd;
0dc3f44a 1669 rcu_read_unlock();
ae3a7047 1670 return fd;
a35ba7be
PB
1671}
1672
3fd74b84
DM
1673void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1674{
ae3a7047
MD
1675 RAMBlock *block;
1676 void *ptr;
3fd74b84 1677
0dc3f44a 1678 rcu_read_lock();
ae3a7047
MD
1679 block = qemu_get_ram_block(addr);
1680 ptr = ramblock_ptr(block, 0);
0dc3f44a 1681 rcu_read_unlock();
ae3a7047 1682 return ptr;
3fd74b84
DM
1683}
1684
1b5ec234 1685/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047
MD
1686 * This should not be used for general purpose DMA. Use address_space_map
1687 * or address_space_rw instead. For local memory (e.g. video ram) that the
1688 * device owns, use memory_region_get_ram_ptr.
0dc3f44a
MD
1689 *
1690 * By the time this function returns, the returned pointer is not protected
1691 * by RCU anymore. If the caller is not within an RCU critical section and
1692 * does not hold the iothread lock, it must have other means of protecting the
1693 * pointer, such as a reference to the region that includes the incoming
1694 * ram_addr_t.
1b5ec234
PB
1695 */
1696void *qemu_get_ram_ptr(ram_addr_t addr)
1697{
ae3a7047
MD
1698 RAMBlock *block;
1699 void *ptr;
1b5ec234 1700
0dc3f44a 1701 rcu_read_lock();
ae3a7047
MD
1702 block = qemu_get_ram_block(addr);
1703
1704 if (xen_enabled() && block->host == NULL) {
0d6d3c87
PB
1705 /* We need to check if the requested address is in the RAM
1706 * because we don't want to map the entire memory in QEMU.
1707 * In that case just map until the end of the page.
1708 */
1709 if (block->offset == 0) {
ae3a7047 1710 ptr = xen_map_cache(addr, 0, 0);
0dc3f44a 1711 goto unlock;
0d6d3c87 1712 }
ae3a7047
MD
1713
1714 block->host = xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87 1715 }
ae3a7047
MD
1716 ptr = ramblock_ptr(block, addr - block->offset);
1717
0dc3f44a
MD
1718unlock:
1719 rcu_read_unlock();
ae3a7047 1720 return ptr;
dc828ca1
PB
1721}
1722
38bee5dc 1723/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1724 * but takes a size argument.
0dc3f44a
MD
1725 *
1726 * By the time this function returns, the returned pointer is not protected
1727 * by RCU anymore. If the caller is not within an RCU critical section and
1728 * does not hold the iothread lock, it must have other means of protecting the
1729 * pointer, such as a reference to the region that includes the incoming
1730 * ram_addr_t.
ae3a7047 1731 */
cb85f7ab 1732static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1733{
ae3a7047 1734 void *ptr;
8ab934f9
SS
1735 if (*size == 0) {
1736 return NULL;
1737 }
868bb33f 1738 if (xen_enabled()) {
e41d7c69 1739 return xen_map_cache(addr, *size, 1);
868bb33f 1740 } else {
38bee5dc 1741 RAMBlock *block;
0dc3f44a
MD
1742 rcu_read_lock();
1743 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5
MT
1744 if (addr - block->offset < block->max_length) {
1745 if (addr - block->offset + *size > block->max_length)
1746 *size = block->max_length - addr + block->offset;
ae3a7047 1747 ptr = ramblock_ptr(block, addr - block->offset);
0dc3f44a 1748 rcu_read_unlock();
ae3a7047 1749 return ptr;
38bee5dc
SS
1750 }
1751 }
1752
1753 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1754 abort();
38bee5dc
SS
1755 }
1756}
1757
7443b437 1758/* Some of the softmmu routines need to translate from a host pointer
ae3a7047
MD
1759 * (typically a TLB entry) back to a ram offset.
1760 *
1761 * By the time this function returns, the returned pointer is not protected
1762 * by RCU anymore. If the caller is not within an RCU critical section and
1763 * does not hold the iothread lock, it must have other means of protecting the
1764 * pointer, such as a reference to the region that includes the incoming
1765 * ram_addr_t.
1766 */
1b5ec234 1767MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1768{
94a6b54f
PB
1769 RAMBlock *block;
1770 uint8_t *host = ptr;
ae3a7047 1771 MemoryRegion *mr;
94a6b54f 1772
868bb33f 1773 if (xen_enabled()) {
0dc3f44a 1774 rcu_read_lock();
e41d7c69 1775 *ram_addr = xen_ram_addr_from_mapcache(ptr);
ae3a7047 1776 mr = qemu_get_ram_block(*ram_addr)->mr;
0dc3f44a 1777 rcu_read_unlock();
ae3a7047 1778 return mr;
712c2b41
SS
1779 }
1780
0dc3f44a
MD
1781 rcu_read_lock();
1782 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1783 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1784 goto found;
1785 }
1786
0dc3f44a 1787 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c 1788 /* This case happens when the block is not mapped. */
1789 if (block->host == NULL) {
1790 continue;
1791 }
9b8424d5 1792 if (host - block->host < block->max_length) {
23887b79 1793 goto found;
f471a17e 1794 }
94a6b54f 1795 }
432d268c 1796
0dc3f44a 1797 rcu_read_unlock();
1b5ec234 1798 return NULL;
23887b79
PB
1799
1800found:
1801 *ram_addr = block->offset + (host - block->host);
ae3a7047 1802 mr = block->mr;
0dc3f44a 1803 rcu_read_unlock();
ae3a7047 1804 return mr;
e890261f 1805}
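
/* Illustrative sketch (not part of exec.c): deciding whether a host pointer
 * (for instance one handed out by address_space_map()) lies inside guest
 * RAM, and recovering its ram_addr_t if so -- the same pattern that
 * address_space_unmap() uses further down.  The "example_" name is
 * hypothetical. */
static bool example_host_ptr_is_guest_ram(void *host, ram_addr_t *ram_addr)
{
    return qemu_ram_addr_from_host(host, ram_addr) != NULL;
}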
f471a17e 1806
a8170e5e 1807static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1808 uint64_t val, unsigned size)
9fa3e853 1809{
52159192 1810 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1811 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1812 }
0e0df1e2
AK
1813 switch (size) {
1814 case 1:
1815 stb_p(qemu_get_ram_ptr(ram_addr), val);
1816 break;
1817 case 2:
1818 stw_p(qemu_get_ram_ptr(ram_addr), val);
1819 break;
1820 case 4:
1821 stl_p(qemu_get_ram_ptr(ram_addr), val);
1822 break;
1823 default:
1824 abort();
3a7d929e 1825 }
58d2707e
PB
1826 /* Set both VGA and migration bits for simplicity and to remove
1827 * the notdirty callback faster.
1828 */
1829 cpu_physical_memory_set_dirty_range(ram_addr, size,
1830 DIRTY_CLIENTS_NOCODE);
f23db169
FB
1831 /* we remove the notdirty callback only if the code has been
1832 flushed */
a2cd8c85 1833 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1834 CPUArchState *env = current_cpu->env_ptr;
93afeade 1835 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1836 }
9fa3e853
FB
1837}
1838
b018ddf6
PB
1839static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1840 unsigned size, bool is_write)
1841{
1842 return is_write;
1843}
1844
0e0df1e2 1845static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1846 .write = notdirty_mem_write,
b018ddf6 1847 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1848 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1849};
1850
0f459d16 1851/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 1852static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 1853{
93afeade
AF
1854 CPUState *cpu = current_cpu;
1855 CPUArchState *env = cpu->env_ptr;
06d55cc1 1856 target_ulong pc, cs_base;
0f459d16 1857 target_ulong vaddr;
a1d1bb31 1858 CPUWatchpoint *wp;
06d55cc1 1859 int cpu_flags;
0f459d16 1860
ff4700b0 1861 if (cpu->watchpoint_hit) {
06d55cc1 1862 /* We re-entered the check after replacing the TB. Now raise
1863 * the debug interrupt so that it will trigger after the
1864 * current instruction. */
93afeade 1865 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1866 return;
1867 }
93afeade 1868 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1869 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1870 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1871 && (wp->flags & flags)) {
08225676
PM
1872 if (flags == BP_MEM_READ) {
1873 wp->flags |= BP_WATCHPOINT_HIT_READ;
1874 } else {
1875 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1876 }
1877 wp->hitaddr = vaddr;
66b9b43c 1878 wp->hitattrs = attrs;
ff4700b0
AF
1879 if (!cpu->watchpoint_hit) {
1880 cpu->watchpoint_hit = wp;
239c51a5 1881 tb_check_watchpoint(cpu);
6e140f28 1882 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1883 cpu->exception_index = EXCP_DEBUG;
5638d180 1884 cpu_loop_exit(cpu);
6e140f28
AL
1885 } else {
1886 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1887 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1888 cpu_resume_from_signal(cpu, NULL);
6e140f28 1889 }
06d55cc1 1890 }
6e140f28
AL
1891 } else {
1892 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1893 }
1894 }
1895}
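
/* Illustrative sketch (not part of exec.c): how a debug front end might arm
 * a write watchpoint that check_watchpoint() above will later report.  The
 * cpu_watchpoint_insert() prototype and the BP_* flags are assumed to be the
 * ones declared in include/qom/cpu.h for this tree; the "example_" name is
 * hypothetical. */
static int example_watch_writes(CPUState *cpu, vaddr addr, vaddr len)
{
    CPUWatchpoint *wp;

    return cpu_watchpoint_insert(cpu, addr, len, BP_MEM_WRITE | BP_GDB, &wp);
}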
1896
6658ffb8
PB
1897/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1898 so these check for a hit then pass through to the normal out-of-line
1899 phys routines. */
66b9b43c
PM
1900static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1901 unsigned size, MemTxAttrs attrs)
6658ffb8 1902{
66b9b43c
PM
1903 MemTxResult res;
1904 uint64_t data;
1905
1906 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 1907 switch (size) {
66b9b43c
PM
1908 case 1:
1909 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
1910 break;
1911 case 2:
1912 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
1913 break;
1914 case 4:
1915 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
1916 break;
1ec9b909
AK
1917 default: abort();
1918 }
66b9b43c
PM
1919 *pdata = data;
1920 return res;
6658ffb8
PB
1921}
1922
66b9b43c
PM
1923static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1924 uint64_t val, unsigned size,
1925 MemTxAttrs attrs)
6658ffb8 1926{
66b9b43c
PM
1927 MemTxResult res;
1928
1929 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 1930 switch (size) {
67364150 1931 case 1:
66b9b43c 1932 address_space_stb(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1933 break;
1934 case 2:
66b9b43c 1935 address_space_stw(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1936 break;
1937 case 4:
66b9b43c 1938 address_space_stl(&address_space_memory, addr, val, attrs, &res);
67364150 1939 break;
1ec9b909
AK
1940 default: abort();
1941 }
66b9b43c 1942 return res;
6658ffb8
PB
1943}
1944
1ec9b909 1945static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
1946 .read_with_attrs = watch_mem_read,
1947 .write_with_attrs = watch_mem_write,
1ec9b909 1948 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1949};
6658ffb8 1950
f25a49e0
PM
1951static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1952 unsigned len, MemTxAttrs attrs)
db7b5426 1953{
acc9d80b 1954 subpage_t *subpage = opaque;
ff6cff75 1955 uint8_t buf[8];
5c9eb028 1956 MemTxResult res;
791af8c8 1957
db7b5426 1958#if defined(DEBUG_SUBPAGE)
016e9d62 1959 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1960 subpage, len, addr);
db7b5426 1961#endif
5c9eb028
PM
1962 res = address_space_read(subpage->as, addr + subpage->base,
1963 attrs, buf, len);
1964 if (res) {
1965 return res;
f25a49e0 1966 }
acc9d80b
JK
1967 switch (len) {
1968 case 1:
f25a49e0
PM
1969 *data = ldub_p(buf);
1970 return MEMTX_OK;
acc9d80b 1971 case 2:
f25a49e0
PM
1972 *data = lduw_p(buf);
1973 return MEMTX_OK;
acc9d80b 1974 case 4:
f25a49e0
PM
1975 *data = ldl_p(buf);
1976 return MEMTX_OK;
ff6cff75 1977 case 8:
f25a49e0
PM
1978 *data = ldq_p(buf);
1979 return MEMTX_OK;
acc9d80b
JK
1980 default:
1981 abort();
1982 }
db7b5426
BS
1983}
1984
f25a49e0
PM
1985static MemTxResult subpage_write(void *opaque, hwaddr addr,
1986 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 1987{
acc9d80b 1988 subpage_t *subpage = opaque;
ff6cff75 1989 uint8_t buf[8];
acc9d80b 1990
db7b5426 1991#if defined(DEBUG_SUBPAGE)
016e9d62 1992 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1993 " value %"PRIx64"\n",
1994 __func__, subpage, len, addr, value);
db7b5426 1995#endif
acc9d80b
JK
1996 switch (len) {
1997 case 1:
1998 stb_p(buf, value);
1999 break;
2000 case 2:
2001 stw_p(buf, value);
2002 break;
2003 case 4:
2004 stl_p(buf, value);
2005 break;
ff6cff75
PB
2006 case 8:
2007 stq_p(buf, value);
2008 break;
acc9d80b
JK
2009 default:
2010 abort();
2011 }
5c9eb028
PM
2012 return address_space_write(subpage->as, addr + subpage->base,
2013 attrs, buf, len);
db7b5426
BS
2014}
2015
c353e4cc 2016static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2017 unsigned len, bool is_write)
c353e4cc 2018{
acc9d80b 2019 subpage_t *subpage = opaque;
c353e4cc 2020#if defined(DEBUG_SUBPAGE)
016e9d62 2021 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2022 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2023#endif
2024
acc9d80b 2025 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2026 len, is_write);
c353e4cc
PB
2027}
2028
70c68e44 2029static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2030 .read_with_attrs = subpage_read,
2031 .write_with_attrs = subpage_write,
ff6cff75
PB
2032 .impl.min_access_size = 1,
2033 .impl.max_access_size = 8,
2034 .valid.min_access_size = 1,
2035 .valid.max_access_size = 8,
c353e4cc 2036 .valid.accepts = subpage_accepts,
70c68e44 2037 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2038};
2039
c227f099 2040static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2041 uint16_t section)
db7b5426
BS
2042{
2043 int idx, eidx;
2044
2045 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2046 return -1;
2047 idx = SUBPAGE_IDX(start);
2048 eidx = SUBPAGE_IDX(end);
2049#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2050 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2051 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2052#endif
db7b5426 2053 for (; idx <= eidx; idx++) {
5312bd8b 2054 mmio->sub_section[idx] = section;
db7b5426
BS
2055 }
2056
2057 return 0;
2058}
2059
acc9d80b 2060static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2061{
c227f099 2062 subpage_t *mmio;
db7b5426 2063
7267c094 2064 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2065
acc9d80b 2066 mmio->as = as;
1eec614b 2067 mmio->base = base;
2c9b15ca 2068 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2069 NULL, TARGET_PAGE_SIZE);
b3b00c78 2070 mmio->iomem.subpage = true;
db7b5426 2071#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2072 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2073 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2074#endif
b41aac4f 2075 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2076
2077 return mmio;
2078}
2079
a656e22f
PC
2080static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2081 MemoryRegion *mr)
5312bd8b 2082{
a656e22f 2083 assert(as);
5312bd8b 2084 MemoryRegionSection section = {
a656e22f 2085 .address_space = as,
5312bd8b
AK
2086 .mr = mr,
2087 .offset_within_address_space = 0,
2088 .offset_within_region = 0,
052e87b0 2089 .size = int128_2_64(),
5312bd8b
AK
2090 };
2091
53cb28cb 2092 return phys_section_add(map, &section);
5312bd8b
AK
2093}
2094
9d82b5a7 2095MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 2096{
79e2b9ae
PB
2097 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2098 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2099
2100 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2101}
2102
e9179ce1
AK
2103static void io_mem_init(void)
2104{
1f6245e5 2105 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2106 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2107 NULL, UINT64_MAX);
2c9b15ca 2108 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2109 NULL, UINT64_MAX);
2c9b15ca 2110 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2111 NULL, UINT64_MAX);
e9179ce1
AK
2112}
2113
ac1970fb 2114static void mem_begin(MemoryListener *listener)
00752703
PB
2115{
2116 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2117 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2118 uint16_t n;
2119
a656e22f 2120 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2121 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2122 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2123 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2124 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2125 assert(n == PHYS_SECTION_ROM);
a656e22f 2126 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2127 assert(n == PHYS_SECTION_WATCH);
00752703 2128
9736e55b 2129 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2130 d->as = as;
2131 as->next_dispatch = d;
2132}
2133
79e2b9ae
PB
2134static void address_space_dispatch_free(AddressSpaceDispatch *d)
2135{
2136 phys_sections_free(&d->map);
2137 g_free(d);
2138}
2139
00752703 2140static void mem_commit(MemoryListener *listener)
ac1970fb 2141{
89ae337a 2142 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2143 AddressSpaceDispatch *cur = as->dispatch;
2144 AddressSpaceDispatch *next = as->next_dispatch;
2145
53cb28cb 2146 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2147
79e2b9ae 2148 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2149 if (cur) {
79e2b9ae 2150 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2151 }
9affd6fc
PB
2152}
2153
1d71148e 2154static void tcg_commit(MemoryListener *listener)
50c1e149 2155{
182735ef 2156 CPUState *cpu;
117712c3
AK
2157
2158 /* since each CPU stores ram addresses in its TLB cache, we must
2159 reset the modified entries */
2160 /* XXX: slow ! */
bdc44640 2161 CPU_FOREACH(cpu) {
33bde2e1
EI
2162 /* FIXME: Disentangle the cpu.h circular files deps so we can
2163 directly get the right CPU from listener. */
2164 if (cpu->tcg_as_listener != listener) {
2165 continue;
2166 }
76e5c76f 2167 cpu_reload_memory_map(cpu);
117712c3 2168 }
50c1e149
AK
2169}
2170
ac1970fb
AK
2171void address_space_init_dispatch(AddressSpace *as)
2172{
00752703 2173 as->dispatch = NULL;
89ae337a 2174 as->dispatch_listener = (MemoryListener) {
ac1970fb 2175 .begin = mem_begin,
00752703 2176 .commit = mem_commit,
ac1970fb
AK
2177 .region_add = mem_add,
2178 .region_nop = mem_add,
2179 .priority = 0,
2180 };
89ae337a 2181 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2182}
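
/* Illustrative sketch (not part of exec.c): a minimal MemoryListener in the
 * style of the dispatch listener registered above.  Only region_add is
 * hooked; the callback body and the "example_" names are hypothetical. */
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* e.g. mirror the new section into an accelerator-specific table */
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

static void example_register_listener(void)
{
    memory_listener_register(&example_listener, &address_space_memory);
}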
2183
6e48e8f9
PB
2184void address_space_unregister(AddressSpace *as)
2185{
2186 memory_listener_unregister(&as->dispatch_listener);
2187}
2188
83f3c251
AK
2189void address_space_destroy_dispatch(AddressSpace *as)
2190{
2191 AddressSpaceDispatch *d = as->dispatch;
2192
79e2b9ae
PB
2193 atomic_rcu_set(&as->dispatch, NULL);
2194 if (d) {
2195 call_rcu(d, address_space_dispatch_free, rcu);
2196 }
83f3c251
AK
2197}
2198
62152b8a
AK
2199static void memory_map_init(void)
2200{
7267c094 2201 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2202
57271d63 2203 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2204 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2205
7267c094 2206 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2207 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2208 65536);
7dca8043 2209 address_space_init(&address_space_io, system_io, "I/O");
62152b8a
AK
2210}
2211
2212MemoryRegion *get_system_memory(void)
2213{
2214 return system_memory;
2215}
2216
309cb471
AK
2217MemoryRegion *get_system_io(void)
2218{
2219 return system_io;
2220}
2221
e2eef170
PB
2222#endif /* !defined(CONFIG_USER_ONLY) */
2223
13eb76e0
FB
2224/* physical memory access (slow version, mainly for debug) */
2225#if defined(CONFIG_USER_ONLY)
f17ec444 2226int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2227 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2228{
2229 int l, flags;
2230 target_ulong page;
53a5960a 2231 void * p;
13eb76e0
FB
2232
2233 while (len > 0) {
2234 page = addr & TARGET_PAGE_MASK;
2235 l = (page + TARGET_PAGE_SIZE) - addr;
2236 if (l > len)
2237 l = len;
2238 flags = page_get_flags(page);
2239 if (!(flags & PAGE_VALID))
a68fe89c 2240 return -1;
13eb76e0
FB
2241 if (is_write) {
2242 if (!(flags & PAGE_WRITE))
a68fe89c 2243 return -1;
579a97f7 2244 /* XXX: this code should not depend on lock_user */
72fb7daa 2245 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2246 return -1;
72fb7daa
AJ
2247 memcpy(p, buf, l);
2248 unlock_user(p, addr, l);
13eb76e0
FB
2249 } else {
2250 if (!(flags & PAGE_READ))
a68fe89c 2251 return -1;
579a97f7 2252 /* XXX: this code should not depend on lock_user */
72fb7daa 2253 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2254 return -1;
72fb7daa 2255 memcpy(buf, p, l);
5b257578 2256 unlock_user(p, addr, 0);
13eb76e0
FB
2257 }
2258 len -= l;
2259 buf += l;
2260 addr += l;
2261 }
a68fe89c 2262 return 0;
13eb76e0 2263}
8df1cd07 2264
13eb76e0 2265#else
51d7a9eb 2266
845b6214 2267static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
a8170e5e 2268 hwaddr length)
51d7a9eb 2269{
e87f7778
PB
2270 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2271 /* No early return if dirty_log_mask is or becomes 0, because
2272 * cpu_physical_memory_set_dirty_range will still call
2273 * xen_modified_memory.
2274 */
2275 if (dirty_log_mask) {
2276 dirty_log_mask =
2277 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2278 }
2279 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2280 tb_invalidate_phys_range(addr, addr + length);
2281 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
51d7a9eb 2282 }
e87f7778 2283 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
51d7a9eb
AP
2284}
2285
23326164 2286static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2287{
e1622f4b 2288 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2289
2290 /* Regions are assumed to support 1-4 byte accesses unless
2291 otherwise specified. */
23326164
RH
2292 if (access_size_max == 0) {
2293 access_size_max = 4;
2294 }
2295
2296 /* Bound the maximum access by the alignment of the address. */
2297 if (!mr->ops->impl.unaligned) {
2298 unsigned align_size_max = addr & -addr;
2299 if (align_size_max != 0 && align_size_max < access_size_max) {
2300 access_size_max = align_size_max;
2301 }
82f2563f 2302 }
23326164
RH
2303
2304 /* Don't attempt accesses larger than the maximum. */
2305 if (l > access_size_max) {
2306 l = access_size_max;
82f2563f 2307 }
098178f2
PB
2308 if (l & (l - 1)) {
2309 l = 1 << (qemu_fls(l) - 1);
2310 }
23326164
RH
2311
2312 return l;
82f2563f
PB
2313}
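
/* Illustrative worked example (not part of exec.c) of the clamping above,
 * assuming a region whose valid.max_access_size is 4 and whose
 * impl.unaligned flag is not set.  The "example_" name is hypothetical. */
static void example_access_size_checks(MemoryRegion *mr)
{
    assert(memory_access_size(mr, 8, 0x1000) == 4); /* clamped by the region */
    assert(memory_access_size(mr, 8, 0x1002) == 2); /* clamped by alignment:
                                                       0x1002 & -0x1002 == 2 */
    assert(memory_access_size(mr, 3, 0x1000) == 2); /* rounded down to a
                                                       power of two */
}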
2314
5c9eb028
PM
2315MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2316 uint8_t *buf, int len, bool is_write)
13eb76e0 2317{
149f54b5 2318 hwaddr l;
13eb76e0 2319 uint8_t *ptr;
791af8c8 2320 uint64_t val;
149f54b5 2321 hwaddr addr1;
5c8a00ce 2322 MemoryRegion *mr;
3b643495 2323 MemTxResult result = MEMTX_OK;
3b46e624 2324
41063e1e 2325 rcu_read_lock();
13eb76e0 2326 while (len > 0) {
149f54b5 2327 l = len;
5c8a00ce 2328 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2329
13eb76e0 2330 if (is_write) {
5c8a00ce
PB
2331 if (!memory_access_is_direct(mr, is_write)) {
2332 l = memory_access_size(mr, l, addr1);
4917cf44 2333 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2334 potential bugs */
23326164
RH
2335 switch (l) {
2336 case 8:
2337 /* 64 bit write access */
2338 val = ldq_p(buf);
3b643495
PM
2339 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2340 attrs);
23326164
RH
2341 break;
2342 case 4:
1c213d19 2343 /* 32 bit write access */
c27004ec 2344 val = ldl_p(buf);
3b643495
PM
2345 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2346 attrs);
23326164
RH
2347 break;
2348 case 2:
1c213d19 2349 /* 16 bit write access */
c27004ec 2350 val = lduw_p(buf);
3b643495
PM
2351 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2352 attrs);
23326164
RH
2353 break;
2354 case 1:
1c213d19 2355 /* 8 bit write access */
c27004ec 2356 val = ldub_p(buf);
3b643495
PM
2357 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2358 attrs);
23326164
RH
2359 break;
2360 default:
2361 abort();
13eb76e0 2362 }
2bbfa05d 2363 } else {
5c8a00ce 2364 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2365 /* RAM case */
5579c7f3 2366 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2367 memcpy(ptr, buf, l);
845b6214 2368 invalidate_and_set_dirty(mr, addr1, l);
13eb76e0
FB
2369 }
2370 } else {
5c8a00ce 2371 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2372 /* I/O case */
5c8a00ce 2373 l = memory_access_size(mr, l, addr1);
23326164
RH
2374 switch (l) {
2375 case 8:
2376 /* 64 bit read access */
3b643495
PM
2377 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2378 attrs);
23326164
RH
2379 stq_p(buf, val);
2380 break;
2381 case 4:
13eb76e0 2382 /* 32 bit read access */
3b643495
PM
2383 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2384 attrs);
c27004ec 2385 stl_p(buf, val);
23326164
RH
2386 break;
2387 case 2:
13eb76e0 2388 /* 16 bit read access */
3b643495
PM
2389 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2390 attrs);
c27004ec 2391 stw_p(buf, val);
23326164
RH
2392 break;
2393 case 1:
1c213d19 2394 /* 8 bit read access */
3b643495
PM
2395 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2396 attrs);
c27004ec 2397 stb_p(buf, val);
23326164
RH
2398 break;
2399 default:
2400 abort();
13eb76e0
FB
2401 }
2402 } else {
2403 /* RAM case */
5c8a00ce 2404 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2405 memcpy(buf, ptr, l);
13eb76e0
FB
2406 }
2407 }
2408 len -= l;
2409 buf += l;
2410 addr += l;
2411 }
41063e1e 2412 rcu_read_unlock();
fd8aaa76 2413
3b643495 2414 return result;
13eb76e0 2415}
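
/* Illustrative sketch (not part of exec.c): device-model style DMA through
 * address_space_rw() above, checking the MemTxResult rather than discarding
 * it.  The "example_" name is hypothetical. */
static bool example_dma_write(AddressSpace *as, hwaddr gpa,
                              const uint8_t *buf, int len)
{
    return address_space_rw(as, gpa, MEMTXATTRS_UNSPECIFIED,
                            (uint8_t *)buf, len, true) == MEMTX_OK;
}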
8df1cd07 2416
5c9eb028
PM
2417MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2418 const uint8_t *buf, int len)
ac1970fb 2419{
5c9eb028 2420 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
ac1970fb
AK
2421}
2422
5c9eb028
PM
2423MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2424 uint8_t *buf, int len)
ac1970fb 2425{
5c9eb028 2426 return address_space_rw(as, addr, attrs, buf, len, false);
ac1970fb
AK
2427}
2428
2429
a8170e5e 2430void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2431 int len, int is_write)
2432{
5c9eb028
PM
2433 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2434 buf, len, is_write);
ac1970fb
AK
2435}
2436
582b55a9
AG
2437enum write_rom_type {
2438 WRITE_DATA,
2439 FLUSH_CACHE,
2440};
2441
2a221651 2442static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2443 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2444{
149f54b5 2445 hwaddr l;
d0ecd2aa 2446 uint8_t *ptr;
149f54b5 2447 hwaddr addr1;
5c8a00ce 2448 MemoryRegion *mr;
3b46e624 2449
41063e1e 2450 rcu_read_lock();
d0ecd2aa 2451 while (len > 0) {
149f54b5 2452 l = len;
2a221651 2453 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2454
5c8a00ce
PB
2455 if (!(memory_region_is_ram(mr) ||
2456 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2457 /* do nothing */
2458 } else {
5c8a00ce 2459 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2460 /* ROM/RAM case */
5579c7f3 2461 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2462 switch (type) {
2463 case WRITE_DATA:
2464 memcpy(ptr, buf, l);
845b6214 2465 invalidate_and_set_dirty(mr, addr1, l);
582b55a9
AG
2466 break;
2467 case FLUSH_CACHE:
2468 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2469 break;
2470 }
d0ecd2aa
FB
2471 }
2472 len -= l;
2473 buf += l;
2474 addr += l;
2475 }
41063e1e 2476 rcu_read_unlock();
d0ecd2aa
FB
2477}
2478
582b55a9 2479/* used for ROM loading : can write in RAM and ROM */
2a221651 2480void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2481 const uint8_t *buf, int len)
2482{
2a221651 2483 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2484}
2485
2486void cpu_flush_icache_range(hwaddr start, int len)
2487{
2488 /*
2489 * This function should do the same thing as an icache flush that was
2490 * triggered from within the guest. For TCG we are always cache coherent,
2491 * so there is no need to flush anything. For KVM / Xen we need to flush
2492 * the host's instruction cache at least.
2493 */
2494 if (tcg_enabled()) {
2495 return;
2496 }
2497
2a221651
EI
2498 cpu_physical_memory_write_rom_internal(&address_space_memory,
2499 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2500}
2501
6d16c2f8 2502typedef struct {
d3e71559 2503 MemoryRegion *mr;
6d16c2f8 2504 void *buffer;
a8170e5e
AK
2505 hwaddr addr;
2506 hwaddr len;
c2cba0ff 2507 bool in_use;
6d16c2f8
AL
2508} BounceBuffer;
2509
2510static BounceBuffer bounce;
2511
ba223c29 2512typedef struct MapClient {
e95205e1 2513 QEMUBH *bh;
72cf2d4f 2514 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2515} MapClient;
2516
38e047b5 2517QemuMutex map_client_list_lock;
72cf2d4f
BS
2518static QLIST_HEAD(map_client_list, MapClient) map_client_list
2519 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2520
e95205e1
FZ
2521static void cpu_unregister_map_client_do(MapClient *client)
2522{
2523 QLIST_REMOVE(client, link);
2524 g_free(client);
2525}
2526
33b6c2ed
FZ
2527static void cpu_notify_map_clients_locked(void)
2528{
2529 MapClient *client;
2530
2531 while (!QLIST_EMPTY(&map_client_list)) {
2532 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2533 qemu_bh_schedule(client->bh);
2534 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2535 }
2536}
2537
e95205e1 2538void cpu_register_map_client(QEMUBH *bh)
ba223c29 2539{
7267c094 2540 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2541
38e047b5 2542 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2543 client->bh = bh;
72cf2d4f 2544 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2545 if (!atomic_read(&bounce.in_use)) {
2546 cpu_notify_map_clients_locked();
2547 }
38e047b5 2548 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2549}
2550
38e047b5 2551void cpu_exec_init_all(void)
ba223c29 2552{
38e047b5
FZ
2553 qemu_mutex_init(&ram_list.mutex);
2554 memory_map_init();
2555 io_mem_init();
2556 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2557}
2558
e95205e1 2559void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2560{
2561 MapClient *client;
2562
e95205e1
FZ
2563 qemu_mutex_lock(&map_client_list_lock);
2564 QLIST_FOREACH(client, &map_client_list, link) {
2565 if (client->bh == bh) {
2566 cpu_unregister_map_client_do(client);
2567 break;
2568 }
ba223c29 2569 }
e95205e1 2570 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2571}
2572
2573static void cpu_notify_map_clients(void)
2574{
38e047b5 2575 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2576 cpu_notify_map_clients_locked();
38e047b5 2577 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2578}
2579
51644ab7
PB
2580bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2581{
5c8a00ce 2582 MemoryRegion *mr;
51644ab7
PB
2583 hwaddr l, xlat;
2584
41063e1e 2585 rcu_read_lock();
51644ab7
PB
2586 while (len > 0) {
2587 l = len;
5c8a00ce
PB
2588 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2589 if (!memory_access_is_direct(mr, is_write)) {
2590 l = memory_access_size(mr, l, addr);
2591 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2592 return false;
2593 }
2594 }
2595
2596 len -= l;
2597 addr += l;
2598 }
41063e1e 2599 rcu_read_unlock();
51644ab7
PB
2600 return true;
2601}
2602
6d16c2f8
AL
2603/* Map a physical memory region into a host virtual address.
2604 * May map a subset of the requested range, given by and returned in *plen.
2605 * May return NULL if resources needed to perform the mapping are exhausted.
2606 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2607 * Use cpu_register_map_client() to know when retrying the map operation is
2608 * likely to succeed.
6d16c2f8 2609 */
ac1970fb 2610void *address_space_map(AddressSpace *as,
a8170e5e
AK
2611 hwaddr addr,
2612 hwaddr *plen,
ac1970fb 2613 bool is_write)
6d16c2f8 2614{
a8170e5e 2615 hwaddr len = *plen;
e3127ae0
PB
2616 hwaddr done = 0;
2617 hwaddr l, xlat, base;
2618 MemoryRegion *mr, *this_mr;
2619 ram_addr_t raddr;
6d16c2f8 2620
e3127ae0
PB
2621 if (len == 0) {
2622 return NULL;
2623 }
38bee5dc 2624
e3127ae0 2625 l = len;
41063e1e 2626 rcu_read_lock();
e3127ae0 2627 mr = address_space_translate(as, addr, &xlat, &l, is_write);
41063e1e 2628
e3127ae0 2629 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2630 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 2631 rcu_read_unlock();
e3127ae0 2632 return NULL;
6d16c2f8 2633 }
e85d9db5
KW
2634 /* Avoid unbounded allocations */
2635 l = MIN(l, TARGET_PAGE_SIZE);
2636 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2637 bounce.addr = addr;
2638 bounce.len = l;
d3e71559
PB
2639
2640 memory_region_ref(mr);
2641 bounce.mr = mr;
e3127ae0 2642 if (!is_write) {
5c9eb028
PM
2643 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2644 bounce.buffer, l);
8ab934f9 2645 }
6d16c2f8 2646
41063e1e 2647 rcu_read_unlock();
e3127ae0
PB
2648 *plen = l;
2649 return bounce.buffer;
2650 }
2651
2652 base = xlat;
2653 raddr = memory_region_get_ram_addr(mr);
2654
2655 for (;;) {
6d16c2f8
AL
2656 len -= l;
2657 addr += l;
e3127ae0
PB
2658 done += l;
2659 if (len == 0) {
2660 break;
2661 }
2662
2663 l = len;
2664 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2665 if (this_mr != mr || xlat != base + done) {
2666 break;
2667 }
6d16c2f8 2668 }
e3127ae0 2669
d3e71559 2670 memory_region_ref(mr);
41063e1e 2671 rcu_read_unlock();
e3127ae0
PB
2672 *plen = done;
2673 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2674}
2675
ac1970fb 2676/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2677 * Will also mark the memory as dirty if is_write == 1. access_len gives
2678 * the amount of memory that was actually read or written by the caller.
2679 */
a8170e5e
AK
2680void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2681 int is_write, hwaddr access_len)
6d16c2f8
AL
2682{
2683 if (buffer != bounce.buffer) {
d3e71559
PB
2684 MemoryRegion *mr;
2685 ram_addr_t addr1;
2686
2687 mr = qemu_ram_addr_from_host(buffer, &addr1);
2688 assert(mr != NULL);
6d16c2f8 2689 if (is_write) {
845b6214 2690 invalidate_and_set_dirty(mr, addr1, access_len);
6d16c2f8 2691 }
868bb33f 2692 if (xen_enabled()) {
e41d7c69 2693 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2694 }
d3e71559 2695 memory_region_unref(mr);
6d16c2f8
AL
2696 return;
2697 }
2698 if (is_write) {
5c9eb028
PM
2699 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2700 bounce.buffer, access_len);
6d16c2f8 2701 }
f8a83245 2702 qemu_vfree(bounce.buffer);
6d16c2f8 2703 bounce.buffer = NULL;
d3e71559 2704 memory_region_unref(bounce.mr);
c2cba0ff 2705 atomic_mb_set(&bounce.in_use, false);
ba223c29 2706 cpu_notify_map_clients();
6d16c2f8 2707}
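
/* Illustrative sketch (not part of exec.c) of the map/unmap contract
 * documented above: map a guest-physical buffer for writing, fill it, and
 * unmap it with the number of bytes actually touched.  On a NULL return a
 * real device would register a QEMUBH with cpu_register_map_client() and
 * retry from that bottom half.  The "example_" name is hypothetical. */
static bool example_zero_guest_buffer(AddressSpace *as, hwaddr gpa, hwaddr want)
{
    hwaddr len = want;
    void *host = address_space_map(as, gpa, &len, true);

    if (!host) {
        return false;               /* retry later via a map client */
    }
    memset(host, 0, len);           /* len may be smaller than 'want' */
    address_space_unmap(as, host, len, true, len);
    return len == want;
}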
d0ecd2aa 2708
a8170e5e
AK
2709void *cpu_physical_memory_map(hwaddr addr,
2710 hwaddr *plen,
ac1970fb
AK
2711 int is_write)
2712{
2713 return address_space_map(&address_space_memory, addr, plen, is_write);
2714}
2715
a8170e5e
AK
2716void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2717 int is_write, hwaddr access_len)
ac1970fb
AK
2718{
2719 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2720}
2721
8df1cd07 2722/* warning: addr must be aligned */
50013115
PM
2723static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2724 MemTxAttrs attrs,
2725 MemTxResult *result,
2726 enum device_endian endian)
8df1cd07 2727{
8df1cd07 2728 uint8_t *ptr;
791af8c8 2729 uint64_t val;
5c8a00ce 2730 MemoryRegion *mr;
149f54b5
PB
2731 hwaddr l = 4;
2732 hwaddr addr1;
50013115 2733 MemTxResult r;
8df1cd07 2734
41063e1e 2735 rcu_read_lock();
fdfba1a2 2736 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2737 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2738 /* I/O case */
50013115 2739 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
2740#if defined(TARGET_WORDS_BIGENDIAN)
2741 if (endian == DEVICE_LITTLE_ENDIAN) {
2742 val = bswap32(val);
2743 }
2744#else
2745 if (endian == DEVICE_BIG_ENDIAN) {
2746 val = bswap32(val);
2747 }
2748#endif
8df1cd07
FB
2749 } else {
2750 /* RAM case */
5c8a00ce 2751 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2752 & TARGET_PAGE_MASK)
149f54b5 2753 + addr1);
1e78bcc1
AG
2754 switch (endian) {
2755 case DEVICE_LITTLE_ENDIAN:
2756 val = ldl_le_p(ptr);
2757 break;
2758 case DEVICE_BIG_ENDIAN:
2759 val = ldl_be_p(ptr);
2760 break;
2761 default:
2762 val = ldl_p(ptr);
2763 break;
2764 }
50013115
PM
2765 r = MEMTX_OK;
2766 }
2767 if (result) {
2768 *result = r;
8df1cd07 2769 }
41063e1e 2770 rcu_read_unlock();
8df1cd07
FB
2771 return val;
2772}
2773
50013115
PM
2774uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2775 MemTxAttrs attrs, MemTxResult *result)
2776{
2777 return address_space_ldl_internal(as, addr, attrs, result,
2778 DEVICE_NATIVE_ENDIAN);
2779}
2780
2781uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2782 MemTxAttrs attrs, MemTxResult *result)
2783{
2784 return address_space_ldl_internal(as, addr, attrs, result,
2785 DEVICE_LITTLE_ENDIAN);
2786}
2787
2788uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2789 MemTxAttrs attrs, MemTxResult *result)
2790{
2791 return address_space_ldl_internal(as, addr, attrs, result,
2792 DEVICE_BIG_ENDIAN);
2793}
2794
fdfba1a2 2795uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2796{
50013115 2797 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2798}
2799
fdfba1a2 2800uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2801{
50013115 2802 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2803}
2804
fdfba1a2 2805uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2806{
50013115 2807 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2808}
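
/* Illustrative sketch (not part of exec.c): using the endian-specific
 * helpers above to read one little-endian and one big-endian 32-bit field
 * of a guest descriptor, propagating any transaction failure.  The
 * "example_" name and the descriptor layout are hypothetical. */
static bool example_read_desc(AddressSpace *as, hwaddr desc,
                              uint32_t *le_field, uint32_t *be_field)
{
    MemTxResult r1, r2;

    *le_field = address_space_ldl_le(as, desc, MEMTXATTRS_UNSPECIFIED, &r1);
    *be_field = address_space_ldl_be(as, desc + 4, MEMTXATTRS_UNSPECIFIED, &r2);
    return r1 == MEMTX_OK && r2 == MEMTX_OK;
}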
2809
84b7b8e7 2810/* warning: addr must be aligned */
50013115
PM
2811static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2812 MemTxAttrs attrs,
2813 MemTxResult *result,
2814 enum device_endian endian)
84b7b8e7 2815{
84b7b8e7
FB
2816 uint8_t *ptr;
2817 uint64_t val;
5c8a00ce 2818 MemoryRegion *mr;
149f54b5
PB
2819 hwaddr l = 8;
2820 hwaddr addr1;
50013115 2821 MemTxResult r;
84b7b8e7 2822
41063e1e 2823 rcu_read_lock();
2c17449b 2824 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2825 false);
2826 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2827 /* I/O case */
50013115 2828 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
2829#if defined(TARGET_WORDS_BIGENDIAN)
2830 if (endian == DEVICE_LITTLE_ENDIAN) {
2831 val = bswap64(val);
2832 }
2833#else
2834 if (endian == DEVICE_BIG_ENDIAN) {
2835 val = bswap64(val);
2836 }
84b7b8e7
FB
2837#endif
2838 } else {
2839 /* RAM case */
5c8a00ce 2840 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2841 & TARGET_PAGE_MASK)
149f54b5 2842 + addr1);
1e78bcc1
AG
2843 switch (endian) {
2844 case DEVICE_LITTLE_ENDIAN:
2845 val = ldq_le_p(ptr);
2846 break;
2847 case DEVICE_BIG_ENDIAN:
2848 val = ldq_be_p(ptr);
2849 break;
2850 default:
2851 val = ldq_p(ptr);
2852 break;
2853 }
50013115
PM
2854 r = MEMTX_OK;
2855 }
2856 if (result) {
2857 *result = r;
84b7b8e7 2858 }
41063e1e 2859 rcu_read_unlock();
84b7b8e7
FB
2860 return val;
2861}
2862
50013115
PM
2863uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2864 MemTxAttrs attrs, MemTxResult *result)
2865{
2866 return address_space_ldq_internal(as, addr, attrs, result,
2867 DEVICE_NATIVE_ENDIAN);
2868}
2869
2870uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2871 MemTxAttrs attrs, MemTxResult *result)
2872{
2873 return address_space_ldq_internal(as, addr, attrs, result,
2874 DEVICE_LITTLE_ENDIAN);
2875}
2876
2877uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2878 MemTxAttrs attrs, MemTxResult *result)
2879{
2880 return address_space_ldq_internal(as, addr, attrs, result,
2881 DEVICE_BIG_ENDIAN);
2882}
2883
2c17449b 2884uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2885{
50013115 2886 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2887}
2888
2c17449b 2889uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2890{
50013115 2891 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2892}
2893
2c17449b 2894uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2895{
50013115 2896 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2897}
2898
aab33094 2899/* XXX: optimize */
50013115
PM
2900uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2901 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
2902{
2903 uint8_t val;
50013115
PM
2904 MemTxResult r;
2905
2906 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2907 if (result) {
2908 *result = r;
2909 }
aab33094
FB
2910 return val;
2911}
2912
50013115
PM
2913uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2914{
2915 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2916}
2917
733f0b02 2918/* warning: addr must be aligned */
50013115
PM
2919static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2920 hwaddr addr,
2921 MemTxAttrs attrs,
2922 MemTxResult *result,
2923 enum device_endian endian)
aab33094 2924{
733f0b02
MT
2925 uint8_t *ptr;
2926 uint64_t val;
5c8a00ce 2927 MemoryRegion *mr;
149f54b5
PB
2928 hwaddr l = 2;
2929 hwaddr addr1;
50013115 2930 MemTxResult r;
733f0b02 2931
41063e1e 2932 rcu_read_lock();
41701aa4 2933 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2934 false);
2935 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2936 /* I/O case */
50013115 2937 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
2938#if defined(TARGET_WORDS_BIGENDIAN)
2939 if (endian == DEVICE_LITTLE_ENDIAN) {
2940 val = bswap16(val);
2941 }
2942#else
2943 if (endian == DEVICE_BIG_ENDIAN) {
2944 val = bswap16(val);
2945 }
2946#endif
733f0b02
MT
2947 } else {
2948 /* RAM case */
5c8a00ce 2949 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2950 & TARGET_PAGE_MASK)
149f54b5 2951 + addr1);
1e78bcc1
AG
2952 switch (endian) {
2953 case DEVICE_LITTLE_ENDIAN:
2954 val = lduw_le_p(ptr);
2955 break;
2956 case DEVICE_BIG_ENDIAN:
2957 val = lduw_be_p(ptr);
2958 break;
2959 default:
2960 val = lduw_p(ptr);
2961 break;
2962 }
50013115
PM
2963 r = MEMTX_OK;
2964 }
2965 if (result) {
2966 *result = r;
733f0b02 2967 }
41063e1e 2968 rcu_read_unlock();
733f0b02 2969 return val;
aab33094
FB
2970}
2971
50013115
PM
2972uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
2973 MemTxAttrs attrs, MemTxResult *result)
2974{
2975 return address_space_lduw_internal(as, addr, attrs, result,
2976 DEVICE_NATIVE_ENDIAN);
2977}
2978
2979uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
2980 MemTxAttrs attrs, MemTxResult *result)
2981{
2982 return address_space_lduw_internal(as, addr, attrs, result,
2983 DEVICE_LITTLE_ENDIAN);
2984}
2985
2986uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
2987 MemTxAttrs attrs, MemTxResult *result)
2988{
2989 return address_space_lduw_internal(as, addr, attrs, result,
2990 DEVICE_BIG_ENDIAN);
2991}
2992
41701aa4 2993uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2994{
50013115 2995 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2996}
2997
41701aa4 2998uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2999{
50013115 3000 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3001}
3002
41701aa4 3003uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3004{
50013115 3005 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3006}
3007
8df1cd07
FB
3008/* warning: addr must be aligned. The ram page is not masked as dirty
3009 and the code inside is not invalidated. It is useful if the dirty
3010 bits are used to track modified PTEs */
50013115
PM
3011void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3012 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 3013{
8df1cd07 3014 uint8_t *ptr;
5c8a00ce 3015 MemoryRegion *mr;
149f54b5
PB
3016 hwaddr l = 4;
3017 hwaddr addr1;
50013115 3018 MemTxResult r;
845b6214 3019 uint8_t dirty_log_mask;
8df1cd07 3020
41063e1e 3021 rcu_read_lock();
2198a121 3022 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3023 true);
3024 if (l < 4 || !memory_access_is_direct(mr, true)) {
50013115 3025 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3026 } else {
5c8a00ce 3027 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3028 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3029 stl_p(ptr, val);
74576198 3030
845b6214
PB
3031 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3032 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
58d2707e 3033 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
50013115
PM
3034 r = MEMTX_OK;
3035 }
3036 if (result) {
3037 *result = r;
8df1cd07 3038 }
41063e1e 3039 rcu_read_unlock();
8df1cd07
FB
3040}
3041
50013115
PM
3042void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3043{
3044 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3045}
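
/* Illustrative sketch (not part of exec.c): the PTE-tracking use case the
 * comment above address_space_stl_notdirty() mentions.  A target MMU helper
 * can set an "accessed" style bit with stl_phys_notdirty() so the write
 * neither dirties the RAM page for migration/VGA nor invalidates translated
 * code.  The bit position and the "example_" name are hypothetical. */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);
    uint32_t accessed = 1u << 5;        /* hypothetical PTE "accessed" bit */

    if (!(pte & accessed)) {
        stl_phys_notdirty(as, pte_addr, pte | accessed);
    }
}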
3046
8df1cd07 3047/* warning: addr must be aligned */
50013115
PM
3048static inline void address_space_stl_internal(AddressSpace *as,
3049 hwaddr addr, uint32_t val,
3050 MemTxAttrs attrs,
3051 MemTxResult *result,
3052 enum device_endian endian)
8df1cd07 3053{
8df1cd07 3054 uint8_t *ptr;
5c8a00ce 3055 MemoryRegion *mr;
149f54b5
PB
3056 hwaddr l = 4;
3057 hwaddr addr1;
50013115 3058 MemTxResult r;
8df1cd07 3059
41063e1e 3060 rcu_read_lock();
ab1da857 3061 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3062 true);
3063 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3064#if defined(TARGET_WORDS_BIGENDIAN)
3065 if (endian == DEVICE_LITTLE_ENDIAN) {
3066 val = bswap32(val);
3067 }
3068#else
3069 if (endian == DEVICE_BIG_ENDIAN) {
3070 val = bswap32(val);
3071 }
3072#endif
50013115 3073 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3074 } else {
8df1cd07 3075 /* RAM case */
5c8a00ce 3076 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3077 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3078 switch (endian) {
3079 case DEVICE_LITTLE_ENDIAN:
3080 stl_le_p(ptr, val);
3081 break;
3082 case DEVICE_BIG_ENDIAN:
3083 stl_be_p(ptr, val);
3084 break;
3085 default:
3086 stl_p(ptr, val);
3087 break;
3088 }
845b6214 3089 invalidate_and_set_dirty(mr, addr1, 4);
50013115
PM
3090 r = MEMTX_OK;
3091 }
3092 if (result) {
3093 *result = r;
8df1cd07 3094 }
41063e1e 3095 rcu_read_unlock();
8df1cd07
FB
3096}
3097
50013115
PM
3098void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3099 MemTxAttrs attrs, MemTxResult *result)
3100{
3101 address_space_stl_internal(as, addr, val, attrs, result,
3102 DEVICE_NATIVE_ENDIAN);
3103}
3104
3105void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3106 MemTxAttrs attrs, MemTxResult *result)
3107{
3108 address_space_stl_internal(as, addr, val, attrs, result,
3109 DEVICE_LITTLE_ENDIAN);
3110}
3111
3112void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3113 MemTxAttrs attrs, MemTxResult *result)
3114{
3115 address_space_stl_internal(as, addr, val, attrs, result,
3116 DEVICE_BIG_ENDIAN);
3117}
3118
ab1da857 3119void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3120{
50013115 3121 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3122}
3123
ab1da857 3124void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3125{
50013115 3126 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3127}
3128
ab1da857 3129void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3130{
50013115 3131 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3132}
3133
aab33094 3134/* XXX: optimize */
50013115
PM
3135void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3136 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3137{
3138 uint8_t v = val;
50013115
PM
3139 MemTxResult r;
3140
3141 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3142 if (result) {
3143 *result = r;
3144 }
3145}
3146
3147void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3148{
3149 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094
FB
3150}
3151
733f0b02 3152/* warning: addr must be aligned */
50013115
PM
3153static inline void address_space_stw_internal(AddressSpace *as,
3154 hwaddr addr, uint32_t val,
3155 MemTxAttrs attrs,
3156 MemTxResult *result,
3157 enum device_endian endian)
aab33094 3158{
733f0b02 3159 uint8_t *ptr;
5c8a00ce 3160 MemoryRegion *mr;
149f54b5
PB
3161 hwaddr l = 2;
3162 hwaddr addr1;
50013115 3163 MemTxResult r;
733f0b02 3164
41063e1e 3165 rcu_read_lock();
5ce5944d 3166 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3167 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3168#if defined(TARGET_WORDS_BIGENDIAN)
3169 if (endian == DEVICE_LITTLE_ENDIAN) {
3170 val = bswap16(val);
3171 }
3172#else
3173 if (endian == DEVICE_BIG_ENDIAN) {
3174 val = bswap16(val);
3175 }
3176#endif
50013115 3177 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3178 } else {
733f0b02 3179 /* RAM case */
5c8a00ce 3180 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 3181 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3182 switch (endian) {
3183 case DEVICE_LITTLE_ENDIAN:
3184 stw_le_p(ptr, val);
3185 break;
3186 case DEVICE_BIG_ENDIAN:
3187 stw_be_p(ptr, val);
3188 break;
3189 default:
3190 stw_p(ptr, val);
3191 break;
3192 }
845b6214 3193 invalidate_and_set_dirty(mr, addr1, 2);
50013115
PM
3194 r = MEMTX_OK;
3195 }
3196 if (result) {
3197 *result = r;
733f0b02 3198 }
41063e1e 3199 rcu_read_unlock();
aab33094
FB
3200}
3201
50013115
PM
3202void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3203 MemTxAttrs attrs, MemTxResult *result)
3204{
3205 address_space_stw_internal(as, addr, val, attrs, result,
3206 DEVICE_NATIVE_ENDIAN);
3207}
3208
3209void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3210 MemTxAttrs attrs, MemTxResult *result)
3211{
3212 address_space_stw_internal(as, addr, val, attrs, result,
3213 DEVICE_LITTLE_ENDIAN);
3214}
3215
3216void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3217 MemTxAttrs attrs, MemTxResult *result)
3218{
3219 address_space_stw_internal(as, addr, val, attrs, result,
3220 DEVICE_BIG_ENDIAN);
3221}
3222
5ce5944d 3223void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3224{
50013115 3225 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3226}
3227
5ce5944d 3228void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3229{
50013115 3230 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3231}
3232
5ce5944d 3233void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3234{
50013115 3235 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3236}
3237
aab33094 3238/* XXX: optimize */
50013115
PM
3239void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3240 MemTxAttrs attrs, MemTxResult *result)
aab33094 3241{
50013115 3242 MemTxResult r;
aab33094 3243 val = tswap64(val);
50013115
PM
3244 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3245 if (result) {
3246 *result = r;
3247 }
aab33094
FB
3248}
3249
50013115
PM
3250void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3251 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3252{
50013115 3253 MemTxResult r;
1e78bcc1 3254 val = cpu_to_le64(val);
50013115
PM
3255 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3256 if (result) {
3257 *result = r;
3258 }
3259}
3260void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3261 MemTxAttrs attrs, MemTxResult *result)
3262{
3263 MemTxResult r;
3264 val = cpu_to_be64(val);
3265 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3266 if (result) {
3267 *result = r;
3268 }
3269}
3270
3271void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3272{
3273 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3274}
3275
3276void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3277{
3278 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3279}
3280
f606604f 3281void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3282{
50013115 3283 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3284}
3285
5e2972fd 3286/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3287int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3288 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3289{
3290 int l;
a8170e5e 3291 hwaddr phys_addr;
9b3c35e0 3292 target_ulong page;
13eb76e0
FB
3293
3294 while (len > 0) {
3295 page = addr & TARGET_PAGE_MASK;
f17ec444 3296 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
3297 /* if no physical page mapped, return an error */
3298 if (phys_addr == -1)
3299 return -1;
3300 l = (page + TARGET_PAGE_SIZE) - addr;
3301 if (l > len)
3302 l = len;
5e2972fd 3303 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
3304 if (is_write) {
3305 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3306 } else {
5c9eb028
PM
3307 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3308 buf, l, 0);
2e38847b 3309 }
13eb76e0
FB
3310 len -= l;
3311 buf += l;
3312 addr += l;
3313 }
3314 return 0;
3315}
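
/* Illustrative sketch (not part of exec.c): gdbstub/monitor style access to
 * guest-virtual memory.  A read passes is_write == 0; buf must have room
 * for len bytes.  The "example_" name is hypothetical. */
static bool example_read_guest_virt(CPUState *cpu, target_ulong addr,
                                    void *buf, int len)
{
    return cpu_memory_rw_debug(cpu, addr, buf, len, 0) == 0;
}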
a68fe89c 3316#endif
13eb76e0 3317
8e4a424b
BS
3318/*
3319 * A helper function for the _utterly broken_ virtio device model to find out if
3320 * it's running on a big endian machine. Don't do this at home kids!
3321 */
98ed8ecf
GK
3322bool target_words_bigendian(void);
3323bool target_words_bigendian(void)
8e4a424b
BS
3324{
3325#if defined(TARGET_WORDS_BIGENDIAN)
3326 return true;
3327#else
3328 return false;
3329#endif
3330}
3331
76f35538 3332#ifndef CONFIG_USER_ONLY
a8170e5e 3333bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3334{
5c8a00ce 3335 MemoryRegion *mr;
149f54b5 3336 hwaddr l = 1;
41063e1e 3337 bool res;
76f35538 3338
41063e1e 3339 rcu_read_lock();
5c8a00ce
PB
3340 mr = address_space_translate(&address_space_memory,
3341 phys_addr, &phys_addr, &l, false);
76f35538 3342
41063e1e
PB
3343 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3344 rcu_read_unlock();
3345 return res;
76f35538 3346}
bd2fa51f 3347
e3807054 3348int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
bd2fa51f
MH
3349{
3350 RAMBlock *block;
e3807054 3351 int ret = 0;
bd2fa51f 3352
0dc3f44a
MD
3353 rcu_read_lock();
3354 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
e3807054
DDAG
3355 ret = func(block->idstr, block->host, block->offset,
3356 block->used_length, opaque);
3357 if (ret) {
3358 break;
3359 }
bd2fa51f 3360 }
0dc3f44a 3361 rcu_read_unlock();
e3807054 3362 return ret;
bd2fa51f 3363}
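
/* Illustrative sketch (not part of exec.c): a RAMBlockIterFunc callback that
 * totals the used length of every RAM block.  The callback signature follows
 * the call made in qemu_ram_foreach_block() above; returning non-zero would
 * stop the walk early.  The "example_" names are hypothetical. */
static int example_count_block(const char *block_name, void *host_addr,
                               ram_addr_t offset, ram_addr_t length,
                               void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0;                       /* keep iterating */
}

static uint64_t example_total_ram_bytes(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_block, &total);
    return total;
}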
ec3f8c99 3364#endif