cputlb: move CPU_LOOP() for tlb_reset() to exec.c
[qemu.git] / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
4485bd26 29#if !defined(CONFIG_USER_ONLY)
47c8ca53 30#include "hw/boards.h"
4485bd26 31#endif
cc9e98cb 32#include "hw/qdev.h"
1de7afc9 33#include "qemu/osdep.h"
9c17d615 34#include "sysemu/kvm.h"
2ff3de68 35#include "sysemu/sysemu.h"
0d09e41a 36#include "hw/xen/xen.h"
1de7afc9
PB
37#include "qemu/timer.h"
38#include "qemu/config-file.h"
75a34036 39#include "qemu/error-report.h"
022c62cb 40#include "exec/memory.h"
9c17d615 41#include "sysemu/dma.h"
022c62cb 42#include "exec/address-spaces.h"
53a5960a
PB
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
432d268c 45#else /* !CONFIG_USER_ONLY */
9c17d615 46#include "sysemu/xen-mapcache.h"
6506e4f9 47#include "trace.h"
53a5960a 48#endif
0d6d3c87 49#include "exec/cpu-all.h"
0dc3f44a 50#include "qemu/rcu_queue.h"
4840f10e 51#include "qemu/main-loop.h"
022c62cb 52#include "exec/cputlb.h"
5b6dd868 53#include "translate-all.h"
0cac1b66 54
022c62cb 55#include "exec/memory-internal.h"
220c3ebd 56#include "exec/ram_addr.h"
67d95c15 57
b35ba30f
MT
58#include "qemu/range.h"
59
db7b5426 60//#define DEBUG_SUBPAGE
1196be37 61
e2eef170 62#if !defined(CONFIG_USER_ONLY)
0dc3f44a
MD
63/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
64 * are protected by the ramlist lock.
65 */
0d53d9fe 66RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
67
68static MemoryRegion *system_memory;
309cb471 69static MemoryRegion *system_io;
62152b8a 70
f6790af6
AK
71AddressSpace address_space_io;
72AddressSpace address_space_memory;
2673a5da 73
0844e007 74MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 75static MemoryRegion io_mem_unassigned;
0e0df1e2 76
7bd4f430
PB
77/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
78#define RAM_PREALLOC (1 << 0)
79
dbcb8981
PB
80/* RAM is mmap-ed with MAP_SHARED */
81#define RAM_SHARED (1 << 1)
82
62be4e3a
MT
83/* Only a portion of RAM (used_length) is actually used, and migrated.
84 * This used_length size can change across reboots.
85 */
86#define RAM_RESIZEABLE (1 << 2)
87
e2eef170 88#endif
9fa3e853 89
bdc44640 90struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
91/* current CPU in the current thread. It is only valid inside
92 cpu_exec() */
f240eb6f 93__thread CPUState *current_cpu;
2e70f6ef 94/* 0 = Do not count executed instructions.
bf20dc07 95 1 = Precise instruction counting.
2e70f6ef 96 2 = Adaptive rate instruction counting. */
5708fc66 97int use_icount;
6a00d601 98
e2eef170 99#if !defined(CONFIG_USER_ONLY)
4346ae3e 100
1db8abb1
PB
101typedef struct PhysPageEntry PhysPageEntry;
102
103struct PhysPageEntry {
9736e55b 104 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 105 uint32_t skip : 6;
9736e55b 106 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 107 uint32_t ptr : 26;
1db8abb1
PB
108};
109
8b795765
MT
110#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
111
03f49957 112/* Size of the L2 (and L3, etc) page tables. */
57271d63 113#define ADDR_SPACE_BITS 64
03f49957 114
026736ce 115#define P_L2_BITS 9
03f49957
PB
116#define P_L2_SIZE (1 << P_L2_BITS)
117
118#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
119
120typedef PhysPageEntry Node[P_L2_SIZE];
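/* A worked example of the sizing above, assuming 4 KiB target pages
 * (TARGET_PAGE_BITS == 12): each level resolves P_L2_BITS == 9 address
 * bits, so P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6.  A lookup
 * therefore walks at most six Node tables of P_L2_SIZE == 512 entries
 * each, and the 6-bit "skip" field above is wide enough to describe a
 * jump over any number of those levels.
 */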
0475d94f 121
53cb28cb 122typedef struct PhysPageMap {
79e2b9ae
PB
123 struct rcu_head rcu;
124
53cb28cb
MA
125 unsigned sections_nb;
126 unsigned sections_nb_alloc;
127 unsigned nodes_nb;
128 unsigned nodes_nb_alloc;
129 Node *nodes;
130 MemoryRegionSection *sections;
131} PhysPageMap;
132
1db8abb1 133struct AddressSpaceDispatch {
79e2b9ae
PB
134 struct rcu_head rcu;
135
1db8abb1
PB
136 /* This is a multi-level map on the physical address space.
137 * The bottom level has pointers to MemoryRegionSections.
138 */
139 PhysPageEntry phys_map;
53cb28cb 140 PhysPageMap map;
acc9d80b 141 AddressSpace *as;
1db8abb1
PB
142};
143
90260c6c
JK
144#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
145typedef struct subpage_t {
146 MemoryRegion iomem;
acc9d80b 147 AddressSpace *as;
90260c6c
JK
148 hwaddr base;
149 uint16_t sub_section[TARGET_PAGE_SIZE];
150} subpage_t;
151
b41aac4f
LPF
152#define PHYS_SECTION_UNASSIGNED 0
153#define PHYS_SECTION_NOTDIRTY 1
154#define PHYS_SECTION_ROM 2
155#define PHYS_SECTION_WATCH 3
5312bd8b 156
e2eef170 157static void io_mem_init(void);
62152b8a 158static void memory_map_init(void);
09daed84 159static void tcg_commit(MemoryListener *listener);
e2eef170 160
1ec9b909 161static MemoryRegion io_mem_watch;
6658ffb8 162#endif
fd6ce8f6 163
6d9a1304 164#if !defined(CONFIG_USER_ONLY)
d6f2ea22 165
53cb28cb 166static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 167{
53cb28cb
MA
168 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
169 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
170 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
171 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 172 }
f7bf5461
AK
173}
174
db94604b 175static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
f7bf5461
AK
176{
177 unsigned i;
8b795765 178 uint32_t ret;
db94604b
PB
179 PhysPageEntry e;
180 PhysPageEntry *p;
f7bf5461 181
53cb28cb 182 ret = map->nodes_nb++;
db94604b 183 p = map->nodes[ret];
f7bf5461 184 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 185 assert(ret != map->nodes_nb_alloc);
db94604b
PB
186
187 e.skip = leaf ? 0 : 1;
188 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
03f49957 189 for (i = 0; i < P_L2_SIZE; ++i) {
db94604b 190 memcpy(&p[i], &e, sizeof(e));
d6f2ea22 191 }
f7bf5461 192 return ret;
d6f2ea22
AK
193}
194
53cb28cb
MA
195static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
196 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 197 int level)
f7bf5461
AK
198{
199 PhysPageEntry *p;
03f49957 200 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 201
9736e55b 202 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
db94604b 203 lp->ptr = phys_map_node_alloc(map, level == 0);
92e873b9 204 }
db94604b 205 p = map->nodes[lp->ptr];
03f49957 206 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 207
03f49957 208 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 209 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 210 lp->skip = 0;
c19e8800 211 lp->ptr = leaf;
07f07b31
AK
212 *index += step;
213 *nb -= step;
2999097b 214 } else {
53cb28cb 215 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
216 }
217 ++lp;
f7bf5461
AK
218 }
219}
220
ac1970fb 221static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 222 hwaddr index, hwaddr nb,
2999097b 223 uint16_t leaf)
f7bf5461 224{
2999097b 225 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 226 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 227
53cb28cb 228 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
229}
230
b35ba30f
MT
 231/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
232 * and update our entry so we can skip it and go directly to the destination.
233 */
234static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
235{
236 unsigned valid_ptr = P_L2_SIZE;
237 int valid = 0;
238 PhysPageEntry *p;
239 int i;
240
241 if (lp->ptr == PHYS_MAP_NODE_NIL) {
242 return;
243 }
244
245 p = nodes[lp->ptr];
246 for (i = 0; i < P_L2_SIZE; i++) {
247 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
248 continue;
249 }
250
251 valid_ptr = i;
252 valid++;
253 if (p[i].skip) {
254 phys_page_compact(&p[i], nodes, compacted);
255 }
256 }
257
258 /* We can only compress if there's only one child. */
259 if (valid != 1) {
260 return;
261 }
262
263 assert(valid_ptr < P_L2_SIZE);
264
265 /* Don't compress if it won't fit in the # of bits we have. */
266 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
267 return;
268 }
269
270 lp->ptr = p[valid_ptr].ptr;
271 if (!p[valid_ptr].skip) {
272 /* If our only child is a leaf, make this a leaf. */
273 /* By design, we should have made this node a leaf to begin with so we
274 * should never reach here.
275 * But since it's so simple to handle this, let's do it just in case we
276 * change this rule.
277 */
278 lp->skip = 0;
279 } else {
280 lp->skip += p[valid_ptr].skip;
281 }
282}
283
284static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
285{
286 DECLARE_BITMAP(compacted, nodes_nb);
287
288 if (d->phys_map.skip) {
53cb28cb 289 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
290 }
291}
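/* A worked example of the compaction above: if a node's only valid child
 * is itself an inner node with skip == 2, the parent entry takes over the
 * child's ptr and becomes skip == 1 + 2 == 3, so subsequent lookups jump
 * straight to the grandchild's table instead of loading the intermediate
 * node first.
 */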
292
97115a8d 293static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 294 Node *nodes, MemoryRegionSection *sections)
92e873b9 295{
31ab2b4a 296 PhysPageEntry *p;
97115a8d 297 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 298 int i;
f1f6e3b8 299
9736e55b 300 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 301 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 302 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 303 }
9affd6fc 304 p = nodes[lp.ptr];
03f49957 305 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 306 }
b35ba30f
MT
307
308 if (sections[lp.ptr].size.hi ||
309 range_covers_byte(sections[lp.ptr].offset_within_address_space,
310 sections[lp.ptr].size.lo, addr)) {
311 return &sections[lp.ptr];
312 } else {
313 return &sections[PHYS_SECTION_UNASSIGNED];
314 }
f3705d53
AK
315}
316
e5548617
BS
317bool memory_region_is_unassigned(MemoryRegion *mr)
318{
2a8e7499 319 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 320 && mr != &io_mem_watch;
fd6ce8f6 321}
149f54b5 322
79e2b9ae 323/* Called from RCU critical section */
c7086b4a 324static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
325 hwaddr addr,
326 bool resolve_subpage)
9f029603 327{
90260c6c
JK
328 MemoryRegionSection *section;
329 subpage_t *subpage;
330
53cb28cb 331 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
332 if (resolve_subpage && section->mr->subpage) {
333 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 334 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
335 }
336 return section;
9f029603
JK
337}
338
79e2b9ae 339/* Called from RCU critical section */
90260c6c 340static MemoryRegionSection *
c7086b4a 341address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 342 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
343{
344 MemoryRegionSection *section;
965eb2fc 345 MemoryRegion *mr;
a87f3954 346 Int128 diff;
149f54b5 347
c7086b4a 348 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
349 /* Compute offset within MemoryRegionSection */
350 addr -= section->offset_within_address_space;
351
352 /* Compute offset within MemoryRegion */
353 *xlat = addr + section->offset_within_region;
354
965eb2fc 355 mr = section->mr;
b242e0e0
PB
356
357 /* MMIO registers can be expected to perform full-width accesses based only
358 * on their address, without considering adjacent registers that could
359 * decode to completely different MemoryRegions. When such registers
360 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
361 * regions overlap wildly. For this reason we cannot clamp the accesses
362 * here.
363 *
364 * If the length is small (as is the case for address_space_ldl/stl),
365 * everything works fine. If the incoming length is large, however,
366 * the caller really has to do the clamping through memory_access_size.
367 */
965eb2fc 368 if (memory_region_is_ram(mr)) {
e4a511f8 369 diff = int128_sub(section->size, int128_make64(addr));
965eb2fc
PB
370 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
371 }
149f54b5
PB
372 return section;
373}
90260c6c 374
a87f3954
PB
375static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
376{
377 if (memory_region_is_ram(mr)) {
378 return !(is_write && mr->readonly);
379 }
380 if (memory_region_is_romd(mr)) {
381 return !is_write;
382 }
383
384 return false;
385}
386
41063e1e 387/* Called from RCU critical section */
5c8a00ce
PB
388MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
389 hwaddr *xlat, hwaddr *plen,
390 bool is_write)
90260c6c 391{
30951157
AK
392 IOMMUTLBEntry iotlb;
393 MemoryRegionSection *section;
394 MemoryRegion *mr;
30951157
AK
395
396 for (;;) {
79e2b9ae
PB
397 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
398 section = address_space_translate_internal(d, addr, &addr, plen, true);
30951157
AK
399 mr = section->mr;
400
401 if (!mr->iommu_ops) {
402 break;
403 }
404
8d7b8cb9 405 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
406 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
407 | (addr & iotlb.addr_mask));
23820dbf 408 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
30951157
AK
409 if (!(iotlb.perm & (1 << is_write))) {
410 mr = &io_mem_unassigned;
411 break;
412 }
413
414 as = iotlb.target_as;
415 }
416
fe680d0d 417 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954 418 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
23820dbf 419 *plen = MIN(page, *plen);
a87f3954
PB
420 }
421
30951157
AK
422 *xlat = addr;
423 return mr;
90260c6c
JK
424}
425
79e2b9ae 426/* Called from RCU critical section */
90260c6c 427MemoryRegionSection *
9d82b5a7
PB
428address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
429 hwaddr *xlat, hwaddr *plen)
90260c6c 430{
30951157 431 MemoryRegionSection *section;
9d82b5a7
PB
432 section = address_space_translate_internal(cpu->memory_dispatch,
433 addr, xlat, plen, false);
30951157
AK
434
435 assert(!section->mr->iommu_ops);
436 return section;
90260c6c 437}
5b6dd868 438#endif
fd6ce8f6 439
b170fce3 440#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
441
442static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 443{
259186a7 444 CPUState *cpu = opaque;
a513fe19 445
5b6dd868
BS
446 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
447 version_id is increased. */
259186a7 448 cpu->interrupt_request &= ~0x01;
c01a71c1 449 tlb_flush(cpu, 1);
5b6dd868
BS
450
451 return 0;
a513fe19 452}
7501267e 453
6c3bff0e
PD
454static int cpu_common_pre_load(void *opaque)
455{
456 CPUState *cpu = opaque;
457
adee6424 458 cpu->exception_index = -1;
6c3bff0e
PD
459
460 return 0;
461}
462
463static bool cpu_common_exception_index_needed(void *opaque)
464{
465 CPUState *cpu = opaque;
466
adee6424 467 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
468}
469
470static const VMStateDescription vmstate_cpu_common_exception_index = {
471 .name = "cpu_common/exception_index",
472 .version_id = 1,
473 .minimum_version_id = 1,
5cd8cada 474 .needed = cpu_common_exception_index_needed,
6c3bff0e
PD
475 .fields = (VMStateField[]) {
476 VMSTATE_INT32(exception_index, CPUState),
477 VMSTATE_END_OF_LIST()
478 }
479};
480
bac05aa9
AS
481static bool cpu_common_crash_occurred_needed(void *opaque)
482{
483 CPUState *cpu = opaque;
484
485 return cpu->crash_occurred;
486}
487
488static const VMStateDescription vmstate_cpu_common_crash_occurred = {
489 .name = "cpu_common/crash_occurred",
490 .version_id = 1,
491 .minimum_version_id = 1,
492 .needed = cpu_common_crash_occurred_needed,
493 .fields = (VMStateField[]) {
494 VMSTATE_BOOL(crash_occurred, CPUState),
495 VMSTATE_END_OF_LIST()
496 }
497};
498
1a1562f5 499const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
500 .name = "cpu_common",
501 .version_id = 1,
502 .minimum_version_id = 1,
6c3bff0e 503 .pre_load = cpu_common_pre_load,
5b6dd868 504 .post_load = cpu_common_post_load,
35d08458 505 .fields = (VMStateField[]) {
259186a7
AF
506 VMSTATE_UINT32(halted, CPUState),
507 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 508 VMSTATE_END_OF_LIST()
6c3bff0e 509 },
5cd8cada
JQ
510 .subsections = (const VMStateDescription*[]) {
511 &vmstate_cpu_common_exception_index,
bac05aa9 512 &vmstate_cpu_common_crash_occurred,
5cd8cada 513 NULL
5b6dd868
BS
514 }
515};
1a1562f5 516
5b6dd868 517#endif
ea041c0e 518
38d8f5c8 519CPUState *qemu_get_cpu(int index)
ea041c0e 520{
bdc44640 521 CPUState *cpu;
ea041c0e 522
bdc44640 523 CPU_FOREACH(cpu) {
55e5c285 524 if (cpu->cpu_index == index) {
bdc44640 525 return cpu;
55e5c285 526 }
ea041c0e 527 }
5b6dd868 528
bdc44640 529 return NULL;
ea041c0e
FB
530}
531
09daed84
EI
532#if !defined(CONFIG_USER_ONLY)
533void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
534{
535 /* We only support one address space per cpu at the moment. */
536 assert(cpu->as == as);
537
538 if (cpu->tcg_as_listener) {
539 memory_listener_unregister(cpu->tcg_as_listener);
540 } else {
541 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
542 }
543 cpu->tcg_as_listener->commit = tcg_commit;
544 memory_listener_register(cpu->tcg_as_listener, as);
545}
546#endif
547
b7bca733
BR
548#ifndef CONFIG_USER_ONLY
549static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
550
551static int cpu_get_free_index(Error **errp)
552{
553 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
554
555 if (cpu >= MAX_CPUMASK_BITS) {
556 error_setg(errp, "Trying to use more CPUs than max of %d",
557 MAX_CPUMASK_BITS);
558 return -1;
559 }
560
561 bitmap_set(cpu_index_map, cpu, 1);
562 return cpu;
563}
564
565void cpu_exec_exit(CPUState *cpu)
566{
567 if (cpu->cpu_index == -1) {
568 /* cpu_index was never allocated by this @cpu or was already freed. */
569 return;
570 }
571
572 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
573 cpu->cpu_index = -1;
574}
575#else
576
577static int cpu_get_free_index(Error **errp)
578{
579 CPUState *some_cpu;
580 int cpu_index = 0;
581
582 CPU_FOREACH(some_cpu) {
583 cpu_index++;
584 }
585 return cpu_index;
586}
587
588void cpu_exec_exit(CPUState *cpu)
589{
590}
591#endif
592
4bad9e39 593void cpu_exec_init(CPUState *cpu, Error **errp)
ea041c0e 594{
b170fce3 595 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868 596 int cpu_index;
b7bca733 597 Error *local_err = NULL;
5b6dd868 598
291135b5
EH
599#ifndef CONFIG_USER_ONLY
600 cpu->as = &address_space_memory;
601 cpu->thread_id = qemu_get_thread_id();
602 cpu_reload_memory_map(cpu);
603#endif
604
5b6dd868
BS
605#if defined(CONFIG_USER_ONLY)
606 cpu_list_lock();
607#endif
b7bca733
BR
608 cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
609 if (local_err) {
610 error_propagate(errp, local_err);
611#if defined(CONFIG_USER_ONLY)
612 cpu_list_unlock();
613#endif
614 return;
5b6dd868 615 }
bdc44640 616 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
617#if defined(CONFIG_USER_ONLY)
618 cpu_list_unlock();
619#endif
e0d47944
AF
620 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
621 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
622 }
5b6dd868 623#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868 624 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
4bad9e39 625 cpu_save, cpu_load, cpu->env_ptr);
b170fce3 626 assert(cc->vmsd == NULL);
e0d47944 627 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 628#endif
b170fce3
AF
629 if (cc->vmsd != NULL) {
630 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
631 }
ea041c0e
FB
632}
633
94df27fd 634#if defined(CONFIG_USER_ONLY)
00b941e5 635static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
636{
637 tb_invalidate_phys_page_range(pc, pc + 1, 0);
638}
639#else
00b941e5 640static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 641{
e8262a1b
MF
642 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
643 if (phys != -1) {
09daed84 644 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 645 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 646 }
1e7855a5 647}
c27004ec 648#endif
d720b93d 649
c527ee8f 650#if defined(CONFIG_USER_ONLY)
75a34036 651void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
652
653{
654}
655
3ee887e8
PM
656int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
657 int flags)
658{
659 return -ENOSYS;
660}
661
662void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
663{
664}
665
75a34036 666int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
667 int flags, CPUWatchpoint **watchpoint)
668{
669 return -ENOSYS;
670}
671#else
6658ffb8 672/* Add a watchpoint. */
75a34036 673int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 674 int flags, CPUWatchpoint **watchpoint)
6658ffb8 675{
c0ce998e 676 CPUWatchpoint *wp;
6658ffb8 677
05068c0d 678 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 679 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
680 error_report("tried to set invalid watchpoint at %"
681 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
682 return -EINVAL;
683 }
7267c094 684 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
685
686 wp->vaddr = addr;
05068c0d 687 wp->len = len;
a1d1bb31
AL
688 wp->flags = flags;
689
2dc9f411 690 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
691 if (flags & BP_GDB) {
692 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
693 } else {
694 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
695 }
6658ffb8 696
31b030d4 697 tlb_flush_page(cpu, addr);
a1d1bb31
AL
698
699 if (watchpoint)
700 *watchpoint = wp;
701 return 0;
6658ffb8
PB
702}
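/* A typical call, as issued for a 4-byte write watchpoint from the
 * debugger side (a sketch, not part of this file):
 *
 *     cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
 *
 * BP_GDB sends the new entry to the head of cpu->watchpoints, so
 * debugger-injected watchpoints are checked before CPU-internal ones.
 */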
703
a1d1bb31 704/* Remove a specific watchpoint. */
75a34036 705int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 706 int flags)
6658ffb8 707{
a1d1bb31 708 CPUWatchpoint *wp;
6658ffb8 709
ff4700b0 710 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 711 if (addr == wp->vaddr && len == wp->len
6e140f28 712 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 713 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
714 return 0;
715 }
716 }
a1d1bb31 717 return -ENOENT;
6658ffb8
PB
718}
719
a1d1bb31 720/* Remove a specific watchpoint by reference. */
75a34036 721void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 722{
ff4700b0 723 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 724
31b030d4 725 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 726
7267c094 727 g_free(watchpoint);
a1d1bb31
AL
728}
729
730/* Remove all matching watchpoints. */
75a34036 731void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 732{
c0ce998e 733 CPUWatchpoint *wp, *next;
a1d1bb31 734
ff4700b0 735 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
736 if (wp->flags & mask) {
737 cpu_watchpoint_remove_by_ref(cpu, wp);
738 }
c0ce998e 739 }
7d03f82f 740}
05068c0d
PM
741
742/* Return true if this watchpoint address matches the specified
743 * access (ie the address range covered by the watchpoint overlaps
744 * partially or completely with the address range covered by the
745 * access).
746 */
747static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
748 vaddr addr,
749 vaddr len)
750{
751 /* We know the lengths are non-zero, but a little caution is
752 * required to avoid errors in the case where the range ends
753 * exactly at the top of the address space and so addr + len
754 * wraps round to zero.
755 */
756 vaddr wpend = wp->vaddr + wp->len - 1;
757 vaddr addrend = addr + len - 1;
758
759 return !(addr > wpend || wp->vaddr > addrend);
760}
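/* A quick check of the overlap rule above, assuming a watchpoint at
 * vaddr 0x1000 with len 4, i.e. wpend == 0x1003:
 *
 *     addr 0x1002, len 2  ->  addrend 0x1003, overlaps      -> true
 *     addr 0x1004, len 4  ->  addr > wpend                  -> false
 *     addr 0x0ffc, len 4  ->  addrend 0x0fff < wp->vaddr    -> false
 *
 * Comparing the inclusive end addresses keeps the test correct even when
 * addr + len would wrap past the top of the address space.
 */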
761
c527ee8f 762#endif
7d03f82f 763
a1d1bb31 764/* Add a breakpoint. */
b3310ab3 765int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 766 CPUBreakpoint **breakpoint)
4c3a88a2 767{
c0ce998e 768 CPUBreakpoint *bp;
3b46e624 769
7267c094 770 bp = g_malloc(sizeof(*bp));
4c3a88a2 771
a1d1bb31
AL
772 bp->pc = pc;
773 bp->flags = flags;
774
2dc9f411 775 /* keep all GDB-injected breakpoints in front */
00b941e5 776 if (flags & BP_GDB) {
f0c3c505 777 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 778 } else {
f0c3c505 779 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 780 }
3b46e624 781
f0c3c505 782 breakpoint_invalidate(cpu, pc);
a1d1bb31 783
00b941e5 784 if (breakpoint) {
a1d1bb31 785 *breakpoint = bp;
00b941e5 786 }
4c3a88a2 787 return 0;
4c3a88a2
FB
788}
789
a1d1bb31 790/* Remove a specific breakpoint. */
b3310ab3 791int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 792{
a1d1bb31
AL
793 CPUBreakpoint *bp;
794
f0c3c505 795 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 796 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 797 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
798 return 0;
799 }
7d03f82f 800 }
a1d1bb31 801 return -ENOENT;
7d03f82f
EI
802}
803
a1d1bb31 804/* Remove a specific breakpoint by reference. */
b3310ab3 805void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 806{
f0c3c505
AF
807 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
808
809 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 810
7267c094 811 g_free(breakpoint);
a1d1bb31
AL
812}
813
814/* Remove all matching breakpoints. */
b3310ab3 815void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 816{
c0ce998e 817 CPUBreakpoint *bp, *next;
a1d1bb31 818
f0c3c505 819 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
820 if (bp->flags & mask) {
821 cpu_breakpoint_remove_by_ref(cpu, bp);
822 }
c0ce998e 823 }
4c3a88a2
FB
824}
825
c33a346e
FB
826/* enable or disable single step mode. EXCP_DEBUG is returned by the
827 CPU loop after each instruction */
3825b28f 828void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 829{
ed2803da
AF
830 if (cpu->singlestep_enabled != enabled) {
831 cpu->singlestep_enabled = enabled;
832 if (kvm_enabled()) {
38e478ec 833 kvm_update_guest_debug(cpu, 0);
ed2803da 834 } else {
ccbb4d44 835 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 836 /* XXX: only flush what is necessary */
bbd77c18 837 tb_flush(cpu);
e22a25c9 838 }
c33a346e 839 }
c33a346e
FB
840}
841
a47dddd7 842void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
843{
844 va_list ap;
493ae1f0 845 va_list ap2;
7501267e
FB
846
847 va_start(ap, fmt);
493ae1f0 848 va_copy(ap2, ap);
7501267e
FB
849 fprintf(stderr, "qemu: fatal: ");
850 vfprintf(stderr, fmt, ap);
851 fprintf(stderr, "\n");
878096ee 852 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
853 if (qemu_log_enabled()) {
854 qemu_log("qemu: fatal: ");
855 qemu_log_vprintf(fmt, ap2);
856 qemu_log("\n");
a0762859 857 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 858 qemu_log_flush();
93fcfe39 859 qemu_log_close();
924edcae 860 }
493ae1f0 861 va_end(ap2);
f9373291 862 va_end(ap);
fd052bf6
RV
863#if defined(CONFIG_USER_ONLY)
864 {
865 struct sigaction act;
866 sigfillset(&act.sa_mask);
867 act.sa_handler = SIG_DFL;
868 sigaction(SIGABRT, &act, NULL);
869 }
870#endif
7501267e
FB
871 abort();
872}
873
0124311e 874#if !defined(CONFIG_USER_ONLY)
0dc3f44a 875/* Called from RCU critical section */
041603fe
PB
876static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
877{
878 RAMBlock *block;
879
43771539 880 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 881 if (block && addr - block->offset < block->max_length) {
041603fe
PB
882 goto found;
883 }
0dc3f44a 884 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 885 if (addr - block->offset < block->max_length) {
041603fe
PB
886 goto found;
887 }
888 }
889
890 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
891 abort();
892
893found:
43771539
PB
894 /* It is safe to write mru_block outside the iothread lock. This
895 * is what happens:
896 *
897 * mru_block = xxx
898 * rcu_read_unlock()
899 * xxx removed from list
900 * rcu_read_lock()
901 * read mru_block
902 * mru_block = NULL;
903 * call_rcu(reclaim_ramblock, xxx);
904 * rcu_read_unlock()
905 *
906 * atomic_rcu_set is not needed here. The block was already published
907 * when it was placed into the list. Here we're just making an extra
908 * copy of the pointer.
909 */
041603fe
PB
910 ram_list.mru_block = block;
911 return block;
912}
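/* A minimal sketch of the calling pattern this relies on, assuming the
 * caller only needs the host pointer while inside the critical section:
 *
 *     rcu_read_lock();
 *     block = qemu_get_ram_block(addr);
 *     ptr = ramblock_ptr(block, addr - block->offset);
 *     ... use ptr ...
 *     rcu_read_unlock();
 *
 * mru_block may briefly lag behind the list, but as the comment above
 * notes it only ever caches a pointer that was already published, and RCU
 * keeps that memory alive for concurrent readers.
 */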
913
a2f4d5be 914static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 915{
9a13565d 916 CPUState *cpu;
041603fe 917 ram_addr_t start1;
a2f4d5be
JQ
918 RAMBlock *block;
919 ram_addr_t end;
920
921 end = TARGET_PAGE_ALIGN(start + length);
922 start &= TARGET_PAGE_MASK;
d24981d3 923
0dc3f44a 924 rcu_read_lock();
041603fe
PB
925 block = qemu_get_ram_block(start);
926 assert(block == qemu_get_ram_block(end - 1));
1240be24 927 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
9a13565d
PC
928 CPU_FOREACH(cpu) {
929 tlb_reset_dirty(cpu, start1, length);
930 }
0dc3f44a 931 rcu_read_unlock();
d24981d3
JQ
932}
933
5579c7f3 934/* Note: start and end must be within the same ram block. */
03eebc9e
SH
935bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
936 ram_addr_t length,
937 unsigned client)
1ccde1cb 938{
03eebc9e
SH
939 unsigned long end, page;
940 bool dirty;
941
942 if (length == 0) {
943 return false;
944 }
f23db169 945
03eebc9e
SH
946 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
947 page = start >> TARGET_PAGE_BITS;
948 dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
949 page, end - page);
950
951 if (dirty && tcg_enabled()) {
a2f4d5be 952 tlb_reset_dirty_range_all(start, length);
5579c7f3 953 }
03eebc9e
SH
954
955 return dirty;
1ccde1cb
FB
956}
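/* A minimal usage sketch, assuming a migration-style caller that has just
 * copied a guest-physical range and wants to know whether to resend it:
 *
 *     if (cpu_physical_memory_test_and_clear_dirty(start, length,
 *                                                  DIRTY_MEMORY_MIGRATION)) {
 *         ... the range was written again since the last pass ...
 *     }
 *
 * The bitmap is cleared atomically per page, and for TCG the TLB write
 * protection is re-armed via tlb_reset_dirty_range_all() before returning.
 */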
957
79e2b9ae 958/* Called from RCU critical section */
bb0e627a 959hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
960 MemoryRegionSection *section,
961 target_ulong vaddr,
962 hwaddr paddr, hwaddr xlat,
963 int prot,
964 target_ulong *address)
e5548617 965{
a8170e5e 966 hwaddr iotlb;
e5548617
BS
967 CPUWatchpoint *wp;
968
cc5bea60 969 if (memory_region_is_ram(section->mr)) {
e5548617
BS
970 /* Normal RAM. */
971 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 972 + xlat;
e5548617 973 if (!section->readonly) {
b41aac4f 974 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 975 } else {
b41aac4f 976 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
977 }
978 } else {
0b8e2c10
PM
979 AddressSpaceDispatch *d;
980
981 d = atomic_rcu_read(&section->address_space->dispatch);
982 iotlb = section - d->map.sections;
149f54b5 983 iotlb += xlat;
e5548617
BS
984 }
985
986 /* Make accesses to pages with watchpoints go via the
987 watchpoint trap routines. */
ff4700b0 988 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 989 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
990 /* Avoid trapping reads of pages with a write breakpoint. */
991 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 992 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
993 *address |= TLB_MMIO;
994 break;
995 }
996 }
997 }
998
999 return iotlb;
1000}
9fa3e853
FB
1001#endif /* defined(CONFIG_USER_ONLY) */
1002
e2eef170 1003#if !defined(CONFIG_USER_ONLY)
8da3ff18 1004
c227f099 1005static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1006 uint16_t section);
acc9d80b 1007static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 1008
a2b257d6
IM
1009static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1010 qemu_anon_ram_alloc;
91138037
MA
1011
1012/*
 1013 * Set a custom physical guest memory allocator.
1014 * Accelerators with unusual needs may need this. Hopefully, we can
1015 * get rid of it eventually.
1016 */
a2b257d6 1017void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
1018{
1019 phys_mem_alloc = alloc;
1020}
1021
53cb28cb
MA
1022static uint16_t phys_section_add(PhysPageMap *map,
1023 MemoryRegionSection *section)
5312bd8b 1024{
68f3f65b
PB
1025 /* The physical section number is ORed with a page-aligned
1026 * pointer to produce the iotlb entries. Thus it should
1027 * never overflow into the page-aligned value.
1028 */
53cb28cb 1029 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 1030
53cb28cb
MA
1031 if (map->sections_nb == map->sections_nb_alloc) {
1032 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1033 map->sections = g_renew(MemoryRegionSection, map->sections,
1034 map->sections_nb_alloc);
5312bd8b 1035 }
53cb28cb 1036 map->sections[map->sections_nb] = *section;
dfde4e6e 1037 memory_region_ref(section->mr);
53cb28cb 1038 return map->sections_nb++;
5312bd8b
AK
1039}
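/* Illustrative arithmetic for the assert above, assuming 4 KiB pages:
 * with fewer than TARGET_PAGE_SIZE (4096) sections, a section number
 * always fits in the low 12 bits, so combining it with a page-aligned
 * pointer, as the iotlb code does, cannot corrupt either half of the
 * resulting value.
 */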
1040
058bc4b5
PB
1041static void phys_section_destroy(MemoryRegion *mr)
1042{
dfde4e6e
PB
1043 memory_region_unref(mr);
1044
058bc4b5
PB
1045 if (mr->subpage) {
1046 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 1047 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
1048 g_free(subpage);
1049 }
1050}
1051
6092666e 1052static void phys_sections_free(PhysPageMap *map)
5312bd8b 1053{
9affd6fc
PB
1054 while (map->sections_nb > 0) {
1055 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
1056 phys_section_destroy(section->mr);
1057 }
9affd6fc
PB
1058 g_free(map->sections);
1059 g_free(map->nodes);
5312bd8b
AK
1060}
1061
ac1970fb 1062static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
1063{
1064 subpage_t *subpage;
a8170e5e 1065 hwaddr base = section->offset_within_address_space
0f0cb164 1066 & TARGET_PAGE_MASK;
97115a8d 1067 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 1068 d->map.nodes, d->map.sections);
0f0cb164
AK
1069 MemoryRegionSection subsection = {
1070 .offset_within_address_space = base,
052e87b0 1071 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 1072 };
a8170e5e 1073 hwaddr start, end;
0f0cb164 1074
f3705d53 1075 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 1076
f3705d53 1077 if (!(existing->mr->subpage)) {
acc9d80b 1078 subpage = subpage_init(d->as, base);
3be91e86 1079 subsection.address_space = d->as;
0f0cb164 1080 subsection.mr = &subpage->iomem;
ac1970fb 1081 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 1082 phys_section_add(&d->map, &subsection));
0f0cb164 1083 } else {
f3705d53 1084 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
1085 }
1086 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 1087 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
1088 subpage_register(subpage, start, end,
1089 phys_section_add(&d->map, section));
0f0cb164
AK
1090}
1091
1092
052e87b0
PB
1093static void register_multipage(AddressSpaceDispatch *d,
1094 MemoryRegionSection *section)
33417e70 1095{
a8170e5e 1096 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 1097 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
1098 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1099 TARGET_PAGE_BITS));
dd81124b 1100
733d5ef5
PB
1101 assert(num_pages);
1102 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
1103}
1104
ac1970fb 1105static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 1106{
89ae337a 1107 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1108 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1109 MemoryRegionSection now = *section, remain = *section;
052e87b0 1110 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1111
733d5ef5
PB
1112 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1113 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1114 - now.offset_within_address_space;
1115
052e87b0 1116 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1117 register_subpage(d, &now);
733d5ef5 1118 } else {
052e87b0 1119 now.size = int128_zero();
733d5ef5 1120 }
052e87b0
PB
1121 while (int128_ne(remain.size, now.size)) {
1122 remain.size = int128_sub(remain.size, now.size);
1123 remain.offset_within_address_space += int128_get64(now.size);
1124 remain.offset_within_region += int128_get64(now.size);
69b67646 1125 now = remain;
052e87b0 1126 if (int128_lt(remain.size, page_size)) {
733d5ef5 1127 register_subpage(d, &now);
88266249 1128 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1129 now.size = page_size;
ac1970fb 1130 register_subpage(d, &now);
69b67646 1131 } else {
052e87b0 1132 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1133 register_multipage(d, &now);
69b67646 1134 }
0f0cb164
AK
1135 }
1136}
1137
62a2744c
SY
1138void qemu_flush_coalesced_mmio_buffer(void)
1139{
1140 if (kvm_enabled())
1141 kvm_flush_coalesced_mmio_buffer();
1142}
1143
b2a8658e
UD
1144void qemu_mutex_lock_ramlist(void)
1145{
1146 qemu_mutex_lock(&ram_list.mutex);
1147}
1148
1149void qemu_mutex_unlock_ramlist(void)
1150{
1151 qemu_mutex_unlock(&ram_list.mutex);
1152}
1153
e1e84ba0 1154#ifdef __linux__
c902760f
MT
1155
1156#include <sys/vfs.h>
1157
1158#define HUGETLBFS_MAGIC 0x958458f6
1159
fc7a5800 1160static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1161{
1162 struct statfs fs;
1163 int ret;
1164
1165 do {
9742bf26 1166 ret = statfs(path, &fs);
c902760f
MT
1167 } while (ret != 0 && errno == EINTR);
1168
1169 if (ret != 0) {
fc7a5800
HT
1170 error_setg_errno(errp, errno, "failed to get page size of file %s",
1171 path);
9742bf26 1172 return 0;
c902760f
MT
1173 }
1174
1175 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1176 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1177
1178 return fs.f_bsize;
1179}
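/* For reference: on a typical x86-64 hugetlbfs mount f_bsize comes back
 * as 2 MiB (0x200000), and as 1 GiB when the filesystem is mounted with
 * pagesize=1G.  A path that is not on hugetlbfs only triggers the warning
 * above and the normal base page size is returned, which largely defeats
 * the point of -mem-path.
 */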
1180
04b16653
AW
1181static void *file_ram_alloc(RAMBlock *block,
1182 ram_addr_t memory,
7f56e740
PB
1183 const char *path,
1184 Error **errp)
c902760f
MT
1185{
1186 char *filename;
8ca761f6
PF
1187 char *sanitized_name;
1188 char *c;
557529dd 1189 void *area = NULL;
c902760f 1190 int fd;
557529dd 1191 uint64_t hpagesize;
fc7a5800 1192 Error *local_err = NULL;
c902760f 1193
fc7a5800
HT
1194 hpagesize = gethugepagesize(path, &local_err);
1195 if (local_err) {
1196 error_propagate(errp, local_err);
f9a49dfa 1197 goto error;
c902760f 1198 }
a2b257d6 1199 block->mr->align = hpagesize;
c902760f
MT
1200
1201 if (memory < hpagesize) {
557529dd
HT
1202 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1203 "or larger than huge page size 0x%" PRIx64,
1204 memory, hpagesize);
1205 goto error;
c902760f
MT
1206 }
1207
1208 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1209 error_setg(errp,
1210 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1211 goto error;
c902760f
MT
1212 }
1213
8ca761f6 1214 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1215 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1216 for (c = sanitized_name; *c != '\0'; c++) {
1217 if (*c == '/')
1218 *c = '_';
1219 }
1220
1221 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1222 sanitized_name);
1223 g_free(sanitized_name);
c902760f
MT
1224
1225 fd = mkstemp(filename);
1226 if (fd < 0) {
7f56e740
PB
1227 error_setg_errno(errp, errno,
1228 "unable to create backing store for hugepages");
e4ada482 1229 g_free(filename);
f9a49dfa 1230 goto error;
c902760f
MT
1231 }
1232 unlink(filename);
e4ada482 1233 g_free(filename);
c902760f 1234
9284f319 1235 memory = ROUND_UP(memory, hpagesize);
c902760f
MT
1236
1237 /*
1238 * ftruncate is not supported by hugetlbfs in older
1239 * hosts, so don't bother bailing out on errors.
1240 * If anything goes wrong with it under other filesystems,
1241 * mmap will fail.
1242 */
7f56e740 1243 if (ftruncate(fd, memory)) {
9742bf26 1244 perror("ftruncate");
7f56e740 1245 }
c902760f 1246
dbcb8981
PB
1247 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1248 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1249 fd, 0);
c902760f 1250 if (area == MAP_FAILED) {
7f56e740
PB
1251 error_setg_errno(errp, errno,
1252 "unable to map backing store for hugepages");
9742bf26 1253 close(fd);
f9a49dfa 1254 goto error;
c902760f 1255 }
ef36fa14
MT
1256
1257 if (mem_prealloc) {
38183310 1258 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1259 }
1260
04b16653 1261 block->fd = fd;
c902760f 1262 return area;
f9a49dfa
MT
1263
1264error:
1265 if (mem_prealloc) {
81b07353 1266 error_report("%s", error_get_pretty(*errp));
f9a49dfa
MT
1267 exit(1);
1268 }
1269 return NULL;
c902760f
MT
1270}
1271#endif
1272
0dc3f44a 1273/* Called with the ramlist lock held. */
d17b5288 1274static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1275{
1276 RAMBlock *block, *next_block;
3e837b2c 1277 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1278
49cd9ac6
SH
1279 assert(size != 0); /* it would hand out same offset multiple times */
1280
0dc3f44a 1281 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
04b16653 1282 return 0;
0d53d9fe 1283 }
04b16653 1284
0dc3f44a 1285 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
f15fbc4b 1286 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1287
62be4e3a 1288 end = block->offset + block->max_length;
04b16653 1289
0dc3f44a 1290 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
04b16653
AW
1291 if (next_block->offset >= end) {
1292 next = MIN(next, next_block->offset);
1293 }
1294 }
1295 if (next - end >= size && next - end < mingap) {
3e837b2c 1296 offset = end;
04b16653
AW
1297 mingap = next - end;
1298 }
1299 }
3e837b2c
AW
1300
1301 if (offset == RAM_ADDR_MAX) {
1302 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1303 (uint64_t)size);
1304 abort();
1305 }
1306
04b16653
AW
1307 return offset;
1308}
1309
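/* A worked example of the gap search above, assuming blocks at offsets
 * 0x0 (max_length 0x4000), 0x8000 (0x2000) and 0x10000 (0x1000), and a
 * request for 0x3000 bytes: the candidate gaps are [0x4000, 0x8000), i.e.
 * 0x4000 bytes, and [0xa000, 0x10000), i.e. 0x6000 bytes (plus the huge
 * tail gap after the last block).  All fit, but the first is the smallest
 * that does, so 0x4000 is returned.
 */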
652d7ec2 1310ram_addr_t last_ram_offset(void)
d17b5288
AW
1311{
1312 RAMBlock *block;
1313 ram_addr_t last = 0;
1314
0dc3f44a
MD
1315 rcu_read_lock();
1316 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
62be4e3a 1317 last = MAX(last, block->offset + block->max_length);
0d53d9fe 1318 }
0dc3f44a 1319 rcu_read_unlock();
d17b5288
AW
1320 return last;
1321}
1322
ddb97f1d
JB
1323static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1324{
1325 int ret;
ddb97f1d
JB
1326
1327 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
47c8ca53 1328 if (!machine_dump_guest_core(current_machine)) {
ddb97f1d
JB
1329 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1330 if (ret) {
1331 perror("qemu_madvise");
1332 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1333 "but dump_guest_core=off specified\n");
1334 }
1335 }
1336}
1337
0dc3f44a
MD
1338/* Called within an RCU critical section, or while the ramlist lock
1339 * is held.
1340 */
20cfe881 1341static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1342{
20cfe881 1343 RAMBlock *block;
84b89d78 1344
0dc3f44a 1345 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1346 if (block->offset == addr) {
20cfe881 1347 return block;
c5705a77
AK
1348 }
1349 }
20cfe881
HT
1350
1351 return NULL;
1352}
1353
ae3a7047 1354/* Called with iothread lock held. */
20cfe881
HT
1355void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1356{
ae3a7047 1357 RAMBlock *new_block, *block;
20cfe881 1358
0dc3f44a 1359 rcu_read_lock();
ae3a7047 1360 new_block = find_ram_block(addr);
c5705a77
AK
1361 assert(new_block);
1362 assert(!new_block->idstr[0]);
84b89d78 1363
09e5ab63
AL
1364 if (dev) {
1365 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1366 if (id) {
1367 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1368 g_free(id);
84b89d78
CM
1369 }
1370 }
1371 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1372
0dc3f44a 1373 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1374 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1375 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1376 new_block->idstr);
1377 abort();
1378 }
1379 }
0dc3f44a 1380 rcu_read_unlock();
c5705a77
AK
1381}
1382
ae3a7047 1383/* Called with iothread lock held. */
20cfe881
HT
1384void qemu_ram_unset_idstr(ram_addr_t addr)
1385{
ae3a7047 1386 RAMBlock *block;
20cfe881 1387
ae3a7047
MD
1388 /* FIXME: arch_init.c assumes that this is not called throughout
1389 * migration. Ignore the problem since hot-unplug during migration
1390 * does not work anyway.
1391 */
1392
0dc3f44a 1393 rcu_read_lock();
ae3a7047 1394 block = find_ram_block(addr);
20cfe881
HT
1395 if (block) {
1396 memset(block->idstr, 0, sizeof(block->idstr));
1397 }
0dc3f44a 1398 rcu_read_unlock();
20cfe881
HT
1399}
1400
8490fc78
LC
1401static int memory_try_enable_merging(void *addr, size_t len)
1402{
75cc7f01 1403 if (!machine_mem_merge(current_machine)) {
8490fc78
LC
1404 /* disabled by the user */
1405 return 0;
1406 }
1407
1408 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1409}
1410
62be4e3a
MT
1411/* Only legal before guest might have detected the memory size: e.g. on
1412 * incoming migration, or right after reset.
1413 *
1414 * As memory core doesn't know how is memory accessed, it is up to
1415 * resize callback to update device state and/or add assertions to detect
1416 * misuse, if necessary.
1417 */
1418int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1419{
1420 RAMBlock *block = find_ram_block(base);
1421
1422 assert(block);
1423
129ddaf3
MT
1424 newsize = TARGET_PAGE_ALIGN(newsize);
1425
62be4e3a
MT
1426 if (block->used_length == newsize) {
1427 return 0;
1428 }
1429
1430 if (!(block->flags & RAM_RESIZEABLE)) {
1431 error_setg_errno(errp, EINVAL,
1432 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1433 " in != 0x" RAM_ADDR_FMT, block->idstr,
1434 newsize, block->used_length);
1435 return -EINVAL;
1436 }
1437
1438 if (block->max_length < newsize) {
1439 error_setg_errno(errp, EINVAL,
1440 "Length too large: %s: 0x" RAM_ADDR_FMT
1441 " > 0x" RAM_ADDR_FMT, block->idstr,
1442 newsize, block->max_length);
1443 return -EINVAL;
1444 }
1445
1446 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1447 block->used_length = newsize;
58d2707e
PB
1448 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1449 DIRTY_CLIENTS_ALL);
62be4e3a
MT
1450 memory_region_set_size(block->mr, newsize);
1451 if (block->resized) {
1452 block->resized(block->idstr, newsize, block->host);
1453 }
1454 return 0;
1455}
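/* A minimal sketch of how a resizeable block is set up, with my_resized()
 * and my_dev as assumed, hypothetical names:
 *
 *     static void my_resized(const char *id, uint64_t new_len, void *host)
 *     {
 *         my_dev.visible_ram_size = new_len;   // device-specific bookkeeping
 *     }
 *
 *     qemu_ram_alloc_resizeable(initial_size, max_size, my_resized,
 *                               mr, &err);
 *
 * qemu_ram_resize() then invokes the callback when the block is grown or
 * shrunk, which per the comment above is only legal before the guest might
 * have detected the size (incoming migration, or right after reset).
 */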
1456
ef701d7b 1457static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1458{
e1c57ab8 1459 RAMBlock *block;
0d53d9fe 1460 RAMBlock *last_block = NULL;
2152f5ca
JQ
1461 ram_addr_t old_ram_size, new_ram_size;
1462
1463 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1464
b2a8658e 1465 qemu_mutex_lock_ramlist();
9b8424d5 1466 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1467
1468 if (!new_block->host) {
1469 if (xen_enabled()) {
9b8424d5
MT
1470 xen_ram_alloc(new_block->offset, new_block->max_length,
1471 new_block->mr);
e1c57ab8 1472 } else {
9b8424d5 1473 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1474 &new_block->mr->align);
39228250 1475 if (!new_block->host) {
ef701d7b
HT
1476 error_setg_errno(errp, errno,
1477 "cannot set up guest memory '%s'",
1478 memory_region_name(new_block->mr));
1479 qemu_mutex_unlock_ramlist();
1480 return -1;
39228250 1481 }
9b8424d5 1482 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1483 }
c902760f 1484 }
94a6b54f 1485
dd631697
LZ
1486 new_ram_size = MAX(old_ram_size,
1487 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1488 if (new_ram_size > old_ram_size) {
1489 migration_bitmap_extend(old_ram_size, new_ram_size);
1490 }
0d53d9fe
MD
1491 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1492 * QLIST (which has an RCU-friendly variant) does not have insertion at
1493 * tail, so save the last element in last_block.
1494 */
0dc3f44a 1495 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
0d53d9fe 1496 last_block = block;
9b8424d5 1497 if (block->max_length < new_block->max_length) {
abb26d63
PB
1498 break;
1499 }
1500 }
1501 if (block) {
0dc3f44a 1502 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
0d53d9fe 1503 } else if (last_block) {
0dc3f44a 1504 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
0d53d9fe 1505 } else { /* list is empty */
0dc3f44a 1506 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
abb26d63 1507 }
0d6d3c87 1508 ram_list.mru_block = NULL;
94a6b54f 1509
0dc3f44a
MD
1510 /* Write list before version */
1511 smp_wmb();
f798b07f 1512 ram_list.version++;
b2a8658e 1513 qemu_mutex_unlock_ramlist();
f798b07f 1514
2152f5ca
JQ
1515 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1516
1517 if (new_ram_size > old_ram_size) {
1ab4c8ce 1518 int i;
ae3a7047
MD
1519
1520 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1ab4c8ce
JQ
1521 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1522 ram_list.dirty_memory[i] =
1523 bitmap_zero_extend(ram_list.dirty_memory[i],
1524 old_ram_size, new_ram_size);
1525 }
2152f5ca 1526 }
9b8424d5 1527 cpu_physical_memory_set_dirty_range(new_block->offset,
58d2707e
PB
1528 new_block->used_length,
1529 DIRTY_CLIENTS_ALL);
94a6b54f 1530
a904c911
PB
1531 if (new_block->host) {
1532 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1533 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1534 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1535 if (kvm_enabled()) {
1536 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1537 }
e1c57ab8 1538 }
6f0437e8 1539
94a6b54f
PB
1540 return new_block->offset;
1541}
e9a1ab19 1542
0b183fc8 1543#ifdef __linux__
e1c57ab8 1544ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1545 bool share, const char *mem_path,
7f56e740 1546 Error **errp)
e1c57ab8
PB
1547{
1548 RAMBlock *new_block;
ef701d7b
HT
1549 ram_addr_t addr;
1550 Error *local_err = NULL;
e1c57ab8
PB
1551
1552 if (xen_enabled()) {
7f56e740
PB
1553 error_setg(errp, "-mem-path not supported with Xen");
1554 return -1;
e1c57ab8
PB
1555 }
1556
1557 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1558 /*
1559 * file_ram_alloc() needs to allocate just like
1560 * phys_mem_alloc, but we haven't bothered to provide
1561 * a hook there.
1562 */
7f56e740
PB
1563 error_setg(errp,
1564 "-mem-path not supported with this accelerator");
1565 return -1;
e1c57ab8
PB
1566 }
1567
1568 size = TARGET_PAGE_ALIGN(size);
1569 new_block = g_malloc0(sizeof(*new_block));
1570 new_block->mr = mr;
9b8424d5
MT
1571 new_block->used_length = size;
1572 new_block->max_length = size;
dbcb8981 1573 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1574 new_block->host = file_ram_alloc(new_block, size,
1575 mem_path, errp);
1576 if (!new_block->host) {
1577 g_free(new_block);
1578 return -1;
1579 }
1580
ef701d7b
HT
1581 addr = ram_block_add(new_block, &local_err);
1582 if (local_err) {
1583 g_free(new_block);
1584 error_propagate(errp, local_err);
1585 return -1;
1586 }
1587 return addr;
e1c57ab8 1588}
0b183fc8 1589#endif
e1c57ab8 1590
62be4e3a
MT
1591static
1592ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1593 void (*resized)(const char*,
1594 uint64_t length,
1595 void *host),
1596 void *host, bool resizeable,
ef701d7b 1597 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1598{
1599 RAMBlock *new_block;
ef701d7b
HT
1600 ram_addr_t addr;
1601 Error *local_err = NULL;
e1c57ab8
PB
1602
1603 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1604 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1605 new_block = g_malloc0(sizeof(*new_block));
1606 new_block->mr = mr;
62be4e3a 1607 new_block->resized = resized;
9b8424d5
MT
1608 new_block->used_length = size;
1609 new_block->max_length = max_size;
62be4e3a 1610 assert(max_size >= size);
e1c57ab8
PB
1611 new_block->fd = -1;
1612 new_block->host = host;
1613 if (host) {
7bd4f430 1614 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1615 }
62be4e3a
MT
1616 if (resizeable) {
1617 new_block->flags |= RAM_RESIZEABLE;
1618 }
ef701d7b
HT
1619 addr = ram_block_add(new_block, &local_err);
1620 if (local_err) {
1621 g_free(new_block);
1622 error_propagate(errp, local_err);
1623 return -1;
1624 }
1625 return addr;
e1c57ab8
PB
1626}
1627
62be4e3a
MT
1628ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1629 MemoryRegion *mr, Error **errp)
1630{
1631 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1632}
1633
ef701d7b 1634ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1635{
62be4e3a
MT
1636 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1637}
1638
1639ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1640 void (*resized)(const char*,
1641 uint64_t length,
1642 void *host),
1643 MemoryRegion *mr, Error **errp)
1644{
1645 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1646}
1647
1f2e98b6
AW
1648void qemu_ram_free_from_ptr(ram_addr_t addr)
1649{
1650 RAMBlock *block;
1651
b2a8658e 1652 qemu_mutex_lock_ramlist();
0dc3f44a 1653 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1f2e98b6 1654 if (addr == block->offset) {
0dc3f44a 1655 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1656 ram_list.mru_block = NULL;
0dc3f44a
MD
1657 /* Write list before version */
1658 smp_wmb();
f798b07f 1659 ram_list.version++;
43771539 1660 g_free_rcu(block, rcu);
b2a8658e 1661 break;
1f2e98b6
AW
1662 }
1663 }
b2a8658e 1664 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1665}
1666
43771539
PB
1667static void reclaim_ramblock(RAMBlock *block)
1668{
1669 if (block->flags & RAM_PREALLOC) {
1670 ;
1671 } else if (xen_enabled()) {
1672 xen_invalidate_map_cache_entry(block->host);
1673#ifndef _WIN32
1674 } else if (block->fd >= 0) {
1675 munmap(block->host, block->max_length);
1676 close(block->fd);
1677#endif
1678 } else {
1679 qemu_anon_ram_free(block->host, block->max_length);
1680 }
1681 g_free(block);
1682}
1683
c227f099 1684void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1685{
04b16653
AW
1686 RAMBlock *block;
1687
b2a8658e 1688 qemu_mutex_lock_ramlist();
0dc3f44a 1689 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
04b16653 1690 if (addr == block->offset) {
0dc3f44a 1691 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1692 ram_list.mru_block = NULL;
0dc3f44a
MD
1693 /* Write list before version */
1694 smp_wmb();
f798b07f 1695 ram_list.version++;
43771539 1696 call_rcu(block, reclaim_ramblock, rcu);
b2a8658e 1697 break;
04b16653
AW
1698 }
1699 }
b2a8658e 1700 qemu_mutex_unlock_ramlist();
e9a1ab19
FB
1701}
1702
cd19cfa2
HY
1703#ifndef _WIN32
1704void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1705{
1706 RAMBlock *block;
1707 ram_addr_t offset;
1708 int flags;
1709 void *area, *vaddr;
1710
0dc3f44a 1711 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
cd19cfa2 1712 offset = addr - block->offset;
9b8424d5 1713 if (offset < block->max_length) {
1240be24 1714 vaddr = ramblock_ptr(block, offset);
7bd4f430 1715 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1716 ;
dfeaf2ab
MA
1717 } else if (xen_enabled()) {
1718 abort();
cd19cfa2
HY
1719 } else {
1720 flags = MAP_FIXED;
3435f395 1721 if (block->fd >= 0) {
dbcb8981
PB
1722 flags |= (block->flags & RAM_SHARED ?
1723 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1724 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1725 flags, block->fd, offset);
cd19cfa2 1726 } else {
2eb9fbaa
MA
1727 /*
1728 * Remap needs to match alloc. Accelerators that
1729 * set phys_mem_alloc never remap. If they did,
1730 * we'd need a remap hook here.
1731 */
1732 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1733
cd19cfa2
HY
1734 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1735 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1736 flags, -1, 0);
cd19cfa2
HY
1737 }
1738 if (area != vaddr) {
f15fbc4b
AP
1739 fprintf(stderr, "Could not remap addr: "
1740 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1741 length, addr);
1742 exit(1);
1743 }
8490fc78 1744 memory_try_enable_merging(vaddr, length);
ddb97f1d 1745 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 1746 }
cd19cfa2
HY
1747 }
1748 }
1749}
1750#endif /* !_WIN32 */
1751
a35ba7be
PB
1752int qemu_get_ram_fd(ram_addr_t addr)
1753{
ae3a7047
MD
1754 RAMBlock *block;
1755 int fd;
a35ba7be 1756
0dc3f44a 1757 rcu_read_lock();
ae3a7047
MD
1758 block = qemu_get_ram_block(addr);
1759 fd = block->fd;
0dc3f44a 1760 rcu_read_unlock();
ae3a7047 1761 return fd;
a35ba7be
PB
1762}
1763
3fd74b84
DM
1764void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1765{
ae3a7047
MD
1766 RAMBlock *block;
1767 void *ptr;
3fd74b84 1768
0dc3f44a 1769 rcu_read_lock();
ae3a7047
MD
1770 block = qemu_get_ram_block(addr);
1771 ptr = ramblock_ptr(block, 0);
0dc3f44a 1772 rcu_read_unlock();
ae3a7047 1773 return ptr;
3fd74b84
DM
1774}
1775
1b5ec234 1776/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047
MD
1777 * This should not be used for general purpose DMA. Use address_space_map
1778 * or address_space_rw instead. For local memory (e.g. video ram) that the
1779 * device owns, use memory_region_get_ram_ptr.
0dc3f44a
MD
1780 *
1781 * By the time this function returns, the returned pointer is not protected
1782 * by RCU anymore. If the caller is not within an RCU critical section and
1783 * does not hold the iothread lock, it must have other means of protecting the
1784 * pointer, such as a reference to the region that includes the incoming
1785 * ram_addr_t.
1b5ec234
PB
1786 */
1787void *qemu_get_ram_ptr(ram_addr_t addr)
1788{
ae3a7047
MD
1789 RAMBlock *block;
1790 void *ptr;
1b5ec234 1791
0dc3f44a 1792 rcu_read_lock();
ae3a7047
MD
1793 block = qemu_get_ram_block(addr);
1794
1795 if (xen_enabled() && block->host == NULL) {
0d6d3c87
PB
1796 /* We need to check if the requested address is in the RAM
1797 * because we don't want to map the entire memory in QEMU.
1798 * In that case just map until the end of the page.
1799 */
1800 if (block->offset == 0) {
ae3a7047 1801 ptr = xen_map_cache(addr, 0, 0);
0dc3f44a 1802 goto unlock;
0d6d3c87 1803 }
ae3a7047
MD
1804
1805 block->host = xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87 1806 }
ae3a7047
MD
1807 ptr = ramblock_ptr(block, addr - block->offset);
1808
0dc3f44a
MD
1809unlock:
1810 rcu_read_unlock();
ae3a7047 1811 return ptr;
dc828ca1
PB
1812}
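/*
 * Illustrative sketch (not part of exec.c) of the protection rule spelled
 * out in the comment above: either an RCU critical section or a reference
 * on the owning MemoryRegion must cover every use of the host pointer.
 * The helper name is made up for the example.
 */
static uint32_t example_peek_ram_word(ram_addr_t addr)
{
    uint32_t val;

    rcu_read_lock();                         /* keeps the RAMBlock alive */
    val = ldl_p(qemu_get_ram_ptr(addr));     /* pointer only used inside CS */
    rcu_read_unlock();
    return val;
}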
1813
38bee5dc 1814/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1815 * but takes a size argument.
0dc3f44a
MD
1816 *
1817 * By the time this function returns, the returned pointer is not protected
1818 * by RCU anymore. If the caller is not within an RCU critical section and
1819 * does not hold the iothread lock, it must have other means of protecting the
1820 * pointer, such as a reference to the region that includes the incoming
1821 * ram_addr_t.
ae3a7047 1822 */
cb85f7ab 1823static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1824{
ae3a7047 1825 void *ptr;
8ab934f9
SS
1826 if (*size == 0) {
1827 return NULL;
1828 }
868bb33f 1829 if (xen_enabled()) {
e41d7c69 1830 return xen_map_cache(addr, *size, 1);
868bb33f 1831 } else {
38bee5dc 1832 RAMBlock *block;
0dc3f44a
MD
1833 rcu_read_lock();
1834 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5
MT
1835 if (addr - block->offset < block->max_length) {
1836 if (addr - block->offset + *size > block->max_length)
1837 *size = block->max_length - addr + block->offset;
ae3a7047 1838 ptr = ramblock_ptr(block, addr - block->offset);
0dc3f44a 1839 rcu_read_unlock();
ae3a7047 1840 return ptr;
38bee5dc
SS
1841 }
1842 }
1843
1844 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1845 abort();
38bee5dc
SS
1846 }
1847}
1848
7443b437 1849/* Some of the softmmu routines need to translate from a host pointer
ae3a7047
MD
1850 * (typically a TLB entry) back to a ram offset.
1851 *
1852 * By the time this function returns, the returned pointer is not protected
1853 * by RCU anymore. If the caller is not within an RCU critical section and
1854 * does not hold the iothread lock, it must have other means of protecting the
1855 * pointer, such as a reference to the region that includes the incoming
1856 * ram_addr_t.
1857 */
1b5ec234 1858MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1859{
94a6b54f
PB
1860 RAMBlock *block;
1861 uint8_t *host = ptr;
ae3a7047 1862 MemoryRegion *mr;
94a6b54f 1863
868bb33f 1864 if (xen_enabled()) {
0dc3f44a 1865 rcu_read_lock();
e41d7c69 1866 *ram_addr = xen_ram_addr_from_mapcache(ptr);
ae3a7047 1867 mr = qemu_get_ram_block(*ram_addr)->mr;
0dc3f44a 1868 rcu_read_unlock();
ae3a7047 1869 return mr;
712c2b41
SS
1870 }
1871
0dc3f44a
MD
1872 rcu_read_lock();
1873 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1874 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1875 goto found;
1876 }
1877
0dc3f44a 1878 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c
JN
1879 /* This case appears when the block is not mapped. */
1880 if (block->host == NULL) {
1881 continue;
1882 }
9b8424d5 1883 if (host - block->host < block->max_length) {
23887b79 1884 goto found;
f471a17e 1885 }
94a6b54f 1886 }
432d268c 1887
0dc3f44a 1888 rcu_read_unlock();
1b5ec234 1889 return NULL;
23887b79
PB
1890
1891found:
1892 *ram_addr = block->offset + (host - block->host);
ae3a7047 1893 mr = block->mr;
0dc3f44a 1894 rcu_read_unlock();
ae3a7047 1895 return mr;
e890261f 1896}
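/*
 * Illustrative sketch (not part of exec.c): mapping a host pointer, e.g.
 * one obtained from a TLB entry or from address_space_map(), back to its
 * ram_addr_t.  The helper name is made up for the example.
 */
static bool example_host_to_ram_addr(void *host, ram_addr_t *out)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr = qemu_ram_addr_from_host(host, &ram_addr);

    if (mr == NULL) {
        return false;               /* pointer is not backed by guest RAM */
    }
    *out = ram_addr;
    return true;
}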
f471a17e 1897
a8170e5e 1898static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1899 uint64_t val, unsigned size)
9fa3e853 1900{
52159192 1901 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1902 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1903 }
0e0df1e2
AK
1904 switch (size) {
1905 case 1:
1906 stb_p(qemu_get_ram_ptr(ram_addr), val);
1907 break;
1908 case 2:
1909 stw_p(qemu_get_ram_ptr(ram_addr), val);
1910 break;
1911 case 4:
1912 stl_p(qemu_get_ram_ptr(ram_addr), val);
1913 break;
1914 default:
1915 abort();
3a7d929e 1916 }
58d2707e
PB
1917 /* Set both VGA and migration bits for simplicity and to remove
1918 * the notdirty callback faster.
1919 */
1920 cpu_physical_memory_set_dirty_range(ram_addr, size,
1921 DIRTY_CLIENTS_NOCODE);
f23db169
FB
1922 /* we remove the notdirty callback only if the code has been
1923 flushed */
a2cd8c85 1924 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1925 CPUArchState *env = current_cpu->env_ptr;
93afeade 1926 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1927 }
9fa3e853
FB
1928}
1929
b018ddf6
PB
1930static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1931 unsigned size, bool is_write)
1932{
1933 return is_write;
1934}
1935
0e0df1e2 1936static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1937 .write = notdirty_mem_write,
b018ddf6 1938 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1939 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1940};
1941
0f459d16 1942/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 1943static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 1944{
93afeade
AF
1945 CPUState *cpu = current_cpu;
1946 CPUArchState *env = cpu->env_ptr;
06d55cc1 1947 target_ulong pc, cs_base;
0f459d16 1948 target_ulong vaddr;
a1d1bb31 1949 CPUWatchpoint *wp;
06d55cc1 1950 int cpu_flags;
0f459d16 1951
ff4700b0 1952 if (cpu->watchpoint_hit) {
06d55cc1
AL
1953 /* We re-entered the check after replacing the TB. Now raise
1954 * the debug interrupt so that it will trigger after the
1955 * current instruction. */
93afeade 1956 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1957 return;
1958 }
93afeade 1959 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1960 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1961 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1962 && (wp->flags & flags)) {
08225676
PM
1963 if (flags == BP_MEM_READ) {
1964 wp->flags |= BP_WATCHPOINT_HIT_READ;
1965 } else {
1966 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1967 }
1968 wp->hitaddr = vaddr;
66b9b43c 1969 wp->hitattrs = attrs;
ff4700b0
AF
1970 if (!cpu->watchpoint_hit) {
1971 cpu->watchpoint_hit = wp;
239c51a5 1972 tb_check_watchpoint(cpu);
6e140f28 1973 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1974 cpu->exception_index = EXCP_DEBUG;
5638d180 1975 cpu_loop_exit(cpu);
6e140f28
AL
1976 } else {
1977 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1978 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1979 cpu_resume_from_signal(cpu, NULL);
6e140f28 1980 }
06d55cc1 1981 }
6e140f28
AL
1982 } else {
1983 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1984 }
1985 }
1986}
1987
6658ffb8
PB
1988/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1989 so these check for a hit then pass through to the normal out-of-line
1990 phys routines. */
66b9b43c
PM
1991static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1992 unsigned size, MemTxAttrs attrs)
6658ffb8 1993{
66b9b43c
PM
1994 MemTxResult res;
1995 uint64_t data;
1996
1997 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 1998 switch (size) {
66b9b43c
PM
1999 case 1:
2000 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
2001 break;
2002 case 2:
2003 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
2004 break;
2005 case 4:
2006 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
2007 break;
1ec9b909
AK
2008 default: abort();
2009 }
66b9b43c
PM
2010 *pdata = data;
2011 return res;
6658ffb8
PB
2012}
2013
66b9b43c
PM
2014static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2015 uint64_t val, unsigned size,
2016 MemTxAttrs attrs)
6658ffb8 2017{
66b9b43c
PM
2018 MemTxResult res;
2019
2020 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 2021 switch (size) {
67364150 2022 case 1:
66b9b43c 2023 address_space_stb(&address_space_memory, addr, val, attrs, &res);
67364150
MF
2024 break;
2025 case 2:
66b9b43c 2026 address_space_stw(&address_space_memory, addr, val, attrs, &res);
67364150
MF
2027 break;
2028 case 4:
66b9b43c 2029 address_space_stl(&address_space_memory, addr, val, attrs, &res);
67364150 2030 break;
1ec9b909
AK
2031 default: abort();
2032 }
66b9b43c 2033 return res;
6658ffb8
PB
2034}
2035
1ec9b909 2036static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
2037 .read_with_attrs = watch_mem_read,
2038 .write_with_attrs = watch_mem_write,
1ec9b909 2039 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 2040};
6658ffb8 2041
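/*
 * Illustrative sketch (not part of exec.c): how a debugger front end might
 * arm the machinery above.  cpu_watchpoint_insert() is declared elsewhere
 * and its use here is an assumption for the example.  Once the watchpoint
 * exists, the TLB routes guest accesses to the range through
 * watch_mem_read()/watch_mem_write(), which call check_watchpoint().
 */
static int example_watch_guest_word(CPUState *cpu, vaddr guest_addr)
{
    /* Trap 4-byte writes; BP_GDB marks the watchpoint as debugger-owned. */
    return cpu_watchpoint_insert(cpu, guest_addr, 4,
                                 BP_MEM_WRITE | BP_GDB, NULL);
}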
f25a49e0
PM
2042static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2043 unsigned len, MemTxAttrs attrs)
db7b5426 2044{
acc9d80b 2045 subpage_t *subpage = opaque;
ff6cff75 2046 uint8_t buf[8];
5c9eb028 2047 MemTxResult res;
791af8c8 2048
db7b5426 2049#if defined(DEBUG_SUBPAGE)
016e9d62 2050 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 2051 subpage, len, addr);
db7b5426 2052#endif
5c9eb028
PM
2053 res = address_space_read(subpage->as, addr + subpage->base,
2054 attrs, buf, len);
2055 if (res) {
2056 return res;
f25a49e0 2057 }
acc9d80b
JK
2058 switch (len) {
2059 case 1:
f25a49e0
PM
2060 *data = ldub_p(buf);
2061 return MEMTX_OK;
acc9d80b 2062 case 2:
f25a49e0
PM
2063 *data = lduw_p(buf);
2064 return MEMTX_OK;
acc9d80b 2065 case 4:
f25a49e0
PM
2066 *data = ldl_p(buf);
2067 return MEMTX_OK;
ff6cff75 2068 case 8:
f25a49e0
PM
2069 *data = ldq_p(buf);
2070 return MEMTX_OK;
acc9d80b
JK
2071 default:
2072 abort();
2073 }
db7b5426
BS
2074}
2075
f25a49e0
PM
2076static MemTxResult subpage_write(void *opaque, hwaddr addr,
2077 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 2078{
acc9d80b 2079 subpage_t *subpage = opaque;
ff6cff75 2080 uint8_t buf[8];
acc9d80b 2081
db7b5426 2082#if defined(DEBUG_SUBPAGE)
016e9d62 2083 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
2084 " value %"PRIx64"\n",
2085 __func__, subpage, len, addr, value);
db7b5426 2086#endif
acc9d80b
JK
2087 switch (len) {
2088 case 1:
2089 stb_p(buf, value);
2090 break;
2091 case 2:
2092 stw_p(buf, value);
2093 break;
2094 case 4:
2095 stl_p(buf, value);
2096 break;
ff6cff75
PB
2097 case 8:
2098 stq_p(buf, value);
2099 break;
acc9d80b
JK
2100 default:
2101 abort();
2102 }
5c9eb028
PM
2103 return address_space_write(subpage->as, addr + subpage->base,
2104 attrs, buf, len);
db7b5426
BS
2105}
2106
c353e4cc 2107static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2108 unsigned len, bool is_write)
c353e4cc 2109{
acc9d80b 2110 subpage_t *subpage = opaque;
c353e4cc 2111#if defined(DEBUG_SUBPAGE)
016e9d62 2112 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2113 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2114#endif
2115
acc9d80b 2116 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2117 len, is_write);
c353e4cc
PB
2118}
2119
70c68e44 2120static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2121 .read_with_attrs = subpage_read,
2122 .write_with_attrs = subpage_write,
ff6cff75
PB
2123 .impl.min_access_size = 1,
2124 .impl.max_access_size = 8,
2125 .valid.min_access_size = 1,
2126 .valid.max_access_size = 8,
c353e4cc 2127 .valid.accepts = subpage_accepts,
70c68e44 2128 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2129};
2130
c227f099 2131static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2132 uint16_t section)
db7b5426
BS
2133{
2134 int idx, eidx;
2135
2136 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2137 return -1;
2138 idx = SUBPAGE_IDX(start);
2139 eidx = SUBPAGE_IDX(end);
2140#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2141 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2142 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2143#endif
db7b5426 2144 for (; idx <= eidx; idx++) {
5312bd8b 2145 mmio->sub_section[idx] = section;
db7b5426
BS
2146 }
2147
2148 return 0;
2149}
2150
acc9d80b 2151static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2152{
c227f099 2153 subpage_t *mmio;
db7b5426 2154
7267c094 2155 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2156
acc9d80b 2157 mmio->as = as;
1eec614b 2158 mmio->base = base;
2c9b15ca 2159 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2160 NULL, TARGET_PAGE_SIZE);
b3b00c78 2161 mmio->iomem.subpage = true;
db7b5426 2162#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2163 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2164 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2165#endif
b41aac4f 2166 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2167
2168 return mmio;
2169}
2170
a656e22f
PC
2171static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2172 MemoryRegion *mr)
5312bd8b 2173{
a656e22f 2174 assert(as);
5312bd8b 2175 MemoryRegionSection section = {
a656e22f 2176 .address_space = as,
5312bd8b
AK
2177 .mr = mr,
2178 .offset_within_address_space = 0,
2179 .offset_within_region = 0,
052e87b0 2180 .size = int128_2_64(),
5312bd8b
AK
2181 };
2182
53cb28cb 2183 return phys_section_add(map, &section);
5312bd8b
AK
2184}
2185
9d82b5a7 2186MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 2187{
79e2b9ae
PB
2188 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2189 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2190
2191 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2192}
2193
e9179ce1
AK
2194static void io_mem_init(void)
2195{
1f6245e5 2196 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2197 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2198 NULL, UINT64_MAX);
2c9b15ca 2199 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2200 NULL, UINT64_MAX);
2c9b15ca 2201 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2202 NULL, UINT64_MAX);
e9179ce1
AK
2203}
2204
ac1970fb 2205static void mem_begin(MemoryListener *listener)
00752703
PB
2206{
2207 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2208 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2209 uint16_t n;
2210
a656e22f 2211 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2212 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2213 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2214 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2215 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2216 assert(n == PHYS_SECTION_ROM);
a656e22f 2217 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2218 assert(n == PHYS_SECTION_WATCH);
00752703 2219
9736e55b 2220 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2221 d->as = as;
2222 as->next_dispatch = d;
2223}
2224
79e2b9ae
PB
2225static void address_space_dispatch_free(AddressSpaceDispatch *d)
2226{
2227 phys_sections_free(&d->map);
2228 g_free(d);
2229}
2230
00752703 2231static void mem_commit(MemoryListener *listener)
ac1970fb 2232{
89ae337a 2233 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2234 AddressSpaceDispatch *cur = as->dispatch;
2235 AddressSpaceDispatch *next = as->next_dispatch;
2236
53cb28cb 2237 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2238
79e2b9ae 2239 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2240 if (cur) {
79e2b9ae 2241 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2242 }
9affd6fc
PB
2243}
2244
1d71148e 2245static void tcg_commit(MemoryListener *listener)
50c1e149 2246{
182735ef 2247 CPUState *cpu;
117712c3
AK
2248
2249 /* since each CPU stores ram addresses in its TLB cache, we must
2250 reset the modified entries */
2251 /* XXX: slow ! */
bdc44640 2252 CPU_FOREACH(cpu) {
33bde2e1
EI
2253 /* FIXME: Disentangle the cpu.h circular file deps so we can
2254 directly get the right CPU from listener. */
2255 if (cpu->tcg_as_listener != listener) {
2256 continue;
2257 }
76e5c76f 2258 cpu_reload_memory_map(cpu);
117712c3 2259 }
50c1e149
AK
2260}
2261
ac1970fb
AK
2262void address_space_init_dispatch(AddressSpace *as)
2263{
00752703 2264 as->dispatch = NULL;
89ae337a 2265 as->dispatch_listener = (MemoryListener) {
ac1970fb 2266 .begin = mem_begin,
00752703 2267 .commit = mem_commit,
ac1970fb
AK
2268 .region_add = mem_add,
2269 .region_nop = mem_add,
2270 .priority = 0,
2271 };
89ae337a 2272 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2273}
2274
6e48e8f9
PB
2275void address_space_unregister(AddressSpace *as)
2276{
2277 memory_listener_unregister(&as->dispatch_listener);
2278}
2279
83f3c251
AK
2280void address_space_destroy_dispatch(AddressSpace *as)
2281{
2282 AddressSpaceDispatch *d = as->dispatch;
2283
79e2b9ae
PB
2284 atomic_rcu_set(&as->dispatch, NULL);
2285 if (d) {
2286 call_rcu(d, address_space_dispatch_free, rcu);
2287 }
83f3c251
AK
2288}
2289
62152b8a
AK
2290static void memory_map_init(void)
2291{
7267c094 2292 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2293
57271d63 2294 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2295 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2296
7267c094 2297 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2298 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2299 65536);
7dca8043 2300 address_space_init(&address_space_io, system_io, "I/O");
62152b8a
AK
2301}
2302
2303MemoryRegion *get_system_memory(void)
2304{
2305 return system_memory;
2306}
2307
309cb471
AK
2308MemoryRegion *get_system_io(void)
2309{
2310 return system_io;
2311}
2312
e2eef170
PB
2313#endif /* !defined(CONFIG_USER_ONLY) */
2314
13eb76e0
FB
2315/* physical memory access (slow version, mainly for debug) */
2316#if defined(CONFIG_USER_ONLY)
f17ec444 2317int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2318 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2319{
2320 int l, flags;
2321 target_ulong page;
53a5960a 2322 void * p;
13eb76e0
FB
2323
2324 while (len > 0) {
2325 page = addr & TARGET_PAGE_MASK;
2326 l = (page + TARGET_PAGE_SIZE) - addr;
2327 if (l > len)
2328 l = len;
2329 flags = page_get_flags(page);
2330 if (!(flags & PAGE_VALID))
a68fe89c 2331 return -1;
13eb76e0
FB
2332 if (is_write) {
2333 if (!(flags & PAGE_WRITE))
a68fe89c 2334 return -1;
579a97f7 2335 /* XXX: this code should not depend on lock_user */
72fb7daa 2336 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2337 return -1;
72fb7daa
AJ
2338 memcpy(p, buf, l);
2339 unlock_user(p, addr, l);
13eb76e0
FB
2340 } else {
2341 if (!(flags & PAGE_READ))
a68fe89c 2342 return -1;
579a97f7 2343 /* XXX: this code should not depend on lock_user */
72fb7daa 2344 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2345 return -1;
72fb7daa 2346 memcpy(buf, p, l);
5b257578 2347 unlock_user(p, addr, 0);
13eb76e0
FB
2348 }
2349 len -= l;
2350 buf += l;
2351 addr += l;
2352 }
a68fe89c 2353 return 0;
13eb76e0 2354}
8df1cd07 2355
13eb76e0 2356#else
51d7a9eb 2357
845b6214 2358static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
a8170e5e 2359 hwaddr length)
51d7a9eb 2360{
e87f7778
PB
2361 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2362 /* No early return if dirty_log_mask is or becomes 0, because
2363 * cpu_physical_memory_set_dirty_range will still call
2364 * xen_modified_memory.
2365 */
2366 if (dirty_log_mask) {
2367 dirty_log_mask =
2368 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2369 }
2370 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2371 tb_invalidate_phys_range(addr, addr + length);
2372 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
51d7a9eb 2373 }
e87f7778 2374 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
51d7a9eb
AP
2375}
2376
23326164 2377static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2378{
e1622f4b 2379 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2380
2381 /* Regions are assumed to support 1-4 byte accesses unless
2382 otherwise specified. */
23326164
RH
2383 if (access_size_max == 0) {
2384 access_size_max = 4;
2385 }
2386
2387 /* Bound the maximum access by the alignment of the address. */
2388 if (!mr->ops->impl.unaligned) {
2389 unsigned align_size_max = addr & -addr;
2390 if (align_size_max != 0 && align_size_max < access_size_max) {
2391 access_size_max = align_size_max;
2392 }
82f2563f 2393 }
23326164
RH
2394
2395 /* Don't attempt accesses larger than the maximum. */
2396 if (l > access_size_max) {
2397 l = access_size_max;
82f2563f 2398 }
6554f5c0 2399 l = pow2floor(l);
23326164
RH
2400
2401 return l;
82f2563f
PB
2402}
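/*
 * Worked example for memory_access_size() (comment only): for a region
 * whose ops declare valid.max_access_size == 8 and no unaligned support,
 * a 6-byte request at addr 0x1002 is bounded as follows:
 *     align_size_max = 0x1002 & -0x1002 = 0x2  ->  access_size_max = 2
 *     l = MIN(6, 2) = 2, pow2floor(2) = 2
 * so the caller in address_space_rw() issues a 2-byte access and loops
 * for the remaining bytes.
 */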
2403
4840f10e 2404static bool prepare_mmio_access(MemoryRegion *mr)
125b3806 2405{
4840f10e
JK
2406 bool unlocked = !qemu_mutex_iothread_locked();
2407 bool release_lock = false;
2408
2409 if (unlocked && mr->global_locking) {
2410 qemu_mutex_lock_iothread();
2411 unlocked = false;
2412 release_lock = true;
2413 }
125b3806 2414 if (mr->flush_coalesced_mmio) {
4840f10e
JK
2415 if (unlocked) {
2416 qemu_mutex_lock_iothread();
2417 }
125b3806 2418 qemu_flush_coalesced_mmio_buffer();
4840f10e
JK
2419 if (unlocked) {
2420 qemu_mutex_unlock_iothread();
2421 }
125b3806 2422 }
4840f10e
JK
2423
2424 return release_lock;
125b3806
PB
2425}
2426
5c9eb028
PM
2427MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2428 uint8_t *buf, int len, bool is_write)
13eb76e0 2429{
149f54b5 2430 hwaddr l;
13eb76e0 2431 uint8_t *ptr;
791af8c8 2432 uint64_t val;
149f54b5 2433 hwaddr addr1;
5c8a00ce 2434 MemoryRegion *mr;
3b643495 2435 MemTxResult result = MEMTX_OK;
4840f10e 2436 bool release_lock = false;
3b46e624 2437
41063e1e 2438 rcu_read_lock();
13eb76e0 2439 while (len > 0) {
149f54b5 2440 l = len;
5c8a00ce 2441 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2442
13eb76e0 2443 if (is_write) {
5c8a00ce 2444 if (!memory_access_is_direct(mr, is_write)) {
4840f10e 2445 release_lock |= prepare_mmio_access(mr);
5c8a00ce 2446 l = memory_access_size(mr, l, addr1);
4917cf44 2447 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2448 potential bugs */
23326164
RH
2449 switch (l) {
2450 case 8:
2451 /* 64 bit write access */
2452 val = ldq_p(buf);
3b643495
PM
2453 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2454 attrs);
23326164
RH
2455 break;
2456 case 4:
1c213d19 2457 /* 32 bit write access */
c27004ec 2458 val = ldl_p(buf);
3b643495
PM
2459 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2460 attrs);
23326164
RH
2461 break;
2462 case 2:
1c213d19 2463 /* 16 bit write access */
c27004ec 2464 val = lduw_p(buf);
3b643495
PM
2465 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2466 attrs);
23326164
RH
2467 break;
2468 case 1:
1c213d19 2469 /* 8 bit write access */
c27004ec 2470 val = ldub_p(buf);
3b643495
PM
2471 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2472 attrs);
23326164
RH
2473 break;
2474 default:
2475 abort();
13eb76e0 2476 }
2bbfa05d 2477 } else {
5c8a00ce 2478 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2479 /* RAM case */
5579c7f3 2480 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2481 memcpy(ptr, buf, l);
845b6214 2482 invalidate_and_set_dirty(mr, addr1, l);
13eb76e0
FB
2483 }
2484 } else {
5c8a00ce 2485 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2486 /* I/O case */
4840f10e 2487 release_lock |= prepare_mmio_access(mr);
5c8a00ce 2488 l = memory_access_size(mr, l, addr1);
23326164
RH
2489 switch (l) {
2490 case 8:
2491 /* 64 bit read access */
3b643495
PM
2492 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2493 attrs);
23326164
RH
2494 stq_p(buf, val);
2495 break;
2496 case 4:
13eb76e0 2497 /* 32 bit read access */
3b643495
PM
2498 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2499 attrs);
c27004ec 2500 stl_p(buf, val);
23326164
RH
2501 break;
2502 case 2:
13eb76e0 2503 /* 16 bit read access */
3b643495
PM
2504 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2505 attrs);
c27004ec 2506 stw_p(buf, val);
23326164
RH
2507 break;
2508 case 1:
1c213d19 2509 /* 8 bit read access */
3b643495
PM
2510 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2511 attrs);
c27004ec 2512 stb_p(buf, val);
23326164
RH
2513 break;
2514 default:
2515 abort();
13eb76e0
FB
2516 }
2517 } else {
2518 /* RAM case */
5c8a00ce 2519 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2520 memcpy(buf, ptr, l);
13eb76e0
FB
2521 }
2522 }
4840f10e
JK
2523
2524 if (release_lock) {
2525 qemu_mutex_unlock_iothread();
2526 release_lock = false;
2527 }
2528
13eb76e0
FB
2529 len -= l;
2530 buf += l;
2531 addr += l;
2532 }
41063e1e 2533 rcu_read_unlock();
fd8aaa76 2534
3b643495 2535 return result;
13eb76e0 2536}
8df1cd07 2537
5c9eb028
PM
2538MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2539 const uint8_t *buf, int len)
ac1970fb 2540{
5c9eb028 2541 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
ac1970fb
AK
2542}
2543
5c9eb028
PM
2544MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2545 uint8_t *buf, int len)
ac1970fb 2546{
5c9eb028 2547 return address_space_rw(as, addr, attrs, buf, len, false);
ac1970fb
AK
2548}
2549
2550
a8170e5e 2551void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2552 int len, int is_write)
2553{
5c9eb028
PM
2554 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2555 buf, len, is_write);
ac1970fb
AK
2556}
2557
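/*
 * Illustrative sketch (not part of exec.c): a device model copying a small
 * buffer into guest memory and checking the transaction result.  The
 * helper name and parameters are made up for the example.
 */
static bool example_copy_to_guest(AddressSpace *as, hwaddr gpa,
                                  const uint8_t *data, int len)
{
    MemTxResult res = address_space_write(as, gpa, MEMTXATTRS_UNSPECIFIED,
                                          data, len);
    return res == MEMTX_OK;
}

/* Callers that only ever touch system memory can use the legacy wrapper:
 *     cpu_physical_memory_rw(gpa, buf, len, 1);     -- 1 means write
 */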
582b55a9
AG
2558enum write_rom_type {
2559 WRITE_DATA,
2560 FLUSH_CACHE,
2561};
2562
2a221651 2563static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2564 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2565{
149f54b5 2566 hwaddr l;
d0ecd2aa 2567 uint8_t *ptr;
149f54b5 2568 hwaddr addr1;
5c8a00ce 2569 MemoryRegion *mr;
3b46e624 2570
41063e1e 2571 rcu_read_lock();
d0ecd2aa 2572 while (len > 0) {
149f54b5 2573 l = len;
2a221651 2574 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2575
5c8a00ce
PB
2576 if (!(memory_region_is_ram(mr) ||
2577 memory_region_is_romd(mr))) {
b242e0e0 2578 l = memory_access_size(mr, l, addr1);
d0ecd2aa 2579 } else {
5c8a00ce 2580 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2581 /* ROM/RAM case */
5579c7f3 2582 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2583 switch (type) {
2584 case WRITE_DATA:
2585 memcpy(ptr, buf, l);
845b6214 2586 invalidate_and_set_dirty(mr, addr1, l);
582b55a9
AG
2587 break;
2588 case FLUSH_CACHE:
2589 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2590 break;
2591 }
d0ecd2aa
FB
2592 }
2593 len -= l;
2594 buf += l;
2595 addr += l;
2596 }
41063e1e 2597 rcu_read_unlock();
d0ecd2aa
FB
2598}
2599
582b55a9 2600/* used for ROM loading : can write in RAM and ROM */
2a221651 2601void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2602 const uint8_t *buf, int len)
2603{
2a221651 2604 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2605}
2606
2607void cpu_flush_icache_range(hwaddr start, int len)
2608{
2609 /*
2610 * This function should do the same thing as an icache flush that was
2611 * triggered from within the guest. For TCG we are always cache coherent,
2612 * so there is no need to flush anything. For KVM / Xen we need to flush
2613 * the host's instruction cache at least.
2614 */
2615 if (tcg_enabled()) {
2616 return;
2617 }
2618
2a221651
EI
2619 cpu_physical_memory_write_rom_internal(&address_space_memory,
2620 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2621}
2622
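/*
 * Illustrative sketch (not part of exec.c): a board or loader installing a
 * firmware blob through the ROM write path and keeping the host icache
 * coherent for KVM/Xen.  Real loaders usually go through rom_add_blob() or
 * load_image_targphys(); this direct call is an assumption for the example.
 */
static void example_install_firmware(AddressSpace *as, hwaddr rom_base,
                                     const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(as, rom_base, blob, blob_len);
    cpu_flush_icache_range(rom_base, blob_len);
}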
6d16c2f8 2623typedef struct {
d3e71559 2624 MemoryRegion *mr;
6d16c2f8 2625 void *buffer;
a8170e5e
AK
2626 hwaddr addr;
2627 hwaddr len;
c2cba0ff 2628 bool in_use;
6d16c2f8
AL
2629} BounceBuffer;
2630
2631static BounceBuffer bounce;
2632
ba223c29 2633typedef struct MapClient {
e95205e1 2634 QEMUBH *bh;
72cf2d4f 2635 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2636} MapClient;
2637
38e047b5 2638QemuMutex map_client_list_lock;
72cf2d4f
BS
2639static QLIST_HEAD(map_client_list, MapClient) map_client_list
2640 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2641
e95205e1
FZ
2642static void cpu_unregister_map_client_do(MapClient *client)
2643{
2644 QLIST_REMOVE(client, link);
2645 g_free(client);
2646}
2647
33b6c2ed
FZ
2648static void cpu_notify_map_clients_locked(void)
2649{
2650 MapClient *client;
2651
2652 while (!QLIST_EMPTY(&map_client_list)) {
2653 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2654 qemu_bh_schedule(client->bh);
2655 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2656 }
2657}
2658
e95205e1 2659void cpu_register_map_client(QEMUBH *bh)
ba223c29 2660{
7267c094 2661 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2662
38e047b5 2663 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2664 client->bh = bh;
72cf2d4f 2665 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2666 if (!atomic_read(&bounce.in_use)) {
2667 cpu_notify_map_clients_locked();
2668 }
38e047b5 2669 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2670}
2671
38e047b5 2672void cpu_exec_init_all(void)
ba223c29 2673{
38e047b5
FZ
2674 qemu_mutex_init(&ram_list.mutex);
2675 memory_map_init();
2676 io_mem_init();
2677 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2678}
2679
e95205e1 2680void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2681{
2682 MapClient *client;
2683
e95205e1
FZ
2684 qemu_mutex_lock(&map_client_list_lock);
2685 QLIST_FOREACH(client, &map_client_list, link) {
2686 if (client->bh == bh) {
2687 cpu_unregister_map_client_do(client);
2688 break;
2689 }
ba223c29 2690 }
e95205e1 2691 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2692}
2693
2694static void cpu_notify_map_clients(void)
2695{
38e047b5 2696 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2697 cpu_notify_map_clients_locked();
38e047b5 2698 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2699}
2700
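/*
 * Illustrative sketch (not part of exec.c) of the retry pattern the
 * map-client list exists for: a device whose address_space_map() call
 * failed (bounce buffer busy) registers a bottom half, and
 * cpu_notify_map_clients() schedules it once the buffer is released.
 * The structure and callback names are made up for the example.
 */
typedef struct ExampleDMA {
    QEMUBH *retry_bh;
    /* ... state describing the pending transfer ... */
} ExampleDMA;

static void example_dma_retry(void *opaque)
{
    ExampleDMA *dma = opaque;

    /* The notifier already removed this client from the list; retry the
     * address_space_map() call that failed earlier using the state kept
     * in *dma (elided in this sketch). */
    (void)dma;
}

static void example_dma_map_failed(ExampleDMA *dma)
{
    dma->retry_bh = qemu_bh_new(example_dma_retry, dma);
    cpu_register_map_client(dma->retry_bh);
    /* cpu_unregister_map_client(dma->retry_bh) would cancel the retry,
     * e.g. on device reset, before the notification fires. */
}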
51644ab7
PB
2701bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2702{
5c8a00ce 2703 MemoryRegion *mr;
51644ab7
PB
2704 hwaddr l, xlat;
2705
41063e1e 2706 rcu_read_lock();
51644ab7
PB
2707 while (len > 0) {
2708 l = len;
5c8a00ce
PB
2709 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2710 if (!memory_access_is_direct(mr, is_write)) {
2711 l = memory_access_size(mr, l, addr);
2712 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2713 return false;
2714 }
2715 }
2716
2717 len -= l;
2718 addr += l;
2719 }
41063e1e 2720 rcu_read_unlock();
51644ab7
PB
2721 return true;
2722}
2723
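/*
 * Illustrative sketch (not part of exec.c): probing whether a DMA window
 * is fully backed before committing to a transfer.  The helper name and
 * window parameters are made up for the example.
 */
static bool example_dma_window_ok(AddressSpace *as, hwaddr base, int size)
{
    /* False if any part of the window resolves to a region that rejects
     * writes of the access sizes the loop above would generate. */
    return address_space_access_valid(as, base, size, true);
}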
6d16c2f8
AL
2724/* Map a physical memory region into a host virtual address.
2725 * May map a subset of the requested range, given by and returned in *plen.
2726 * May return NULL if resources needed to perform the mapping are exhausted.
2727 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2728 * Use cpu_register_map_client() to know when retrying the map operation is
2729 * likely to succeed.
6d16c2f8 2730 */
ac1970fb 2731void *address_space_map(AddressSpace *as,
a8170e5e
AK
2732 hwaddr addr,
2733 hwaddr *plen,
ac1970fb 2734 bool is_write)
6d16c2f8 2735{
a8170e5e 2736 hwaddr len = *plen;
e3127ae0
PB
2737 hwaddr done = 0;
2738 hwaddr l, xlat, base;
2739 MemoryRegion *mr, *this_mr;
2740 ram_addr_t raddr;
6d16c2f8 2741
e3127ae0
PB
2742 if (len == 0) {
2743 return NULL;
2744 }
38bee5dc 2745
e3127ae0 2746 l = len;
41063e1e 2747 rcu_read_lock();
e3127ae0 2748 mr = address_space_translate(as, addr, &xlat, &l, is_write);
41063e1e 2749
e3127ae0 2750 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2751 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 2752 rcu_read_unlock();
e3127ae0 2753 return NULL;
6d16c2f8 2754 }
e85d9db5
KW
2755 /* Avoid unbounded allocations */
2756 l = MIN(l, TARGET_PAGE_SIZE);
2757 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2758 bounce.addr = addr;
2759 bounce.len = l;
d3e71559
PB
2760
2761 memory_region_ref(mr);
2762 bounce.mr = mr;
e3127ae0 2763 if (!is_write) {
5c9eb028
PM
2764 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2765 bounce.buffer, l);
8ab934f9 2766 }
6d16c2f8 2767
41063e1e 2768 rcu_read_unlock();
e3127ae0
PB
2769 *plen = l;
2770 return bounce.buffer;
2771 }
2772
2773 base = xlat;
2774 raddr = memory_region_get_ram_addr(mr);
2775
2776 for (;;) {
6d16c2f8
AL
2777 len -= l;
2778 addr += l;
e3127ae0
PB
2779 done += l;
2780 if (len == 0) {
2781 break;
2782 }
2783
2784 l = len;
2785 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2786 if (this_mr != mr || xlat != base + done) {
2787 break;
2788 }
6d16c2f8 2789 }
e3127ae0 2790
d3e71559 2791 memory_region_ref(mr);
41063e1e 2792 rcu_read_unlock();
e3127ae0
PB
2793 *plen = done;
2794 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2795}
2796
ac1970fb 2797/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2798 * Will also mark the memory as dirty if is_write == 1. access_len gives
2799 * the amount of memory that was actually read or written by the caller.
2800 */
a8170e5e
AK
2801void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2802 int is_write, hwaddr access_len)
6d16c2f8
AL
2803{
2804 if (buffer != bounce.buffer) {
d3e71559
PB
2805 MemoryRegion *mr;
2806 ram_addr_t addr1;
2807
2808 mr = qemu_ram_addr_from_host(buffer, &addr1);
2809 assert(mr != NULL);
6d16c2f8 2810 if (is_write) {
845b6214 2811 invalidate_and_set_dirty(mr, addr1, access_len);
6d16c2f8 2812 }
868bb33f 2813 if (xen_enabled()) {
e41d7c69 2814 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2815 }
d3e71559 2816 memory_region_unref(mr);
6d16c2f8
AL
2817 return;
2818 }
2819 if (is_write) {
5c9eb028
PM
2820 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2821 bounce.buffer, access_len);
6d16c2f8 2822 }
f8a83245 2823 qemu_vfree(bounce.buffer);
6d16c2f8 2824 bounce.buffer = NULL;
d3e71559 2825 memory_region_unref(bounce.mr);
c2cba0ff 2826 atomic_mb_set(&bounce.in_use, false);
ba223c29 2827 cpu_notify_map_clients();
6d16c2f8 2828}
d0ecd2aa 2829
a8170e5e
AK
2830void *cpu_physical_memory_map(hwaddr addr,
2831 hwaddr *plen,
ac1970fb
AK
2832 int is_write)
2833{
2834 return address_space_map(&address_space_memory, addr, plen, is_write);
2835}
2836
a8170e5e
AK
2837void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2838 int is_write, hwaddr access_len)
ac1970fb
AK
2839{
2840 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2841}
2842
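/*
 * Illustrative sketch (not part of exec.c) of the map/unmap contract
 * documented above: map a guest-physical range for writing, fill it, then
 * unmap with the number of bytes actually touched so the dirty tracking is
 * updated.  The helper name and pattern fill are made up for the example.
 */
static bool example_fill_guest_range(AddressSpace *as, hwaddr gpa,
                                     hwaddr len, uint8_t pattern)
{
    hwaddr mapped = len;
    void *host = address_space_map(as, gpa, &mapped, true);

    if (!host) {
        return false;      /* resources exhausted; see the map-client notes */
    }
    memset(host, pattern, mapped);             /* mapped may be < len */
    address_space_unmap(as, host, mapped, true, mapped);
    return mapped == len;
}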
8df1cd07 2843/* warning: addr must be aligned */
50013115
PM
2844static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2845 MemTxAttrs attrs,
2846 MemTxResult *result,
2847 enum device_endian endian)
8df1cd07 2848{
8df1cd07 2849 uint8_t *ptr;
791af8c8 2850 uint64_t val;
5c8a00ce 2851 MemoryRegion *mr;
149f54b5
PB
2852 hwaddr l = 4;
2853 hwaddr addr1;
50013115 2854 MemTxResult r;
4840f10e 2855 bool release_lock = false;
8df1cd07 2856
41063e1e 2857 rcu_read_lock();
fdfba1a2 2858 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2859 if (l < 4 || !memory_access_is_direct(mr, false)) {
4840f10e 2860 release_lock |= prepare_mmio_access(mr);
125b3806 2861
8df1cd07 2862 /* I/O case */
50013115 2863 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
2864#if defined(TARGET_WORDS_BIGENDIAN)
2865 if (endian == DEVICE_LITTLE_ENDIAN) {
2866 val = bswap32(val);
2867 }
2868#else
2869 if (endian == DEVICE_BIG_ENDIAN) {
2870 val = bswap32(val);
2871 }
2872#endif
8df1cd07
FB
2873 } else {
2874 /* RAM case */
5c8a00ce 2875 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2876 & TARGET_PAGE_MASK)
149f54b5 2877 + addr1);
1e78bcc1
AG
2878 switch (endian) {
2879 case DEVICE_LITTLE_ENDIAN:
2880 val = ldl_le_p(ptr);
2881 break;
2882 case DEVICE_BIG_ENDIAN:
2883 val = ldl_be_p(ptr);
2884 break;
2885 default:
2886 val = ldl_p(ptr);
2887 break;
2888 }
50013115
PM
2889 r = MEMTX_OK;
2890 }
2891 if (result) {
2892 *result = r;
8df1cd07 2893 }
4840f10e
JK
2894 if (release_lock) {
2895 qemu_mutex_unlock_iothread();
2896 }
41063e1e 2897 rcu_read_unlock();
8df1cd07
FB
2898 return val;
2899}
2900
50013115
PM
2901uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2902 MemTxAttrs attrs, MemTxResult *result)
2903{
2904 return address_space_ldl_internal(as, addr, attrs, result,
2905 DEVICE_NATIVE_ENDIAN);
2906}
2907
2908uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2909 MemTxAttrs attrs, MemTxResult *result)
2910{
2911 return address_space_ldl_internal(as, addr, attrs, result,
2912 DEVICE_LITTLE_ENDIAN);
2913}
2914
2915uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2916 MemTxAttrs attrs, MemTxResult *result)
2917{
2918 return address_space_ldl_internal(as, addr, attrs, result,
2919 DEVICE_BIG_ENDIAN);
2920}
2921
fdfba1a2 2922uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2923{
50013115 2924 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2925}
2926
fdfba1a2 2927uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2928{
50013115 2929 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2930}
2931
fdfba1a2 2932uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2933{
50013115 2934 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2935}
2936
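/*
 * Illustrative sketch (not part of exec.c): reading a 32-bit little-endian
 * field from guest memory while distinguishing bus errors from good data.
 * The helper name is made up for the example.
 */
static bool example_read_le32(AddressSpace *as, hwaddr gpa, uint32_t *out)
{
    MemTxResult res;
    uint32_t v = address_space_ldl_le(as, gpa, MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return false;          /* decode error or device-reported failure */
    }
    *out = v;
    return true;
}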
84b7b8e7 2937/* warning: addr must be aligned */
50013115
PM
2938static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2939 MemTxAttrs attrs,
2940 MemTxResult *result,
2941 enum device_endian endian)
84b7b8e7 2942{
84b7b8e7
FB
2943 uint8_t *ptr;
2944 uint64_t val;
5c8a00ce 2945 MemoryRegion *mr;
149f54b5
PB
2946 hwaddr l = 8;
2947 hwaddr addr1;
50013115 2948 MemTxResult r;
4840f10e 2949 bool release_lock = false;
84b7b8e7 2950
41063e1e 2951 rcu_read_lock();
2c17449b 2952 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2953 false);
2954 if (l < 8 || !memory_access_is_direct(mr, false)) {
4840f10e 2955 release_lock |= prepare_mmio_access(mr);
125b3806 2956
84b7b8e7 2957 /* I/O case */
50013115 2958 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
2959#if defined(TARGET_WORDS_BIGENDIAN)
2960 if (endian == DEVICE_LITTLE_ENDIAN) {
2961 val = bswap64(val);
2962 }
2963#else
2964 if (endian == DEVICE_BIG_ENDIAN) {
2965 val = bswap64(val);
2966 }
84b7b8e7
FB
2967#endif
2968 } else {
2969 /* RAM case */
5c8a00ce 2970 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2971 & TARGET_PAGE_MASK)
149f54b5 2972 + addr1);
1e78bcc1
AG
2973 switch (endian) {
2974 case DEVICE_LITTLE_ENDIAN:
2975 val = ldq_le_p(ptr);
2976 break;
2977 case DEVICE_BIG_ENDIAN:
2978 val = ldq_be_p(ptr);
2979 break;
2980 default:
2981 val = ldq_p(ptr);
2982 break;
2983 }
50013115
PM
2984 r = MEMTX_OK;
2985 }
2986 if (result) {
2987 *result = r;
84b7b8e7 2988 }
4840f10e
JK
2989 if (release_lock) {
2990 qemu_mutex_unlock_iothread();
2991 }
41063e1e 2992 rcu_read_unlock();
84b7b8e7
FB
2993 return val;
2994}
2995
50013115
PM
2996uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2997 MemTxAttrs attrs, MemTxResult *result)
2998{
2999 return address_space_ldq_internal(as, addr, attrs, result,
3000 DEVICE_NATIVE_ENDIAN);
3001}
3002
3003uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3004 MemTxAttrs attrs, MemTxResult *result)
3005{
3006 return address_space_ldq_internal(as, addr, attrs, result,
3007 DEVICE_LITTLE_ENDIAN);
3008}
3009
3010uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3011 MemTxAttrs attrs, MemTxResult *result)
3012{
3013 return address_space_ldq_internal(as, addr, attrs, result,
3014 DEVICE_BIG_ENDIAN);
3015}
3016
2c17449b 3017uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3018{
50013115 3019 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3020}
3021
2c17449b 3022uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3023{
50013115 3024 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3025}
3026
2c17449b 3027uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3028{
50013115 3029 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3030}
3031
aab33094 3032/* XXX: optimize */
50013115
PM
3033uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3034 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3035{
3036 uint8_t val;
50013115
PM
3037 MemTxResult r;
3038
3039 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3040 if (result) {
3041 *result = r;
3042 }
aab33094
FB
3043 return val;
3044}
3045
50013115
PM
3046uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3047{
3048 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3049}
3050
733f0b02 3051/* warning: addr must be aligned */
50013115
PM
3052static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3053 hwaddr addr,
3054 MemTxAttrs attrs,
3055 MemTxResult *result,
3056 enum device_endian endian)
aab33094 3057{
733f0b02
MT
3058 uint8_t *ptr;
3059 uint64_t val;
5c8a00ce 3060 MemoryRegion *mr;
149f54b5
PB
3061 hwaddr l = 2;
3062 hwaddr addr1;
50013115 3063 MemTxResult r;
4840f10e 3064 bool release_lock = false;
733f0b02 3065
41063e1e 3066 rcu_read_lock();
41701aa4 3067 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3068 false);
3069 if (l < 2 || !memory_access_is_direct(mr, false)) {
4840f10e 3070 release_lock |= prepare_mmio_access(mr);
125b3806 3071
733f0b02 3072 /* I/O case */
50013115 3073 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
3074#if defined(TARGET_WORDS_BIGENDIAN)
3075 if (endian == DEVICE_LITTLE_ENDIAN) {
3076 val = bswap16(val);
3077 }
3078#else
3079 if (endian == DEVICE_BIG_ENDIAN) {
3080 val = bswap16(val);
3081 }
3082#endif
733f0b02
MT
3083 } else {
3084 /* RAM case */
5c8a00ce 3085 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 3086 & TARGET_PAGE_MASK)
149f54b5 3087 + addr1);
1e78bcc1
AG
3088 switch (endian) {
3089 case DEVICE_LITTLE_ENDIAN:
3090 val = lduw_le_p(ptr);
3091 break;
3092 case DEVICE_BIG_ENDIAN:
3093 val = lduw_be_p(ptr);
3094 break;
3095 default:
3096 val = lduw_p(ptr);
3097 break;
3098 }
50013115
PM
3099 r = MEMTX_OK;
3100 }
3101 if (result) {
3102 *result = r;
733f0b02 3103 }
4840f10e
JK
3104 if (release_lock) {
3105 qemu_mutex_unlock_iothread();
3106 }
41063e1e 3107 rcu_read_unlock();
733f0b02 3108 return val;
aab33094
FB
3109}
3110
50013115
PM
3111uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3112 MemTxAttrs attrs, MemTxResult *result)
3113{
3114 return address_space_lduw_internal(as, addr, attrs, result,
3115 DEVICE_NATIVE_ENDIAN);
3116}
3117
3118uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3119 MemTxAttrs attrs, MemTxResult *result)
3120{
3121 return address_space_lduw_internal(as, addr, attrs, result,
3122 DEVICE_LITTLE_ENDIAN);
3123}
3124
3125uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3126 MemTxAttrs attrs, MemTxResult *result)
3127{
3128 return address_space_lduw_internal(as, addr, attrs, result,
3129 DEVICE_BIG_ENDIAN);
3130}
3131
41701aa4 3132uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3133{
50013115 3134 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3135}
3136
41701aa4 3137uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3138{
50013115 3139 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3140}
3141
41701aa4 3142uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3143{
50013115 3144 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3145}
3146
8df1cd07
FB
3147/* warning: addr must be aligned. The ram page is not masked as dirty
3148 and the code inside is not invalidated. It is useful if the dirty
3149 bits are used to track modified PTEs */
50013115
PM
3150void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3151 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 3152{
8df1cd07 3153 uint8_t *ptr;
5c8a00ce 3154 MemoryRegion *mr;
149f54b5
PB
3155 hwaddr l = 4;
3156 hwaddr addr1;
50013115 3157 MemTxResult r;
845b6214 3158 uint8_t dirty_log_mask;
4840f10e 3159 bool release_lock = false;
8df1cd07 3160
41063e1e 3161 rcu_read_lock();
2198a121 3162 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3163 true);
3164 if (l < 4 || !memory_access_is_direct(mr, true)) {
4840f10e 3165 release_lock |= prepare_mmio_access(mr);
125b3806 3166
50013115 3167 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3168 } else {
5c8a00ce 3169 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3170 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3171 stl_p(ptr, val);
74576198 3172
845b6214
PB
3173 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3174 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
58d2707e 3175 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
50013115
PM
3176 r = MEMTX_OK;
3177 }
3178 if (result) {
3179 *result = r;
8df1cd07 3180 }
4840f10e
JK
3181 if (release_lock) {
3182 qemu_mutex_unlock_iothread();
3183 }
41063e1e 3184 rcu_read_unlock();
8df1cd07
FB
3185}
3186
50013115
PM
3187void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3188{
3189 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3190}
3191
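/*
 * Illustrative sketch (not part of exec.c) of why the _notdirty variant
 * exists: target MMU code that sets an accessed/dirty bit in a guest
 * page-table entry updates RAM without triggering code invalidation,
 * since the guest did not architecturally write the page.  The PTE layout
 * and helper name are made up for the example.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                     uint32_t pte, uint32_t accessed_bit)
{
    if (!(pte & accessed_bit)) {
        stl_phys_notdirty(as, pte_addr, pte | accessed_bit);
    }
}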
8df1cd07 3192/* warning: addr must be aligned */
50013115
PM
3193static inline void address_space_stl_internal(AddressSpace *as,
3194 hwaddr addr, uint32_t val,
3195 MemTxAttrs attrs,
3196 MemTxResult *result,
3197 enum device_endian endian)
8df1cd07 3198{
8df1cd07 3199 uint8_t *ptr;
5c8a00ce 3200 MemoryRegion *mr;
149f54b5
PB
3201 hwaddr l = 4;
3202 hwaddr addr1;
50013115 3203 MemTxResult r;
4840f10e 3204 bool release_lock = false;
8df1cd07 3205
41063e1e 3206 rcu_read_lock();
ab1da857 3207 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3208 true);
3209 if (l < 4 || !memory_access_is_direct(mr, true)) {
4840f10e 3210 release_lock |= prepare_mmio_access(mr);
125b3806 3211
1e78bcc1
AG
3212#if defined(TARGET_WORDS_BIGENDIAN)
3213 if (endian == DEVICE_LITTLE_ENDIAN) {
3214 val = bswap32(val);
3215 }
3216#else
3217 if (endian == DEVICE_BIG_ENDIAN) {
3218 val = bswap32(val);
3219 }
3220#endif
50013115 3221 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3222 } else {
8df1cd07 3223 /* RAM case */
5c8a00ce 3224 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3225 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3226 switch (endian) {
3227 case DEVICE_LITTLE_ENDIAN:
3228 stl_le_p(ptr, val);
3229 break;
3230 case DEVICE_BIG_ENDIAN:
3231 stl_be_p(ptr, val);
3232 break;
3233 default:
3234 stl_p(ptr, val);
3235 break;
3236 }
845b6214 3237 invalidate_and_set_dirty(mr, addr1, 4);
50013115
PM
3238 r = MEMTX_OK;
3239 }
3240 if (result) {
3241 *result = r;
8df1cd07 3242 }
4840f10e
JK
3243 if (release_lock) {
3244 qemu_mutex_unlock_iothread();
3245 }
41063e1e 3246 rcu_read_unlock();
8df1cd07
FB
3247}
3248
50013115
PM
3249void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3250 MemTxAttrs attrs, MemTxResult *result)
3251{
3252 address_space_stl_internal(as, addr, val, attrs, result,
3253 DEVICE_NATIVE_ENDIAN);
3254}
3255
3256void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3257 MemTxAttrs attrs, MemTxResult *result)
3258{
3259 address_space_stl_internal(as, addr, val, attrs, result,
3260 DEVICE_LITTLE_ENDIAN);
3261}
3262
3263void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3264 MemTxAttrs attrs, MemTxResult *result)
3265{
3266 address_space_stl_internal(as, addr, val, attrs, result,
3267 DEVICE_BIG_ENDIAN);
3268}
3269
ab1da857 3270void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3271{
50013115 3272 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3273}
3274
ab1da857 3275void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3276{
50013115 3277 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3278}
3279
ab1da857 3280void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3281{
50013115 3282 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3283}
3284
aab33094 3285/* XXX: optimize */
50013115
PM
3286void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3287 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3288{
3289 uint8_t v = val;
50013115
PM
3290 MemTxResult r;
3291
3292 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3293 if (result) {
3294 *result = r;
3295 }
3296}
3297
3298void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3299{
3300 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094
FB
3301}
3302
733f0b02 3303/* warning: addr must be aligned */
50013115
PM
3304static inline void address_space_stw_internal(AddressSpace *as,
3305 hwaddr addr, uint32_t val,
3306 MemTxAttrs attrs,
3307 MemTxResult *result,
3308 enum device_endian endian)
aab33094 3309{
733f0b02 3310 uint8_t *ptr;
5c8a00ce 3311 MemoryRegion *mr;
149f54b5
PB
3312 hwaddr l = 2;
3313 hwaddr addr1;
50013115 3314 MemTxResult r;
4840f10e 3315 bool release_lock = false;
733f0b02 3316
41063e1e 3317 rcu_read_lock();
5ce5944d 3318 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3319 if (l < 2 || !memory_access_is_direct(mr, true)) {
4840f10e 3320 release_lock |= prepare_mmio_access(mr);
125b3806 3321
1e78bcc1
AG
3322#if defined(TARGET_WORDS_BIGENDIAN)
3323 if (endian == DEVICE_LITTLE_ENDIAN) {
3324 val = bswap16(val);
3325 }
3326#else
3327 if (endian == DEVICE_BIG_ENDIAN) {
3328 val = bswap16(val);
3329 }
3330#endif
50013115 3331 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3332 } else {
733f0b02 3333 /* RAM case */
5c8a00ce 3334 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 3335 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3336 switch (endian) {
3337 case DEVICE_LITTLE_ENDIAN:
3338 stw_le_p(ptr, val);
3339 break;
3340 case DEVICE_BIG_ENDIAN:
3341 stw_be_p(ptr, val);
3342 break;
3343 default:
3344 stw_p(ptr, val);
3345 break;
3346 }
845b6214 3347 invalidate_and_set_dirty(mr, addr1, 2);
50013115
PM
3348 r = MEMTX_OK;
3349 }
3350 if (result) {
3351 *result = r;
733f0b02 3352 }
4840f10e
JK
3353 if (release_lock) {
3354 qemu_mutex_unlock_iothread();
3355 }
41063e1e 3356 rcu_read_unlock();
aab33094
FB
3357}
3358
50013115
PM
3359void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3360 MemTxAttrs attrs, MemTxResult *result)
3361{
3362 address_space_stw_internal(as, addr, val, attrs, result,
3363 DEVICE_NATIVE_ENDIAN);
3364}
3365
3366void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3367 MemTxAttrs attrs, MemTxResult *result)
3368{
3369 address_space_stw_internal(as, addr, val, attrs, result,
3370 DEVICE_LITTLE_ENDIAN);
3371}
3372
3373void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3374 MemTxAttrs attrs, MemTxResult *result)
3375{
3376 address_space_stw_internal(as, addr, val, attrs, result,
3377 DEVICE_BIG_ENDIAN);
3378}
3379
5ce5944d 3380void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3381{
50013115 3382 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3383}
3384
5ce5944d 3385void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3386{
50013115 3387 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3388}
3389
5ce5944d 3390void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3391{
50013115 3392 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3393}
3394
aab33094 3395/* XXX: optimize */
50013115
PM
3396void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3397 MemTxAttrs attrs, MemTxResult *result)
aab33094 3398{
50013115 3399 MemTxResult r;
aab33094 3400 val = tswap64(val);
50013115
PM
3401 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3402 if (result) {
3403 *result = r;
3404 }
aab33094
FB
3405}
3406
50013115
PM
3407void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3408 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3409{
50013115 3410 MemTxResult r;
1e78bcc1 3411 val = cpu_to_le64(val);
50013115
PM
3412 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3413 if (result) {
3414 *result = r;
3415 }
3416}
3417void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3418 MemTxAttrs attrs, MemTxResult *result)
3419{
3420 MemTxResult r;
3421 val = cpu_to_be64(val);
3422 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3423 if (result) {
3424 *result = r;
3425 }
3426}
3427
3428void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3429{
3430 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3431}
3432
3433void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3434{
3435 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3436}
3437
f606604f 3438void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3439{
50013115 3440 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3441}
3442
5e2972fd 3443/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3444int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3445 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3446{
3447 int l;
a8170e5e 3448 hwaddr phys_addr;
9b3c35e0 3449 target_ulong page;
13eb76e0
FB
3450
3451 while (len > 0) {
3452 page = addr & TARGET_PAGE_MASK;
f17ec444 3453 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
3454 /* if no physical page mapped, return an error */
3455 if (phys_addr == -1)
3456 return -1;
3457 l = (page + TARGET_PAGE_SIZE) - addr;
3458 if (l > len)
3459 l = len;
5e2972fd 3460 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
3461 if (is_write) {
3462 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3463 } else {
5c9eb028
PM
3464 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3465 buf, l, 0);
2e38847b 3466 }
13eb76e0
FB
3467 len -= l;
3468 buf += l;
3469 addr += l;
3470 }
3471 return 0;
3472}
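/*
 * Illustrative sketch (not part of exec.c): a gdbstub-style caller reading
 * guest-virtual memory through the debug accessor above, which performs
 * the MMU translation per page.  The helper name is made up for the
 * example.
 */
static bool example_read_guest_virt(CPUState *cpu, target_ulong va,
                                    void *buf, int len)
{
    return cpu_memory_rw_debug(cpu, va, buf, len, 0) == 0;
}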
a68fe89c 3473#endif
13eb76e0 3474
8e4a424b
BS
3475/*
3476 * A helper function for the _utterly broken_ virtio device model to find out if
3477 * it's running on a big endian machine. Don't do this at home kids!
3478 */
98ed8ecf
GK
3479bool target_words_bigendian(void);
3480bool target_words_bigendian(void)
8e4a424b
BS
3481{
3482#if defined(TARGET_WORDS_BIGENDIAN)
3483 return true;
3484#else
3485 return false;
3486#endif
3487}
3488
76f35538 3489#ifndef CONFIG_USER_ONLY
a8170e5e 3490bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3491{
5c8a00ce 3492 MemoryRegion *mr;
149f54b5 3493 hwaddr l = 1;
41063e1e 3494 bool res;
76f35538 3495
41063e1e 3496 rcu_read_lock();
5c8a00ce
PB
3497 mr = address_space_translate(&address_space_memory,
3498 phys_addr, &phys_addr, &l, false);
76f35538 3499
41063e1e
PB
3500 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3501 rcu_read_unlock();
3502 return res;
76f35538 3503}
bd2fa51f 3504
e3807054 3505int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
bd2fa51f
MH
3506{
3507 RAMBlock *block;
e3807054 3508 int ret = 0;
bd2fa51f 3509
0dc3f44a
MD
3510 rcu_read_lock();
3511 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
e3807054
DDAG
3512 ret = func(block->idstr, block->host, block->offset,
3513 block->used_length, opaque);
3514 if (ret) {
3515 break;
3516 }
bd2fa51f 3517 }
0dc3f44a 3518 rcu_read_unlock();
e3807054 3519 return ret;
bd2fa51f 3520}
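/*
 * Illustrative sketch (not part of exec.c): summing the used size of every
 * RAM block via the iterator above.  The callback parameters mirror the
 * call made in qemu_ram_foreach_block(); the helper names and accumulator
 * are made up for the example.
 */
static int example_count_block(const char *idstr, void *host,
                               ram_addr_t offset, ram_addr_t used_length,
                               void *opaque)
{
    uint64_t *total = opaque;

    *total += used_length;
    return 0;                  /* returning non-zero stops the iteration */
}

static uint64_t example_total_ram_used(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_block, &total);
    return total;
}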
ec3f8c99 3521#endif