qemu.git / exec.c (blame at commit "memory: move RAM_PREALLOC_MASK to exec.c, rename")
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9
PB
34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
582b55a9 53#include "qemu/cache-utils.h"
67d95c15 54
b35ba30f
MT
55#include "qemu/range.h"
56
db7b5426 57//#define DEBUG_SUBPAGE
1196be37 58
e2eef170 59#if !defined(CONFIG_USER_ONLY)
981fdf23 60static bool in_migration;
94a6b54f 61
a3161038 62RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
63
64static MemoryRegion *system_memory;
309cb471 65static MemoryRegion *system_io;
62152b8a 66
f6790af6
AK
67AddressSpace address_space_io;
68AddressSpace address_space_memory;
2673a5da 69
0844e007 70MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 71static MemoryRegion io_mem_unassigned;
0e0df1e2 72
7bd4f430
PB
73/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
74#define RAM_PREALLOC (1 << 0)
75
e2eef170 76#endif
9fa3e853 77
bdc44640 78struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
79/* current CPU in the current thread. It is only valid inside
80 cpu_exec() */
4917cf44 81DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 82/* 0 = Do not count executed instructions.
bf20dc07 83 1 = Precise instruction counting.
2e70f6ef 84 2 = Adaptive rate instruction counting. */
5708fc66 85int use_icount;
6a00d601 86
e2eef170 87#if !defined(CONFIG_USER_ONLY)
4346ae3e 88
1db8abb1
PB
89typedef struct PhysPageEntry PhysPageEntry;
90
91struct PhysPageEntry {
9736e55b 92 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 93 uint32_t skip : 6;
9736e55b 94 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 95 uint32_t ptr : 26;
1db8abb1
PB
96};
97
8b795765
MT
98#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
99
03f49957 100/* Size of the L2 (and L3, etc) page tables. */
57271d63 101#define ADDR_SPACE_BITS 64
03f49957 102
026736ce 103#define P_L2_BITS 9
03f49957
PB
104#define P_L2_SIZE (1 << P_L2_BITS)
105
106#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
107
108typedef PhysPageEntry Node[P_L2_SIZE];
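/* Editor's worked example (sketch, assuming 4 KiB target pages, i.e.
 * TARGET_PAGE_BITS == 12): with ADDR_SPACE_BITS == 64 and P_L2_BITS == 9,
 * P_L2_LEVELS evaluates to ((64 - 12 - 1) / 9) + 1 == 6, so a physical
 * address is resolved through at most six 512-entry (P_L2_SIZE) nodes
 * before reaching a MemoryRegionSection.  The 6-bit 'skip' field of
 * PhysPageEntry is wide enough to jump across any of those levels once
 * the map has been compacted. */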
0475d94f 109
53cb28cb
MA
110typedef struct PhysPageMap {
111 unsigned sections_nb;
112 unsigned sections_nb_alloc;
113 unsigned nodes_nb;
114 unsigned nodes_nb_alloc;
115 Node *nodes;
116 MemoryRegionSection *sections;
117} PhysPageMap;
118
1db8abb1
PB
119struct AddressSpaceDispatch {
120 /* This is a multi-level map on the physical address space.
121 * The bottom level has pointers to MemoryRegionSections.
122 */
123 PhysPageEntry phys_map;
53cb28cb 124 PhysPageMap map;
acc9d80b 125 AddressSpace *as;
1db8abb1
PB
126};
127
90260c6c
JK
128#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
129typedef struct subpage_t {
130 MemoryRegion iomem;
acc9d80b 131 AddressSpace *as;
90260c6c
JK
132 hwaddr base;
133 uint16_t sub_section[TARGET_PAGE_SIZE];
134} subpage_t;
135
b41aac4f
LPF
136#define PHYS_SECTION_UNASSIGNED 0
137#define PHYS_SECTION_NOTDIRTY 1
138#define PHYS_SECTION_ROM 2
139#define PHYS_SECTION_WATCH 3
5312bd8b 140
e2eef170 141static void io_mem_init(void);
62152b8a 142static void memory_map_init(void);
09daed84 143static void tcg_commit(MemoryListener *listener);
e2eef170 144
1ec9b909 145static MemoryRegion io_mem_watch;
6658ffb8 146#endif
fd6ce8f6 147
6d9a1304 148#if !defined(CONFIG_USER_ONLY)
d6f2ea22 149
53cb28cb 150static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 151{
53cb28cb
MA
152 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
153 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
154 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
155 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 156 }
f7bf5461
AK
157}
158
53cb28cb 159static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461
AK
160{
161 unsigned i;
8b795765 162 uint32_t ret;
f7bf5461 163
53cb28cb 164 ret = map->nodes_nb++;
f7bf5461 165 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 166 assert(ret != map->nodes_nb_alloc);
03f49957 167 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb
MA
168 map->nodes[ret][i].skip = 1;
169 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 170 }
f7bf5461 171 return ret;
d6f2ea22
AK
172}
173
53cb28cb
MA
174static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
175 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 176 int level)
f7bf5461
AK
177{
178 PhysPageEntry *p;
179 int i;
03f49957 180 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 181
9736e55b 182 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb
MA
183 lp->ptr = phys_map_node_alloc(map);
184 p = map->nodes[lp->ptr];
f7bf5461 185 if (level == 0) {
03f49957 186 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 187 p[i].skip = 0;
b41aac4f 188 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 189 }
67c4d23c 190 }
f7bf5461 191 } else {
53cb28cb 192 p = map->nodes[lp->ptr];
92e873b9 193 }
03f49957 194 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 195
03f49957 196 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 197 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 198 lp->skip = 0;
c19e8800 199 lp->ptr = leaf;
07f07b31
AK
200 *index += step;
201 *nb -= step;
2999097b 202 } else {
53cb28cb 203 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
204 }
205 ++lp;
f7bf5461
AK
206 }
207}
208
ac1970fb 209static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 210 hwaddr index, hwaddr nb,
2999097b 211 uint16_t leaf)
f7bf5461 212{
2999097b 213 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 214 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 215
53cb28cb 216 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
217}
218
b35ba30f
MT
219/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
220 * and update our entry so we can skip it and go directly to the destination.
221 */
222static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
223{
224 unsigned valid_ptr = P_L2_SIZE;
225 int valid = 0;
226 PhysPageEntry *p;
227 int i;
228
229 if (lp->ptr == PHYS_MAP_NODE_NIL) {
230 return;
231 }
232
233 p = nodes[lp->ptr];
234 for (i = 0; i < P_L2_SIZE; i++) {
235 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
236 continue;
237 }
238
239 valid_ptr = i;
240 valid++;
241 if (p[i].skip) {
242 phys_page_compact(&p[i], nodes, compacted);
243 }
244 }
245
246 /* We can only compress if there's only one child. */
247 if (valid != 1) {
248 return;
249 }
250
251 assert(valid_ptr < P_L2_SIZE);
252
253 /* Don't compress if it won't fit in the # of bits we have. */
254 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
255 return;
256 }
257
258 lp->ptr = p[valid_ptr].ptr;
259 if (!p[valid_ptr].skip) {
260 /* If our only child is a leaf, make this a leaf. */
261 /* By design, we should have made this node a leaf to begin with so we
262 * should never reach here.
263 * But since it's so simple to handle this, let's do it just in case we
264 * change this rule.
265 */
266 lp->skip = 0;
267 } else {
268 lp->skip += p[valid_ptr].skip;
269 }
270}
271
272static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
273{
274 DECLARE_BITMAP(compacted, nodes_nb);
275
276 if (d->phys_map.skip) {
53cb28cb 277 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
278 }
279}
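/* Editor's note: compaction collapses chains of single-child intermediate
 * nodes by accumulating their 'skip' values into the parent entry.  A map
 * that only describes a small region deep in the address space would
 * otherwise pay a full P_L2_LEVELS walk on every lookup; after
 * phys_page_compact_all() the walk in phys_page_find() below can follow
 * 'skip' across the empty levels in one step. */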
280
97115a8d 281static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 282 Node *nodes, MemoryRegionSection *sections)
92e873b9 283{
31ab2b4a 284 PhysPageEntry *p;
97115a8d 285 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 286 int i;
f1f6e3b8 287
9736e55b 288 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 289 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 290 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 291 }
9affd6fc 292 p = nodes[lp.ptr];
03f49957 293 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 294 }
b35ba30f
MT
295
296 if (sections[lp.ptr].size.hi ||
297 range_covers_byte(sections[lp.ptr].offset_within_address_space,
298 sections[lp.ptr].size.lo, addr)) {
299 return &sections[lp.ptr];
300 } else {
301 return &sections[PHYS_SECTION_UNASSIGNED];
302 }
f3705d53
AK
303}
304
e5548617
BS
305bool memory_region_is_unassigned(MemoryRegion *mr)
306{
2a8e7499 307 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 308 && mr != &io_mem_watch;
fd6ce8f6 309}
149f54b5 310
c7086b4a 311static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
312 hwaddr addr,
313 bool resolve_subpage)
9f029603 314{
90260c6c
JK
315 MemoryRegionSection *section;
316 subpage_t *subpage;
317
53cb28cb 318 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
319 if (resolve_subpage && section->mr->subpage) {
320 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 321 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
322 }
323 return section;
9f029603
JK
324}
325
90260c6c 326static MemoryRegionSection *
c7086b4a 327address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 328 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
329{
330 MemoryRegionSection *section;
a87f3954 331 Int128 diff;
149f54b5 332
c7086b4a 333 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
334 /* Compute offset within MemoryRegionSection */
335 addr -= section->offset_within_address_space;
336
337 /* Compute offset within MemoryRegion */
338 *xlat = addr + section->offset_within_region;
339
340 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 341 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
342 return section;
343}
90260c6c 344
a87f3954
PB
345static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
346{
347 if (memory_region_is_ram(mr)) {
348 return !(is_write && mr->readonly);
349 }
350 if (memory_region_is_romd(mr)) {
351 return !is_write;
352 }
353
354 return false;
355}
356
5c8a00ce
PB
357MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
358 hwaddr *xlat, hwaddr *plen,
359 bool is_write)
90260c6c 360{
30951157
AK
361 IOMMUTLBEntry iotlb;
362 MemoryRegionSection *section;
363 MemoryRegion *mr;
364 hwaddr len = *plen;
365
366 for (;;) {
a87f3954 367 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
368 mr = section->mr;
369
370 if (!mr->iommu_ops) {
371 break;
372 }
373
374 iotlb = mr->iommu_ops->translate(mr, addr);
375 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
376 | (addr & iotlb.addr_mask));
377 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
378 if (!(iotlb.perm & (1 << is_write))) {
379 mr = &io_mem_unassigned;
380 break;
381 }
382
383 as = iotlb.target_as;
384 }
385
fe680d0d 386 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954
PB
387 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
388 len = MIN(page, len);
389 }
390
30951157
AK
391 *plen = len;
392 *xlat = addr;
393 return mr;
90260c6c
JK
394}
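/* Usage sketch (editor's illustration, not code from the tree): callers
 * normally translate and consume one fragment at a time, e.g.
 *
 *     hwaddr xlat, l = len;
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l, is_write);
 *     // access the first 'l' bytes of 'mr' at offset 'xlat', then
 *     // advance addr/len by 'l' and repeat for the remainder.
 *
 * Each iteration may cross an IOMMU into another AddressSpace, and 'l'
 * is clamped so a fragment never spans two sections (or, under Xen, a
 * target page boundary). */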
395
396MemoryRegionSection *
397address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
398 hwaddr *plen)
399{
30951157 400 MemoryRegionSection *section;
c7086b4a 401 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157
AK
402
403 assert(!section->mr->iommu_ops);
404 return section;
90260c6c 405}
5b6dd868 406#endif
fd6ce8f6 407
5b6dd868 408void cpu_exec_init_all(void)
fdbb84d1 409{
5b6dd868 410#if !defined(CONFIG_USER_ONLY)
b2a8658e 411 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
412 memory_map_init();
413 io_mem_init();
fdbb84d1 414#endif
5b6dd868 415}
fdbb84d1 416
b170fce3 417#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
418
419static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 420{
259186a7 421 CPUState *cpu = opaque;
a513fe19 422
5b6dd868
BS
423 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
424 version_id is increased. */
259186a7 425 cpu->interrupt_request &= ~0x01;
c01a71c1 426 tlb_flush(cpu, 1);
5b6dd868
BS
427
428 return 0;
a513fe19 429}
7501267e 430
1a1562f5 431const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
432 .name = "cpu_common",
433 .version_id = 1,
434 .minimum_version_id = 1,
5b6dd868 435 .post_load = cpu_common_post_load,
35d08458 436 .fields = (VMStateField[]) {
259186a7
AF
437 VMSTATE_UINT32(halted, CPUState),
438 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
439 VMSTATE_END_OF_LIST()
440 }
441};
1a1562f5 442
5b6dd868 443#endif
ea041c0e 444
38d8f5c8 445CPUState *qemu_get_cpu(int index)
ea041c0e 446{
bdc44640 447 CPUState *cpu;
ea041c0e 448
bdc44640 449 CPU_FOREACH(cpu) {
55e5c285 450 if (cpu->cpu_index == index) {
bdc44640 451 return cpu;
55e5c285 452 }
ea041c0e 453 }
5b6dd868 454
bdc44640 455 return NULL;
ea041c0e
FB
456}
457
09daed84
EI
458#if !defined(CONFIG_USER_ONLY)
459void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
460{
461 /* We only support one address space per cpu at the moment. */
462 assert(cpu->as == as);
463
464 if (cpu->tcg_as_listener) {
465 memory_listener_unregister(cpu->tcg_as_listener);
466 } else {
467 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
468 }
469 cpu->tcg_as_listener->commit = tcg_commit;
470 memory_listener_register(cpu->tcg_as_listener, as);
471}
472#endif
473
5b6dd868 474void cpu_exec_init(CPUArchState *env)
ea041c0e 475{
5b6dd868 476 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 477 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 478 CPUState *some_cpu;
5b6dd868
BS
479 int cpu_index;
480
481#if defined(CONFIG_USER_ONLY)
482 cpu_list_lock();
483#endif
5b6dd868 484 cpu_index = 0;
bdc44640 485 CPU_FOREACH(some_cpu) {
5b6dd868
BS
486 cpu_index++;
487 }
55e5c285 488 cpu->cpu_index = cpu_index;
1b1ed8dc 489 cpu->numa_node = 0;
f0c3c505 490 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 491 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 492#ifndef CONFIG_USER_ONLY
09daed84 493 cpu->as = &address_space_memory;
5b6dd868
BS
494 cpu->thread_id = qemu_get_thread_id();
495#endif
bdc44640 496 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
497#if defined(CONFIG_USER_ONLY)
498 cpu_list_unlock();
499#endif
e0d47944
AF
500 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
501 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
502 }
5b6dd868 503#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
504 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
505 cpu_save, cpu_load, env);
b170fce3 506 assert(cc->vmsd == NULL);
e0d47944 507 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 508#endif
b170fce3
AF
509 if (cc->vmsd != NULL) {
510 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
511 }
ea041c0e
FB
512}
513
1fddef4b 514#if defined(TARGET_HAS_ICE)
94df27fd 515#if defined(CONFIG_USER_ONLY)
00b941e5 516static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
517{
518 tb_invalidate_phys_page_range(pc, pc + 1, 0);
519}
520#else
00b941e5 521static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 522{
e8262a1b
MF
523 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
524 if (phys != -1) {
09daed84 525 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 526 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 527 }
1e7855a5 528}
c27004ec 529#endif
94df27fd 530#endif /* TARGET_HAS_ICE */
d720b93d 531
c527ee8f 532#if defined(CONFIG_USER_ONLY)
75a34036 533void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
534
535{
536}
537
75a34036 538int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
539 int flags, CPUWatchpoint **watchpoint)
540{
541 return -ENOSYS;
542}
543#else
6658ffb8 544/* Add a watchpoint. */
75a34036 545int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 546 int flags, CPUWatchpoint **watchpoint)
6658ffb8 547{
75a34036 548 vaddr len_mask = ~(len - 1);
c0ce998e 549 CPUWatchpoint *wp;
6658ffb8 550
b4051334 551 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
552 if ((len & (len - 1)) || (addr & ~len_mask) ||
553 len == 0 || len > TARGET_PAGE_SIZE) {
75a34036
AF
554 error_report("tried to set invalid watchpoint at %"
555 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
556 return -EINVAL;
557 }
7267c094 558 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
559
560 wp->vaddr = addr;
b4051334 561 wp->len_mask = len_mask;
a1d1bb31
AL
562 wp->flags = flags;
563
2dc9f411 564 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
565 if (flags & BP_GDB) {
566 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
567 } else {
568 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
569 }
6658ffb8 570
31b030d4 571 tlb_flush_page(cpu, addr);
a1d1bb31
AL
572
573 if (watchpoint)
574 *watchpoint = wp;
575 return 0;
6658ffb8
PB
576}
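/* Example (editor's illustration): a 4-byte watchpoint at a 4-byte
 * aligned address has len_mask == ~(vaddr)3, so wp->vaddr and
 * wp->len_mask describe the covered window and check_watchpoint() can
 * test a hit with simple mask comparisons.  The sanity checks above
 * reject zero or non-power-of-two lengths, unaligned addresses and
 * lengths larger than a target page, since those cannot be encoded this
 * way. */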
577
a1d1bb31 578/* Remove a specific watchpoint. */
75a34036 579int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 580 int flags)
6658ffb8 581{
75a34036 582 vaddr len_mask = ~(len - 1);
a1d1bb31 583 CPUWatchpoint *wp;
6658ffb8 584
ff4700b0 585 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
b4051334 586 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 587 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 588 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
589 return 0;
590 }
591 }
a1d1bb31 592 return -ENOENT;
6658ffb8
PB
593}
594
a1d1bb31 595/* Remove a specific watchpoint by reference. */
75a34036 596void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 597{
ff4700b0 598 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 599
31b030d4 600 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 601
7267c094 602 g_free(watchpoint);
a1d1bb31
AL
603}
604
605/* Remove all matching watchpoints. */
75a34036 606void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 607{
c0ce998e 608 CPUWatchpoint *wp, *next;
a1d1bb31 609
ff4700b0 610 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
611 if (wp->flags & mask) {
612 cpu_watchpoint_remove_by_ref(cpu, wp);
613 }
c0ce998e 614 }
7d03f82f 615}
c527ee8f 616#endif
7d03f82f 617
a1d1bb31 618/* Add a breakpoint. */
b3310ab3 619int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 620 CPUBreakpoint **breakpoint)
4c3a88a2 621{
1fddef4b 622#if defined(TARGET_HAS_ICE)
c0ce998e 623 CPUBreakpoint *bp;
3b46e624 624
7267c094 625 bp = g_malloc(sizeof(*bp));
4c3a88a2 626
a1d1bb31
AL
627 bp->pc = pc;
628 bp->flags = flags;
629
2dc9f411 630 /* keep all GDB-injected breakpoints in front */
00b941e5 631 if (flags & BP_GDB) {
f0c3c505 632 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 633 } else {
f0c3c505 634 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 635 }
3b46e624 636
f0c3c505 637 breakpoint_invalidate(cpu, pc);
a1d1bb31 638
00b941e5 639 if (breakpoint) {
a1d1bb31 640 *breakpoint = bp;
00b941e5 641 }
4c3a88a2
FB
642 return 0;
643#else
a1d1bb31 644 return -ENOSYS;
4c3a88a2
FB
645#endif
646}
647
a1d1bb31 648/* Remove a specific breakpoint. */
b3310ab3 649int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 650{
7d03f82f 651#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
652 CPUBreakpoint *bp;
653
f0c3c505 654 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 655 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 656 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
657 return 0;
658 }
7d03f82f 659 }
a1d1bb31
AL
660 return -ENOENT;
661#else
662 return -ENOSYS;
7d03f82f
EI
663#endif
664}
665
a1d1bb31 666/* Remove a specific breakpoint by reference. */
b3310ab3 667void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 668{
1fddef4b 669#if defined(TARGET_HAS_ICE)
f0c3c505
AF
670 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
671
672 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 673
7267c094 674 g_free(breakpoint);
a1d1bb31
AL
675#endif
676}
677
678/* Remove all matching breakpoints. */
b3310ab3 679void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31
AL
680{
681#if defined(TARGET_HAS_ICE)
c0ce998e 682 CPUBreakpoint *bp, *next;
a1d1bb31 683
f0c3c505 684 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
685 if (bp->flags & mask) {
686 cpu_breakpoint_remove_by_ref(cpu, bp);
687 }
c0ce998e 688 }
4c3a88a2
FB
689#endif
690}
691
c33a346e
FB
692/* enable or disable single step mode. EXCP_DEBUG is returned by the
693 CPU loop after each instruction */
3825b28f 694void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 695{
1fddef4b 696#if defined(TARGET_HAS_ICE)
ed2803da
AF
697 if (cpu->singlestep_enabled != enabled) {
698 cpu->singlestep_enabled = enabled;
699 if (kvm_enabled()) {
38e478ec 700 kvm_update_guest_debug(cpu, 0);
ed2803da 701 } else {
ccbb4d44 702 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 703 /* XXX: only flush what is necessary */
38e478ec 704 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
705 tb_flush(env);
706 }
c33a346e
FB
707 }
708#endif
709}
710
a47dddd7 711void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
712{
713 va_list ap;
493ae1f0 714 va_list ap2;
7501267e
FB
715
716 va_start(ap, fmt);
493ae1f0 717 va_copy(ap2, ap);
7501267e
FB
718 fprintf(stderr, "qemu: fatal: ");
719 vfprintf(stderr, fmt, ap);
720 fprintf(stderr, "\n");
878096ee 721 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
722 if (qemu_log_enabled()) {
723 qemu_log("qemu: fatal: ");
724 qemu_log_vprintf(fmt, ap2);
725 qemu_log("\n");
a0762859 726 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 727 qemu_log_flush();
93fcfe39 728 qemu_log_close();
924edcae 729 }
493ae1f0 730 va_end(ap2);
f9373291 731 va_end(ap);
fd052bf6
RV
732#if defined(CONFIG_USER_ONLY)
733 {
734 struct sigaction act;
735 sigfillset(&act.sa_mask);
736 act.sa_handler = SIG_DFL;
737 sigaction(SIGABRT, &act, NULL);
738 }
739#endif
7501267e
FB
740 abort();
741}
742
0124311e 743#if !defined(CONFIG_USER_ONLY)
041603fe
PB
744static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
745{
746 RAMBlock *block;
747
748 /* The list is protected by the iothread lock here. */
749 block = ram_list.mru_block;
750 if (block && addr - block->offset < block->length) {
751 goto found;
752 }
753 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
754 if (addr - block->offset < block->length) {
755 goto found;
756 }
757 }
758
759 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
760 abort();
761
762found:
763 ram_list.mru_block = block;
764 return block;
765}
766
a2f4d5be 767static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 768{
041603fe 769 ram_addr_t start1;
a2f4d5be
JQ
770 RAMBlock *block;
771 ram_addr_t end;
772
773 end = TARGET_PAGE_ALIGN(start + length);
774 start &= TARGET_PAGE_MASK;
d24981d3 775
041603fe
PB
776 block = qemu_get_ram_block(start);
777 assert(block == qemu_get_ram_block(end - 1));
778 start1 = (uintptr_t)block->host + (start - block->offset);
779 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
780}
781
5579c7f3 782/* Note: start and end must be within the same ram block. */
a2f4d5be 783void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 784 unsigned client)
1ccde1cb 785{
1ccde1cb
FB
786 if (length == 0)
787 return;
ace694cc 788 cpu_physical_memory_clear_dirty_range(start, length, client);
f23db169 789
d24981d3 790 if (tcg_enabled()) {
a2f4d5be 791 tlb_reset_dirty_range_all(start, length);
5579c7f3 792 }
1ccde1cb
FB
793}
794
981fdf23 795static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
796{
797 in_migration = enable;
74576198
AL
798}
799
bb0e627a 800hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
801 MemoryRegionSection *section,
802 target_ulong vaddr,
803 hwaddr paddr, hwaddr xlat,
804 int prot,
805 target_ulong *address)
e5548617 806{
a8170e5e 807 hwaddr iotlb;
e5548617
BS
808 CPUWatchpoint *wp;
809
cc5bea60 810 if (memory_region_is_ram(section->mr)) {
e5548617
BS
811 /* Normal RAM. */
812 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 813 + xlat;
e5548617 814 if (!section->readonly) {
b41aac4f 815 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 816 } else {
b41aac4f 817 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
818 }
819 } else {
1b3fb98f 820 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 821 iotlb += xlat;
e5548617
BS
822 }
823
824 /* Make accesses to pages with watchpoints go via the
825 watchpoint trap routines. */
ff4700b0 826 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
e5548617
BS
827 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
828 /* Avoid trapping reads of pages with a write breakpoint. */
829 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 830 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
831 *address |= TLB_MMIO;
832 break;
833 }
834 }
835 }
836
837 return iotlb;
838}
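/* Editor's note on the iotlb value built above: for RAM it is the
 * page-aligned ram_addr_t ORed with a small PHYS_SECTION_* index
 * (NOTDIRTY or ROM); for MMIO it is the section's index in the dispatch
 * map plus the translated offset.  phys_section_add() below asserts that
 * section indices stay under TARGET_PAGE_SIZE precisely so they never
 * spill into the page-aligned part of the value. */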
9fa3e853
FB
839#endif /* defined(CONFIG_USER_ONLY) */
840
e2eef170 841#if !defined(CONFIG_USER_ONLY)
8da3ff18 842
c227f099 843static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 844 uint16_t section);
acc9d80b 845static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 846
575ddeb4 847static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
91138037
MA
848
849/*
 850 * Set a custom physical guest memory allocator.
851 * Accelerators with unusual needs may need this. Hopefully, we can
852 * get rid of it eventually.
853 */
575ddeb4 854void phys_mem_set_alloc(void *(*alloc)(size_t))
91138037
MA
855{
856 phys_mem_alloc = alloc;
857}
858
53cb28cb
MA
859static uint16_t phys_section_add(PhysPageMap *map,
860 MemoryRegionSection *section)
5312bd8b 861{
68f3f65b
PB
862 /* The physical section number is ORed with a page-aligned
863 * pointer to produce the iotlb entries. Thus it should
864 * never overflow into the page-aligned value.
865 */
53cb28cb 866 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 867
53cb28cb
MA
868 if (map->sections_nb == map->sections_nb_alloc) {
869 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
870 map->sections = g_renew(MemoryRegionSection, map->sections,
871 map->sections_nb_alloc);
5312bd8b 872 }
53cb28cb 873 map->sections[map->sections_nb] = *section;
dfde4e6e 874 memory_region_ref(section->mr);
53cb28cb 875 return map->sections_nb++;
5312bd8b
AK
876}
877
058bc4b5
PB
878static void phys_section_destroy(MemoryRegion *mr)
879{
dfde4e6e
PB
880 memory_region_unref(mr);
881
058bc4b5
PB
882 if (mr->subpage) {
883 subpage_t *subpage = container_of(mr, subpage_t, iomem);
884 memory_region_destroy(&subpage->iomem);
885 g_free(subpage);
886 }
887}
888
6092666e 889static void phys_sections_free(PhysPageMap *map)
5312bd8b 890{
9affd6fc
PB
891 while (map->sections_nb > 0) {
892 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
893 phys_section_destroy(section->mr);
894 }
9affd6fc
PB
895 g_free(map->sections);
896 g_free(map->nodes);
5312bd8b
AK
897}
898
ac1970fb 899static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
900{
901 subpage_t *subpage;
a8170e5e 902 hwaddr base = section->offset_within_address_space
0f0cb164 903 & TARGET_PAGE_MASK;
97115a8d 904 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 905 d->map.nodes, d->map.sections);
0f0cb164
AK
906 MemoryRegionSection subsection = {
907 .offset_within_address_space = base,
052e87b0 908 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 909 };
a8170e5e 910 hwaddr start, end;
0f0cb164 911
f3705d53 912 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 913
f3705d53 914 if (!(existing->mr->subpage)) {
acc9d80b 915 subpage = subpage_init(d->as, base);
3be91e86 916 subsection.address_space = d->as;
0f0cb164 917 subsection.mr = &subpage->iomem;
ac1970fb 918 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 919 phys_section_add(&d->map, &subsection));
0f0cb164 920 } else {
f3705d53 921 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
922 }
923 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 924 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
925 subpage_register(subpage, start, end,
926 phys_section_add(&d->map, section));
0f0cb164
AK
927}
928
929
052e87b0
PB
930static void register_multipage(AddressSpaceDispatch *d,
931 MemoryRegionSection *section)
33417e70 932{
a8170e5e 933 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 934 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
935 uint64_t num_pages = int128_get64(int128_rshift(section->size,
936 TARGET_PAGE_BITS));
dd81124b 937
733d5ef5
PB
938 assert(num_pages);
939 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
940}
941
ac1970fb 942static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 943{
89ae337a 944 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 945 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 946 MemoryRegionSection now = *section, remain = *section;
052e87b0 947 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 948
733d5ef5
PB
949 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
950 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
951 - now.offset_within_address_space;
952
052e87b0 953 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 954 register_subpage(d, &now);
733d5ef5 955 } else {
052e87b0 956 now.size = int128_zero();
733d5ef5 957 }
052e87b0
PB
958 while (int128_ne(remain.size, now.size)) {
959 remain.size = int128_sub(remain.size, now.size);
960 remain.offset_within_address_space += int128_get64(now.size);
961 remain.offset_within_region += int128_get64(now.size);
69b67646 962 now = remain;
052e87b0 963 if (int128_lt(remain.size, page_size)) {
733d5ef5 964 register_subpage(d, &now);
88266249 965 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 966 now.size = page_size;
ac1970fb 967 register_subpage(d, &now);
69b67646 968 } else {
052e87b0 969 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 970 register_multipage(d, &now);
69b67646 971 }
0f0cb164
AK
972 }
973}
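/* Editor's note: mem_add() splits a MemoryRegionSection into an
 * unaligned head (register_subpage), a run of whole target pages
 * (register_multipage) and an unaligned tail (register_subpage again).
 * For example (illustrative addresses, 4 KiB pages), a section starting
 * at 0x1800 with length 0x2a00 becomes a subpage for 0x1800..0x1fff,
 * full pages for 0x2000..0x3fff and a subpage for 0x4000..0x41ff. */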
974
62a2744c
SY
975void qemu_flush_coalesced_mmio_buffer(void)
976{
977 if (kvm_enabled())
978 kvm_flush_coalesced_mmio_buffer();
979}
980
b2a8658e
UD
981void qemu_mutex_lock_ramlist(void)
982{
983 qemu_mutex_lock(&ram_list.mutex);
984}
985
986void qemu_mutex_unlock_ramlist(void)
987{
988 qemu_mutex_unlock(&ram_list.mutex);
989}
990
e1e84ba0 991#ifdef __linux__
c902760f
MT
992
993#include <sys/vfs.h>
994
995#define HUGETLBFS_MAGIC 0x958458f6
996
997static long gethugepagesize(const char *path)
998{
999 struct statfs fs;
1000 int ret;
1001
1002 do {
9742bf26 1003 ret = statfs(path, &fs);
c902760f
MT
1004 } while (ret != 0 && errno == EINTR);
1005
1006 if (ret != 0) {
9742bf26
YT
1007 perror(path);
1008 return 0;
c902760f
MT
1009 }
1010
1011 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1012 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1013
1014 return fs.f_bsize;
1015}
1016
04b16653
AW
1017static void *file_ram_alloc(RAMBlock *block,
1018 ram_addr_t memory,
1019 const char *path)
c902760f
MT
1020{
1021 char *filename;
8ca761f6
PF
1022 char *sanitized_name;
1023 char *c;
c902760f
MT
1024 void *area;
1025 int fd;
c902760f
MT
1026 unsigned long hpagesize;
1027
1028 hpagesize = gethugepagesize(path);
1029 if (!hpagesize) {
f9a49dfa 1030 goto error;
c902760f
MT
1031 }
1032
1033 if (memory < hpagesize) {
1034 return NULL;
1035 }
1036
1037 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1038 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
f9a49dfa 1039 goto error;
c902760f
MT
1040 }
1041
8ca761f6
PF
1042 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1043 sanitized_name = g_strdup(block->mr->name);
1044 for (c = sanitized_name; *c != '\0'; c++) {
1045 if (*c == '/')
1046 *c = '_';
1047 }
1048
1049 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1050 sanitized_name);
1051 g_free(sanitized_name);
c902760f
MT
1052
1053 fd = mkstemp(filename);
1054 if (fd < 0) {
9742bf26 1055 perror("unable to create backing store for hugepages");
e4ada482 1056 g_free(filename);
f9a49dfa 1057 goto error;
c902760f
MT
1058 }
1059 unlink(filename);
e4ada482 1060 g_free(filename);
c902760f
MT
1061
1062 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1063
1064 /*
1065 * ftruncate is not supported by hugetlbfs in older
1066 * hosts, so don't bother bailing out on errors.
1067 * If anything goes wrong with it under other filesystems,
1068 * mmap will fail.
1069 */
1070 if (ftruncate(fd, memory))
9742bf26 1071 perror("ftruncate");
c902760f 1072
c902760f 1073 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
c902760f 1074 if (area == MAP_FAILED) {
9742bf26
YT
1075 perror("file_ram_alloc: can't mmap RAM pages");
1076 close(fd);
f9a49dfa 1077 goto error;
c902760f 1078 }
ef36fa14
MT
1079
1080 if (mem_prealloc) {
38183310 1081 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1082 }
1083
04b16653 1084 block->fd = fd;
c902760f 1085 return area;
f9a49dfa
MT
1086
1087error:
1088 if (mem_prealloc) {
1089 exit(1);
1090 }
1091 return NULL;
c902760f 1092}
e1e84ba0
MA
1093#else
1094static void *file_ram_alloc(RAMBlock *block,
1095 ram_addr_t memory,
1096 const char *path)
1097{
1098 fprintf(stderr, "-mem-path not supported on this host\n");
1099 exit(1);
1100}
c902760f
MT
1101#endif
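/* Editor's note (worked example, assuming 2 MiB huge pages): in
 * file_ram_alloc() above, a 3 MiB request is rounded up by
 * (memory + hpagesize - 1) & ~(hpagesize - 1) to 4 MiB, so the mmap()
 * always covers a whole number of huge pages; gethugepagesize() obtains
 * that page size from the f_bsize reported by the hugetlbfs mount. */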
1102
d17b5288 1103static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1104{
1105 RAMBlock *block, *next_block;
3e837b2c 1106 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1107
49cd9ac6
SH
1108 assert(size != 0); /* it would hand out same offset multiple times */
1109
a3161038 1110 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1111 return 0;
1112
a3161038 1113 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1114 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1115
1116 end = block->offset + block->length;
1117
a3161038 1118 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1119 if (next_block->offset >= end) {
1120 next = MIN(next, next_block->offset);
1121 }
1122 }
1123 if (next - end >= size && next - end < mingap) {
3e837b2c 1124 offset = end;
04b16653
AW
1125 mingap = next - end;
1126 }
1127 }
3e837b2c
AW
1128
1129 if (offset == RAM_ADDR_MAX) {
1130 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1131 (uint64_t)size);
1132 abort();
1133 }
1134
04b16653
AW
1135 return offset;
1136}
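/* Editor's note: find_ram_offset() is a best-fit search; it considers
 * the gap after every existing block and keeps the smallest gap that
 * still fits the request, which keeps the ram_addr_t space compact as
 * blocks are added and removed. */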
1137
652d7ec2 1138ram_addr_t last_ram_offset(void)
d17b5288
AW
1139{
1140 RAMBlock *block;
1141 ram_addr_t last = 0;
1142
a3161038 1143 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1144 last = MAX(last, block->offset + block->length);
1145
1146 return last;
1147}
1148
ddb97f1d
JB
1149static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1150{
1151 int ret;
ddb97f1d
JB
1152
1153 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1154 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1155 "dump-guest-core", true)) {
ddb97f1d
JB
1156 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1157 if (ret) {
1158 perror("qemu_madvise");
1159 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1160 "but dump_guest_core=off specified\n");
1161 }
1162 }
1163}
1164
20cfe881 1165static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1166{
20cfe881 1167 RAMBlock *block;
84b89d78 1168
a3161038 1169 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1170 if (block->offset == addr) {
20cfe881 1171 return block;
c5705a77
AK
1172 }
1173 }
20cfe881
HT
1174
1175 return NULL;
1176}
1177
1178void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1179{
1180 RAMBlock *new_block = find_ram_block(addr);
1181 RAMBlock *block;
1182
c5705a77
AK
1183 assert(new_block);
1184 assert(!new_block->idstr[0]);
84b89d78 1185
09e5ab63
AL
1186 if (dev) {
1187 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1188 if (id) {
1189 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1190 g_free(id);
84b89d78
CM
1191 }
1192 }
1193 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1194
b2a8658e
UD
1195 /* This assumes the iothread lock is taken here too. */
1196 qemu_mutex_lock_ramlist();
a3161038 1197 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1198 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1199 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1200 new_block->idstr);
1201 abort();
1202 }
1203 }
b2a8658e 1204 qemu_mutex_unlock_ramlist();
c5705a77
AK
1205}
1206
20cfe881
HT
1207void qemu_ram_unset_idstr(ram_addr_t addr)
1208{
1209 RAMBlock *block = find_ram_block(addr);
1210
1211 if (block) {
1212 memset(block->idstr, 0, sizeof(block->idstr));
1213 }
1214}
1215
8490fc78
LC
1216static int memory_try_enable_merging(void *addr, size_t len)
1217{
2ff3de68 1218 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1219 /* disabled by the user */
1220 return 0;
1221 }
1222
1223 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1224}
1225
e1c57ab8 1226static ram_addr_t ram_block_add(RAMBlock *new_block)
c5705a77 1227{
e1c57ab8 1228 RAMBlock *block;
2152f5ca
JQ
1229 ram_addr_t old_ram_size, new_ram_size;
1230
1231 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1232
b2a8658e
UD
1233 /* This assumes the iothread lock is taken here too. */
1234 qemu_mutex_lock_ramlist();
e1c57ab8
PB
1235 new_block->offset = find_ram_offset(new_block->length);
1236
1237 if (!new_block->host) {
1238 if (xen_enabled()) {
1239 xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1240 } else {
1241 new_block->host = phys_mem_alloc(new_block->length);
39228250
MA
1242 if (!new_block->host) {
1243 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1244 new_block->mr->name, strerror(errno));
1245 exit(1);
1246 }
e1c57ab8 1247 memory_try_enable_merging(new_block->host, new_block->length);
6977dfe6 1248 }
c902760f 1249 }
94a6b54f 1250
abb26d63
PB
1251 /* Keep the list sorted from biggest to smallest block. */
1252 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1253 if (block->length < new_block->length) {
1254 break;
1255 }
1256 }
1257 if (block) {
1258 QTAILQ_INSERT_BEFORE(block, new_block, next);
1259 } else {
1260 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1261 }
0d6d3c87 1262 ram_list.mru_block = NULL;
94a6b54f 1263
f798b07f 1264 ram_list.version++;
b2a8658e 1265 qemu_mutex_unlock_ramlist();
f798b07f 1266
2152f5ca
JQ
1267 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1268
1269 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1270 int i;
1271 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1272 ram_list.dirty_memory[i] =
1273 bitmap_zero_extend(ram_list.dirty_memory[i],
1274 old_ram_size, new_ram_size);
1275 }
2152f5ca 1276 }
e1c57ab8 1277 cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
94a6b54f 1278
e1c57ab8
PB
1279 qemu_ram_setup_dump(new_block->host, new_block->length);
1280 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1281 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
ddb97f1d 1282
e1c57ab8
PB
1283 if (kvm_enabled()) {
1284 kvm_setup_guest_memory(new_block->host, new_block->length);
1285 }
6f0437e8 1286
94a6b54f
PB
1287 return new_block->offset;
1288}
e9a1ab19 1289
e1c57ab8
PB
1290ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1291 const char *mem_path)
1292{
1293 RAMBlock *new_block;
1294
1295 if (xen_enabled()) {
1296 fprintf(stderr, "-mem-path not supported with Xen\n");
1297 exit(1);
1298 }
1299
1300 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1301 /*
1302 * file_ram_alloc() needs to allocate just like
1303 * phys_mem_alloc, but we haven't bothered to provide
1304 * a hook there.
1305 */
1306 fprintf(stderr,
1307 "-mem-path not supported with this accelerator\n");
1308 exit(1);
1309 }
1310
1311 size = TARGET_PAGE_ALIGN(size);
1312 new_block = g_malloc0(sizeof(*new_block));
1313 new_block->mr = mr;
1314 new_block->length = size;
1315 new_block->host = file_ram_alloc(new_block, size, mem_path);
1316 return ram_block_add(new_block);
1317}
1318
1319ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1320 MemoryRegion *mr)
1321{
1322 RAMBlock *new_block;
1323
1324 size = TARGET_PAGE_ALIGN(size);
1325 new_block = g_malloc0(sizeof(*new_block));
1326 new_block->mr = mr;
1327 new_block->length = size;
1328 new_block->fd = -1;
1329 new_block->host = host;
1330 if (host) {
7bd4f430 1331 new_block->flags |= RAM_PREALLOC;
e1c57ab8
PB
1332 }
1333 return ram_block_add(new_block);
1334}
1335
c5705a77 1336ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1337{
c5705a77 1338 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1339}
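/* Editor's note: qemu_ram_alloc() is the anonymous-memory case of
 * qemu_ram_alloc_from_ptr() (host == NULL), so ram_block_add() ends up
 * calling phys_mem_alloc (qemu_anon_ram_alloc unless an accelerator has
 * installed its own hook), or xen_ram_alloc() under Xen.  When a caller
 * does pass its own buffer, RAM_PREALLOC is set and qemu_ram_free() /
 * qemu_ram_remap() later leave that host memory untouched. */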
1340
1f2e98b6
AW
1341void qemu_ram_free_from_ptr(ram_addr_t addr)
1342{
1343 RAMBlock *block;
1344
b2a8658e
UD
1345 /* This assumes the iothread lock is taken here too. */
1346 qemu_mutex_lock_ramlist();
a3161038 1347 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1348 if (addr == block->offset) {
a3161038 1349 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1350 ram_list.mru_block = NULL;
f798b07f 1351 ram_list.version++;
7267c094 1352 g_free(block);
b2a8658e 1353 break;
1f2e98b6
AW
1354 }
1355 }
b2a8658e 1356 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1357}
1358
c227f099 1359void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1360{
04b16653
AW
1361 RAMBlock *block;
1362
b2a8658e
UD
1363 /* This assumes the iothread lock is taken here too. */
1364 qemu_mutex_lock_ramlist();
a3161038 1365 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1366 if (addr == block->offset) {
a3161038 1367 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1368 ram_list.mru_block = NULL;
f798b07f 1369 ram_list.version++;
7bd4f430 1370 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1371 ;
dfeaf2ab
MA
1372 } else if (xen_enabled()) {
1373 xen_invalidate_map_cache_entry(block->host);
089f3f76 1374#ifndef _WIN32
3435f395
MA
1375 } else if (block->fd >= 0) {
1376 munmap(block->host, block->length);
1377 close(block->fd);
089f3f76 1378#endif
04b16653 1379 } else {
dfeaf2ab 1380 qemu_anon_ram_free(block->host, block->length);
04b16653 1381 }
7267c094 1382 g_free(block);
b2a8658e 1383 break;
04b16653
AW
1384 }
1385 }
b2a8658e 1386 qemu_mutex_unlock_ramlist();
04b16653 1387
e9a1ab19
FB
1388}
1389
cd19cfa2
HY
1390#ifndef _WIN32
1391void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1392{
1393 RAMBlock *block;
1394 ram_addr_t offset;
1395 int flags;
1396 void *area, *vaddr;
1397
a3161038 1398 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1399 offset = addr - block->offset;
1400 if (offset < block->length) {
1401 vaddr = block->host + offset;
7bd4f430 1402 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1403 ;
dfeaf2ab
MA
1404 } else if (xen_enabled()) {
1405 abort();
cd19cfa2
HY
1406 } else {
1407 flags = MAP_FIXED;
1408 munmap(vaddr, length);
3435f395 1409 if (block->fd >= 0) {
cd19cfa2 1410#ifdef MAP_POPULATE
3435f395
MA
1411 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1412 MAP_PRIVATE;
fd28aa13 1413#else
3435f395 1414 flags |= MAP_PRIVATE;
cd19cfa2 1415#endif
3435f395
MA
1416 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1417 flags, block->fd, offset);
cd19cfa2 1418 } else {
2eb9fbaa
MA
1419 /*
1420 * Remap needs to match alloc. Accelerators that
1421 * set phys_mem_alloc never remap. If they did,
1422 * we'd need a remap hook here.
1423 */
1424 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1425
cd19cfa2
HY
1426 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1427 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1428 flags, -1, 0);
cd19cfa2
HY
1429 }
1430 if (area != vaddr) {
f15fbc4b
AP
1431 fprintf(stderr, "Could not remap addr: "
1432 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1433 length, addr);
1434 exit(1);
1435 }
8490fc78 1436 memory_try_enable_merging(vaddr, length);
ddb97f1d 1437 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1438 }
1439 return;
1440 }
1441 }
1442}
1443#endif /* !_WIN32 */
1444
1b5ec234
PB
1445/* Return a host pointer to ram allocated with qemu_ram_alloc.
1446 With the exception of the softmmu code in this file, this should
1447 only be used for local memory (e.g. video ram) that the device owns,
1448 and knows it isn't going to access beyond the end of the block.
1449
1450 It should not be used for general purpose DMA.
1451 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1452 */
1453void *qemu_get_ram_ptr(ram_addr_t addr)
1454{
1455 RAMBlock *block = qemu_get_ram_block(addr);
1456
0d6d3c87
PB
1457 if (xen_enabled()) {
1458 /* We need to check if the requested address is in the RAM
1459 * because we don't want to map the entire memory in QEMU.
1460 * In that case just map until the end of the page.
1461 */
1462 if (block->offset == 0) {
1463 return xen_map_cache(addr, 0, 0);
1464 } else if (block->host == NULL) {
1465 block->host =
1466 xen_map_cache(block->offset, block->length, 1);
1467 }
1468 }
1469 return block->host + (addr - block->offset);
dc828ca1
PB
1470}
1471
38bee5dc
SS
1472/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1473 * but takes a size argument */
cb85f7ab 1474static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1475{
8ab934f9
SS
1476 if (*size == 0) {
1477 return NULL;
1478 }
868bb33f 1479 if (xen_enabled()) {
e41d7c69 1480 return xen_map_cache(addr, *size, 1);
868bb33f 1481 } else {
38bee5dc
SS
1482 RAMBlock *block;
1483
a3161038 1484 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1485 if (addr - block->offset < block->length) {
1486 if (addr - block->offset + *size > block->length)
1487 *size = block->length - addr + block->offset;
1488 return block->host + (addr - block->offset);
1489 }
1490 }
1491
1492 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1493 abort();
38bee5dc
SS
1494 }
1495}
1496
7443b437
PB
1497/* Some of the softmmu routines need to translate from a host pointer
1498 (typically a TLB entry) back to a ram offset. */
1b5ec234 1499MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1500{
94a6b54f
PB
1501 RAMBlock *block;
1502 uint8_t *host = ptr;
1503
868bb33f 1504 if (xen_enabled()) {
e41d7c69 1505 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1506 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1507 }
1508
23887b79
PB
1509 block = ram_list.mru_block;
1510 if (block && block->host && host - block->host < block->length) {
1511 goto found;
1512 }
1513
a3161038 1514 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 1515 /* This can happen when the block is not mapped. */
1516 if (block->host == NULL) {
1517 continue;
1518 }
f471a17e 1519 if (host - block->host < block->length) {
23887b79 1520 goto found;
f471a17e 1521 }
94a6b54f 1522 }
432d268c 1523
1b5ec234 1524 return NULL;
23887b79
PB
1525
1526found:
1527 *ram_addr = block->offset + (host - block->host);
1b5ec234 1528 return block->mr;
e890261f 1529}
f471a17e 1530
a8170e5e 1531static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1532 uint64_t val, unsigned size)
9fa3e853 1533{
52159192 1534 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1535 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1536 }
0e0df1e2
AK
1537 switch (size) {
1538 case 1:
1539 stb_p(qemu_get_ram_ptr(ram_addr), val);
1540 break;
1541 case 2:
1542 stw_p(qemu_get_ram_ptr(ram_addr), val);
1543 break;
1544 case 4:
1545 stl_p(qemu_get_ram_ptr(ram_addr), val);
1546 break;
1547 default:
1548 abort();
3a7d929e 1549 }
52159192
JQ
1550 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1551 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
f23db169
FB
1552 /* we remove the notdirty callback only if the code has been
1553 flushed */
a2cd8c85 1554 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1555 CPUArchState *env = current_cpu->env_ptr;
93afeade 1556 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1557 }
9fa3e853
FB
1558}
1559
b018ddf6
PB
1560static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1561 unsigned size, bool is_write)
1562{
1563 return is_write;
1564}
1565
0e0df1e2 1566static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1567 .write = notdirty_mem_write,
b018ddf6 1568 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1569 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1570};
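/* Editor's note: writes to pages that still contain translated code are
 * routed through notdirty_mem_ops so the affected TBs can be invalidated
 * and the MIGRATION/VGA dirty bits set.  Once the page is no longer
 * clean, tlb_set_dirty() switches the TLB entry back to a direct RAM
 * mapping, so this slow path is only taken until the code has been
 * flushed. */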
1571
0f459d16 1572/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1573static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1574{
93afeade
AF
1575 CPUState *cpu = current_cpu;
1576 CPUArchState *env = cpu->env_ptr;
06d55cc1 1577 target_ulong pc, cs_base;
0f459d16 1578 target_ulong vaddr;
a1d1bb31 1579 CPUWatchpoint *wp;
06d55cc1 1580 int cpu_flags;
0f459d16 1581
ff4700b0 1582 if (cpu->watchpoint_hit) {
06d55cc1
AL
1583 /* We re-entered the check after replacing the TB. Now raise
 1584 * the debug interrupt so that it will trigger after the
1585 * current instruction. */
93afeade 1586 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1587 return;
1588 }
93afeade 1589 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1590 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
b4051334
AL
1591 if ((vaddr == (wp->vaddr & len_mask) ||
1592 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28 1593 wp->flags |= BP_WATCHPOINT_HIT;
ff4700b0
AF
1594 if (!cpu->watchpoint_hit) {
1595 cpu->watchpoint_hit = wp;
239c51a5 1596 tb_check_watchpoint(cpu);
6e140f28 1597 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1598 cpu->exception_index = EXCP_DEBUG;
5638d180 1599 cpu_loop_exit(cpu);
6e140f28
AL
1600 } else {
1601 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1602 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1603 cpu_resume_from_signal(cpu, NULL);
6e140f28 1604 }
06d55cc1 1605 }
6e140f28
AL
1606 } else {
1607 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1608 }
1609 }
1610}
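/* Editor's note: accesses to a page containing a watchpoint are forced
 * onto the TLB_MMIO slow path (see memory_region_section_get_iotlb()
 * above) and land in watch_mem_read/write below, which call
 * check_watchpoint() first and then perform an ordinary access of the
 * same size through address_space_memory. */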
1611
6658ffb8
PB
1612/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1613 so these check for a hit then pass through to the normal out-of-line
1614 phys routines. */
a8170e5e 1615static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1616 unsigned size)
6658ffb8 1617{
1ec9b909
AK
1618 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1619 switch (size) {
2c17449b 1620 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1621 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1622 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1623 default: abort();
1624 }
6658ffb8
PB
1625}
1626
a8170e5e 1627static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1628 uint64_t val, unsigned size)
6658ffb8 1629{
1ec9b909
AK
1630 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1631 switch (size) {
67364150 1632 case 1:
db3be60d 1633 stb_phys(&address_space_memory, addr, val);
67364150
MF
1634 break;
1635 case 2:
5ce5944d 1636 stw_phys(&address_space_memory, addr, val);
67364150
MF
1637 break;
1638 case 4:
ab1da857 1639 stl_phys(&address_space_memory, addr, val);
67364150 1640 break;
1ec9b909
AK
1641 default: abort();
1642 }
6658ffb8
PB
1643}
1644
1ec9b909
AK
1645static const MemoryRegionOps watch_mem_ops = {
1646 .read = watch_mem_read,
1647 .write = watch_mem_write,
1648 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1649};
6658ffb8 1650
a8170e5e 1651static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1652 unsigned len)
db7b5426 1653{
acc9d80b
JK
1654 subpage_t *subpage = opaque;
1655 uint8_t buf[4];
791af8c8 1656
db7b5426 1657#if defined(DEBUG_SUBPAGE)
016e9d62 1658 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1659 subpage, len, addr);
db7b5426 1660#endif
acc9d80b
JK
1661 address_space_read(subpage->as, addr + subpage->base, buf, len);
1662 switch (len) {
1663 case 1:
1664 return ldub_p(buf);
1665 case 2:
1666 return lduw_p(buf);
1667 case 4:
1668 return ldl_p(buf);
1669 default:
1670 abort();
1671 }
db7b5426
BS
1672}
1673
a8170e5e 1674static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1675 uint64_t value, unsigned len)
db7b5426 1676{
acc9d80b
JK
1677 subpage_t *subpage = opaque;
1678 uint8_t buf[4];
1679
db7b5426 1680#if defined(DEBUG_SUBPAGE)
016e9d62 1681 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1682 " value %"PRIx64"\n",
1683 __func__, subpage, len, addr, value);
db7b5426 1684#endif
acc9d80b
JK
1685 switch (len) {
1686 case 1:
1687 stb_p(buf, value);
1688 break;
1689 case 2:
1690 stw_p(buf, value);
1691 break;
1692 case 4:
1693 stl_p(buf, value);
1694 break;
1695 default:
1696 abort();
1697 }
1698 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1699}
1700
c353e4cc 1701static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1702 unsigned len, bool is_write)
c353e4cc 1703{
acc9d80b 1704 subpage_t *subpage = opaque;
c353e4cc 1705#if defined(DEBUG_SUBPAGE)
016e9d62 1706 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1707 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1708#endif
1709
acc9d80b 1710 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1711 len, is_write);
c353e4cc
PB
1712}
1713
70c68e44
AK
1714static const MemoryRegionOps subpage_ops = {
1715 .read = subpage_read,
1716 .write = subpage_write,
c353e4cc 1717 .valid.accepts = subpage_accepts,
70c68e44 1718 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1719};
1720
c227f099 1721static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1722 uint16_t section)
db7b5426
BS
1723{
1724 int idx, eidx;
1725
1726 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1727 return -1;
1728 idx = SUBPAGE_IDX(start);
1729 eidx = SUBPAGE_IDX(end);
1730#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1731 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1732 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1733#endif
db7b5426 1734 for (; idx <= eidx; idx++) {
5312bd8b 1735 mmio->sub_section[idx] = section;
db7b5426
BS
1736 }
1737
1738 return 0;
1739}
1740
acc9d80b 1741static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1742{
c227f099 1743 subpage_t *mmio;
db7b5426 1744
7267c094 1745 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1746
acc9d80b 1747 mmio->as = as;
1eec614b 1748 mmio->base = base;
2c9b15ca 1749 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
70c68e44 1750 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1751 mmio->iomem.subpage = true;
db7b5426 1752#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1753 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1754 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1755#endif
b41aac4f 1756 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1757
1758 return mmio;
1759}
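/* A brief usage sketch of the subpage machinery above (illustrative only; the
 * section indices are hypothetical values of the kind phys_section_add()
 * returns).  When two MemoryRegionSections share one target page, the
 * dispatch code creates a subpage and registers each half against its
 * section; reads and writes then funnel through subpage_read()/subpage_write(),
 * which simply re-issue the access on the owning AddressSpace at base + offset. */
static void subpage_split_sketch(AddressSpace *as, hwaddr page_base,
                                 uint16_t first_half, uint16_t second_half)
{
    subpage_t *sp = subpage_init(as, page_base);

    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, first_half);
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1,
                     second_half);
}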
1760
a656e22f
PC
1761static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1762 MemoryRegion *mr)
5312bd8b 1763{
a656e22f 1764 assert(as);
5312bd8b 1765 MemoryRegionSection section = {
a656e22f 1766 .address_space = as,
5312bd8b
AK
1767 .mr = mr,
1768 .offset_within_address_space = 0,
1769 .offset_within_region = 0,
052e87b0 1770 .size = int128_2_64(),
5312bd8b
AK
1771 };
1772
53cb28cb 1773 return phys_section_add(map, &section);
5312bd8b
AK
1774}
1775
77717094 1776MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1777{
77717094 1778 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1779}
1780
e9179ce1
AK
1781static void io_mem_init(void)
1782{
2c9b15ca
PB
1783 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1784 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1785 "unassigned", UINT64_MAX);
2c9b15ca 1786 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1787 "notdirty", UINT64_MAX);
2c9b15ca 1788 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1789 "watch", UINT64_MAX);
e9179ce1
AK
1790}
1791
ac1970fb 1792static void mem_begin(MemoryListener *listener)
00752703
PB
1793{
1794 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1795 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1796 uint16_t n;
1797
a656e22f 1798 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1799 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1800 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1801 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1802 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1803 assert(n == PHYS_SECTION_ROM);
a656e22f 1804 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1805 assert(n == PHYS_SECTION_WATCH);
00752703 1806
9736e55b 1807 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1808 d->as = as;
1809 as->next_dispatch = d;
1810}
1811
1812static void mem_commit(MemoryListener *listener)
ac1970fb 1813{
89ae337a 1814 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1815 AddressSpaceDispatch *cur = as->dispatch;
1816 AddressSpaceDispatch *next = as->next_dispatch;
1817
53cb28cb 1818 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 1819
0475d94f 1820 as->dispatch = next;
b41aac4f 1821
53cb28cb
MA
1822 if (cur) {
1823 phys_sections_free(&cur->map);
1824 g_free(cur);
1825 }
9affd6fc
PB
1826}
1827
1d71148e 1828static void tcg_commit(MemoryListener *listener)
50c1e149 1829{
182735ef 1830 CPUState *cpu;
117712c3
AK
1831
 1832 /* Since each CPU stores RAM addresses in its TLB cache, we must
 1833 reset the modified entries. */
 1834 /* XXX: slow! */
bdc44640 1835 CPU_FOREACH(cpu) {
33bde2e1
EI
 1836 /* FIXME: Disentangle the cpu.h circular file dependencies so we can
 1837 directly get the right CPU from the listener. */
1838 if (cpu->tcg_as_listener != listener) {
1839 continue;
1840 }
00c8cb0a 1841 tlb_flush(cpu, 1);
117712c3 1842 }
50c1e149
AK
1843}
1844
93632747
AK
1845static void core_log_global_start(MemoryListener *listener)
1846{
981fdf23 1847 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
1848}
1849
1850static void core_log_global_stop(MemoryListener *listener)
1851{
981fdf23 1852 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
1853}
1854
93632747 1855static MemoryListener core_memory_listener = {
93632747
AK
1856 .log_global_start = core_log_global_start,
1857 .log_global_stop = core_log_global_stop,
ac1970fb 1858 .priority = 1,
93632747
AK
1859};
1860
ac1970fb
AK
1861void address_space_init_dispatch(AddressSpace *as)
1862{
00752703 1863 as->dispatch = NULL;
89ae337a 1864 as->dispatch_listener = (MemoryListener) {
ac1970fb 1865 .begin = mem_begin,
00752703 1866 .commit = mem_commit,
ac1970fb
AK
1867 .region_add = mem_add,
1868 .region_nop = mem_add,
1869 .priority = 0,
1870 };
89ae337a 1871 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1872}
1873
83f3c251
AK
1874void address_space_destroy_dispatch(AddressSpace *as)
1875{
1876 AddressSpaceDispatch *d = as->dispatch;
1877
89ae337a 1878 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1879 g_free(d);
1880 as->dispatch = NULL;
1881}
1882
62152b8a
AK
1883static void memory_map_init(void)
1884{
7267c094 1885 system_memory = g_malloc(sizeof(*system_memory));
03f49957 1886
57271d63 1887 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 1888 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1889
7267c094 1890 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1891 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1892 65536);
7dca8043 1893 address_space_init(&address_space_io, system_io, "I/O");
93632747 1894
f6790af6 1895 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
1896}
1897
1898MemoryRegion *get_system_memory(void)
1899{
1900 return system_memory;
1901}
1902
309cb471
AK
1903MemoryRegion *get_system_io(void)
1904{
1905 return system_io;
1906}
1907
e2eef170
PB
1908#endif /* !defined(CONFIG_USER_ONLY) */
1909
13eb76e0
FB
 1910 /* physical memory access (slow version, mainly for debug) */
1911#if defined(CONFIG_USER_ONLY)
f17ec444 1912int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1913 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1914{
1915 int l, flags;
1916 target_ulong page;
53a5960a 1917 void * p;
13eb76e0
FB
1918
1919 while (len > 0) {
1920 page = addr & TARGET_PAGE_MASK;
1921 l = (page + TARGET_PAGE_SIZE) - addr;
1922 if (l > len)
1923 l = len;
1924 flags = page_get_flags(page);
1925 if (!(flags & PAGE_VALID))
a68fe89c 1926 return -1;
13eb76e0
FB
1927 if (is_write) {
1928 if (!(flags & PAGE_WRITE))
a68fe89c 1929 return -1;
579a97f7 1930 /* XXX: this code should not depend on lock_user */
72fb7daa 1931 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1932 return -1;
72fb7daa
AJ
1933 memcpy(p, buf, l);
1934 unlock_user(p, addr, l);
13eb76e0
FB
1935 } else {
1936 if (!(flags & PAGE_READ))
a68fe89c 1937 return -1;
579a97f7 1938 /* XXX: this code should not depend on lock_user */
72fb7daa 1939 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1940 return -1;
72fb7daa 1941 memcpy(buf, p, l);
5b257578 1942 unlock_user(p, addr, 0);
13eb76e0
FB
1943 }
1944 len -= l;
1945 buf += l;
1946 addr += l;
1947 }
a68fe89c 1948 return 0;
13eb76e0 1949}
8df1cd07 1950
13eb76e0 1951#else
51d7a9eb 1952
a8170e5e
AK
1953static void invalidate_and_set_dirty(hwaddr addr,
1954 hwaddr length)
51d7a9eb 1955{
a2cd8c85 1956 if (cpu_physical_memory_is_clean(addr)) {
51d7a9eb
AP
1957 /* invalidate code */
1958 tb_invalidate_phys_page_range(addr, addr + length, 0);
1959 /* set dirty bit */
52159192
JQ
1960 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1961 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
51d7a9eb 1962 }
e226939d 1963 xen_modified_memory(addr, length);
51d7a9eb
AP
1964}
1965
23326164 1966static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 1967{
e1622f4b 1968 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
1969
1970 /* Regions are assumed to support 1-4 byte accesses unless
1971 otherwise specified. */
23326164
RH
1972 if (access_size_max == 0) {
1973 access_size_max = 4;
1974 }
1975
1976 /* Bound the maximum access by the alignment of the address. */
1977 if (!mr->ops->impl.unaligned) {
1978 unsigned align_size_max = addr & -addr;
1979 if (align_size_max != 0 && align_size_max < access_size_max) {
1980 access_size_max = align_size_max;
1981 }
82f2563f 1982 }
23326164
RH
1983
1984 /* Don't attempt accesses larger than the maximum. */
1985 if (l > access_size_max) {
1986 l = access_size_max;
82f2563f 1987 }
098178f2
PB
1988 if (l & (l - 1)) {
1989 l = 1 << (qemu_fls(l) - 1);
1990 }
23326164
RH
1991
1992 return l;
82f2563f
PB
1993}
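/* A worked illustration of the bounding rules above (illustrative values):
 * "addr & -addr" isolates the lowest set bit of the address, i.e. its natural
 * alignment, and the final qemu_fls() step rounds any remaining
 * non-power-of-two length down to a power of two.  For a region with
 * valid.max_access_size == 0 (treated as 4) and no unaligned-access support:
 *
 *   l = 8, addr = 0x1000  ->  4  (capped by the maximum access size)
 *   l = 4, addr = 0x1002  ->  2  (capped by the address alignment)
 *   l = 3, addr = 0x1004  ->  2  (rounded down to a power of two)
 */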
1994
fd8aaa76 1995bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1996 int len, bool is_write)
13eb76e0 1997{
149f54b5 1998 hwaddr l;
13eb76e0 1999 uint8_t *ptr;
791af8c8 2000 uint64_t val;
149f54b5 2001 hwaddr addr1;
5c8a00ce 2002 MemoryRegion *mr;
fd8aaa76 2003 bool error = false;
3b46e624 2004
13eb76e0 2005 while (len > 0) {
149f54b5 2006 l = len;
5c8a00ce 2007 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2008
13eb76e0 2009 if (is_write) {
5c8a00ce
PB
2010 if (!memory_access_is_direct(mr, is_write)) {
2011 l = memory_access_size(mr, l, addr1);
4917cf44 2012 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2013 potential bugs */
23326164
RH
2014 switch (l) {
2015 case 8:
2016 /* 64 bit write access */
2017 val = ldq_p(buf);
2018 error |= io_mem_write(mr, addr1, val, 8);
2019 break;
2020 case 4:
1c213d19 2021 /* 32 bit write access */
c27004ec 2022 val = ldl_p(buf);
5c8a00ce 2023 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2024 break;
2025 case 2:
1c213d19 2026 /* 16 bit write access */
c27004ec 2027 val = lduw_p(buf);
5c8a00ce 2028 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2029 break;
2030 case 1:
1c213d19 2031 /* 8 bit write access */
c27004ec 2032 val = ldub_p(buf);
5c8a00ce 2033 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2034 break;
2035 default:
2036 abort();
13eb76e0 2037 }
2bbfa05d 2038 } else {
5c8a00ce 2039 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2040 /* RAM case */
5579c7f3 2041 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2042 memcpy(ptr, buf, l);
51d7a9eb 2043 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2044 }
2045 } else {
5c8a00ce 2046 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2047 /* I/O case */
5c8a00ce 2048 l = memory_access_size(mr, l, addr1);
23326164
RH
2049 switch (l) {
2050 case 8:
2051 /* 64 bit read access */
2052 error |= io_mem_read(mr, addr1, &val, 8);
2053 stq_p(buf, val);
2054 break;
2055 case 4:
13eb76e0 2056 /* 32 bit read access */
5c8a00ce 2057 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2058 stl_p(buf, val);
23326164
RH
2059 break;
2060 case 2:
13eb76e0 2061 /* 16 bit read access */
5c8a00ce 2062 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2063 stw_p(buf, val);
23326164
RH
2064 break;
2065 case 1:
1c213d19 2066 /* 8 bit read access */
5c8a00ce 2067 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2068 stb_p(buf, val);
23326164
RH
2069 break;
2070 default:
2071 abort();
13eb76e0
FB
2072 }
2073 } else {
2074 /* RAM case */
5c8a00ce 2075 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2076 memcpy(buf, ptr, l);
13eb76e0
FB
2077 }
2078 }
2079 len -= l;
2080 buf += l;
2081 addr += l;
2082 }
fd8aaa76
PB
2083
2084 return error;
13eb76e0 2085}
8df1cd07 2086
fd8aaa76 2087bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2088 const uint8_t *buf, int len)
2089{
fd8aaa76 2090 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2091}
2092
fd8aaa76 2093bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2094{
fd8aaa76 2095 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2096}
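/* A minimal usage sketch of the accessors above; dma_addr is a hypothetical
 * guest-physical address.  A 32-bit value is written into guest-visible
 * memory and read back through the same AddressSpace, checking the
 * accumulated error flag that address_space_rw() returns (true if any part
 * of the access failed). */
static bool address_space_rw_sketch(AddressSpace *as, hwaddr dma_addr)
{
    uint32_t out = 0xdeadbeef;
    uint32_t in = 0;
    bool err = false;

    err |= address_space_write(as, dma_addr, (const uint8_t *)&out, sizeof(out));
    err |= address_space_read(as, dma_addr, (uint8_t *)&in, sizeof(in));

    return !err && in == out;
}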
2097
2098
a8170e5e 2099void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2100 int len, int is_write)
2101{
fd8aaa76 2102 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2103}
2104
582b55a9
AG
2105enum write_rom_type {
2106 WRITE_DATA,
2107 FLUSH_CACHE,
2108};
2109
2a221651 2110static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2111 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2112{
149f54b5 2113 hwaddr l;
d0ecd2aa 2114 uint8_t *ptr;
149f54b5 2115 hwaddr addr1;
5c8a00ce 2116 MemoryRegion *mr;
3b46e624 2117
d0ecd2aa 2118 while (len > 0) {
149f54b5 2119 l = len;
2a221651 2120 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2121
5c8a00ce
PB
2122 if (!(memory_region_is_ram(mr) ||
2123 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2124 /* do nothing */
2125 } else {
5c8a00ce 2126 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2127 /* ROM/RAM case */
5579c7f3 2128 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2129 switch (type) {
2130 case WRITE_DATA:
2131 memcpy(ptr, buf, l);
2132 invalidate_and_set_dirty(addr1, l);
2133 break;
2134 case FLUSH_CACHE:
2135 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2136 break;
2137 }
d0ecd2aa
FB
2138 }
2139 len -= l;
2140 buf += l;
2141 addr += l;
2142 }
2143}
2144
582b55a9 2145 /* used for ROM loading: can write to RAM and ROM */
2a221651 2146void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2147 const uint8_t *buf, int len)
2148{
2a221651 2149 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2150}
2151
2152void cpu_flush_icache_range(hwaddr start, int len)
2153{
2154 /*
2155 * This function should do the same thing as an icache flush that was
2156 * triggered from within the guest. For TCG we are always cache coherent,
2157 * so there is no need to flush anything. For KVM / Xen we need to flush
2158 * the host's instruction cache at least.
2159 */
2160 if (tcg_enabled()) {
2161 return;
2162 }
2163
2a221651
EI
2164 cpu_physical_memory_write_rom_internal(&address_space_memory,
2165 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2166}
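/* A sketch of the intended pairing of the two helpers above (blob and
 * blob_size are placeholders): a loader that patches executable guest code
 * writes the bytes with cpu_physical_memory_write_rom() and then flushes the
 * host instruction cache for that range, which is a no-op under TCG but
 * required for KVM/Xen. */
static void load_and_flush_sketch(AddressSpace *as, hwaddr dest,
                                  const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(as, dest, blob, blob_size);
    cpu_flush_icache_range(dest, blob_size);
}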
2167
6d16c2f8 2168typedef struct {
d3e71559 2169 MemoryRegion *mr;
6d16c2f8 2170 void *buffer;
a8170e5e
AK
2171 hwaddr addr;
2172 hwaddr len;
6d16c2f8
AL
2173} BounceBuffer;
2174
2175static BounceBuffer bounce;
2176
ba223c29
AL
2177typedef struct MapClient {
2178 void *opaque;
2179 void (*callback)(void *opaque);
72cf2d4f 2180 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2181} MapClient;
2182
72cf2d4f
BS
2183static QLIST_HEAD(map_client_list, MapClient) map_client_list
2184 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2185
2186void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2187{
7267c094 2188 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2189
2190 client->opaque = opaque;
2191 client->callback = callback;
72cf2d4f 2192 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2193 return client;
2194}
2195
8b9c99d9 2196static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2197{
2198 MapClient *client = (MapClient *)_client;
2199
72cf2d4f 2200 QLIST_REMOVE(client, link);
7267c094 2201 g_free(client);
ba223c29
AL
2202}
2203
2204static void cpu_notify_map_clients(void)
2205{
2206 MapClient *client;
2207
72cf2d4f
BS
2208 while (!QLIST_EMPTY(&map_client_list)) {
2209 client = QLIST_FIRST(&map_client_list);
ba223c29 2210 client->callback(client->opaque);
34d5e948 2211 cpu_unregister_map_client(client);
ba223c29
AL
2212 }
2213}
2214
51644ab7
PB
2215bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2216{
5c8a00ce 2217 MemoryRegion *mr;
51644ab7
PB
2218 hwaddr l, xlat;
2219
2220 while (len > 0) {
2221 l = len;
5c8a00ce
PB
2222 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2223 if (!memory_access_is_direct(mr, is_write)) {
2224 l = memory_access_size(mr, l, addr);
2225 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2226 return false;
2227 }
2228 }
2229
2230 len -= l;
2231 addr += l;
2232 }
2233 return true;
2234}
2235
6d16c2f8
AL
2236/* Map a physical memory region into a host virtual address.
2237 * May map a subset of the requested range, given by and returned in *plen.
2238 * May return NULL if resources needed to perform the mapping are exhausted.
2239 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2240 * Use cpu_register_map_client() to know when retrying the map operation is
2241 * likely to succeed.
6d16c2f8 2242 */
ac1970fb 2243void *address_space_map(AddressSpace *as,
a8170e5e
AK
2244 hwaddr addr,
2245 hwaddr *plen,
ac1970fb 2246 bool is_write)
6d16c2f8 2247{
a8170e5e 2248 hwaddr len = *plen;
e3127ae0
PB
2249 hwaddr done = 0;
2250 hwaddr l, xlat, base;
2251 MemoryRegion *mr, *this_mr;
2252 ram_addr_t raddr;
6d16c2f8 2253
e3127ae0
PB
2254 if (len == 0) {
2255 return NULL;
2256 }
38bee5dc 2257
e3127ae0
PB
2258 l = len;
2259 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2260 if (!memory_access_is_direct(mr, is_write)) {
2261 if (bounce.buffer) {
2262 return NULL;
6d16c2f8 2263 }
e85d9db5
KW
2264 /* Avoid unbounded allocations */
2265 l = MIN(l, TARGET_PAGE_SIZE);
2266 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2267 bounce.addr = addr;
2268 bounce.len = l;
d3e71559
PB
2269
2270 memory_region_ref(mr);
2271 bounce.mr = mr;
e3127ae0
PB
2272 if (!is_write) {
2273 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2274 }
6d16c2f8 2275
e3127ae0
PB
2276 *plen = l;
2277 return bounce.buffer;
2278 }
2279
2280 base = xlat;
2281 raddr = memory_region_get_ram_addr(mr);
2282
2283 for (;;) {
6d16c2f8
AL
2284 len -= l;
2285 addr += l;
e3127ae0
PB
2286 done += l;
2287 if (len == 0) {
2288 break;
2289 }
2290
2291 l = len;
2292 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2293 if (this_mr != mr || xlat != base + done) {
2294 break;
2295 }
6d16c2f8 2296 }
e3127ae0 2297
d3e71559 2298 memory_region_ref(mr);
e3127ae0
PB
2299 *plen = done;
2300 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2301}
2302
ac1970fb 2303/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2304 * Will also mark the memory as dirty if is_write == 1. access_len gives
2305 * the amount of memory that was actually read or written by the caller.
2306 */
a8170e5e
AK
2307void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2308 int is_write, hwaddr access_len)
6d16c2f8
AL
2309{
2310 if (buffer != bounce.buffer) {
d3e71559
PB
2311 MemoryRegion *mr;
2312 ram_addr_t addr1;
2313
2314 mr = qemu_ram_addr_from_host(buffer, &addr1);
2315 assert(mr != NULL);
6d16c2f8 2316 if (is_write) {
6d16c2f8
AL
2317 while (access_len) {
2318 unsigned l;
2319 l = TARGET_PAGE_SIZE;
2320 if (l > access_len)
2321 l = access_len;
51d7a9eb 2322 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2323 addr1 += l;
2324 access_len -= l;
2325 }
2326 }
868bb33f 2327 if (xen_enabled()) {
e41d7c69 2328 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2329 }
d3e71559 2330 memory_region_unref(mr);
6d16c2f8
AL
2331 return;
2332 }
2333 if (is_write) {
ac1970fb 2334 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2335 }
f8a83245 2336 qemu_vfree(bounce.buffer);
6d16c2f8 2337 bounce.buffer = NULL;
d3e71559 2338 memory_region_unref(bounce.mr);
ba223c29 2339 cpu_notify_map_clients();
6d16c2f8 2340}
d0ecd2aa 2341
a8170e5e
AK
2342void *cpu_physical_memory_map(hwaddr addr,
2343 hwaddr *plen,
ac1970fb
AK
2344 int is_write)
2345{
2346 return address_space_map(&address_space_memory, addr, plen, is_write);
2347}
2348
a8170e5e
AK
2349void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2350 int is_write, hwaddr access_len)
ac1970fb
AK
2351{
2352 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2353}
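/* A usage sketch for the map/unmap pair above (the callback body and the
 * retry policy are assumptions).  Zero-copy DMA callers map the guest range,
 * touch it through the returned host pointer and unmap it, passing the number
 * of bytes actually transferred.  If the single bounce buffer is busy,
 * address_space_map() returns NULL and the caller can register a map client
 * to be notified when a retry is likely to succeed. */
static void dma_retry_cb_sketch(void *opaque)
{
    /* hypothetical: re-queue the DMA request that previously failed to map */
}

static int dma_write_sketch(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        cpu_register_map_client(NULL, dma_retry_cb_sketch);
        return -1;                   /* try again from the callback */
    }

    memset(host, 0, plen);           /* stand-in for the real device transfer */
    address_space_unmap(as, host, plen, true, plen);
    return plen == len ? 0 : 1;      /* 1: only part of the range was mapped */
}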
2354
8df1cd07 2355/* warning: addr must be aligned */
fdfba1a2 2356static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2357 enum device_endian endian)
8df1cd07 2358{
8df1cd07 2359 uint8_t *ptr;
791af8c8 2360 uint64_t val;
5c8a00ce 2361 MemoryRegion *mr;
149f54b5
PB
2362 hwaddr l = 4;
2363 hwaddr addr1;
8df1cd07 2364
fdfba1a2 2365 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2366 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2367 /* I/O case */
5c8a00ce 2368 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2369#if defined(TARGET_WORDS_BIGENDIAN)
2370 if (endian == DEVICE_LITTLE_ENDIAN) {
2371 val = bswap32(val);
2372 }
2373#else
2374 if (endian == DEVICE_BIG_ENDIAN) {
2375 val = bswap32(val);
2376 }
2377#endif
8df1cd07
FB
2378 } else {
2379 /* RAM case */
5c8a00ce 2380 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2381 & TARGET_PAGE_MASK)
149f54b5 2382 + addr1);
1e78bcc1
AG
2383 switch (endian) {
2384 case DEVICE_LITTLE_ENDIAN:
2385 val = ldl_le_p(ptr);
2386 break;
2387 case DEVICE_BIG_ENDIAN:
2388 val = ldl_be_p(ptr);
2389 break;
2390 default:
2391 val = ldl_p(ptr);
2392 break;
2393 }
8df1cd07
FB
2394 }
2395 return val;
2396}
2397
fdfba1a2 2398uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2399{
fdfba1a2 2400 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2401}
2402
fdfba1a2 2403uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2404{
fdfba1a2 2405 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2406}
2407
fdfba1a2 2408uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2409{
fdfba1a2 2410 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2411}
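/* The three wrappers above differ only in the fixed byte order they request
 * from ldl_phys_internal().  A short sketch with a hypothetical device: a
 * model whose 32-bit registers are little-endian regardless of the target can
 * use ldl_le_phys()/stl_le_phys() and let the helpers swap on big-endian
 * targets. */
static uint32_t read_le_reg_sketch(AddressSpace *as, hwaddr reg_base,
                                   unsigned reg_index)
{
    /* registers assumed 32-bit wide and little-endian */
    return ldl_le_phys(as, reg_base + reg_index * 4);
}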
2412
84b7b8e7 2413/* warning: addr must be aligned */
2c17449b 2414static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2415 enum device_endian endian)
84b7b8e7 2416{
84b7b8e7
FB
2417 uint8_t *ptr;
2418 uint64_t val;
5c8a00ce 2419 MemoryRegion *mr;
149f54b5
PB
2420 hwaddr l = 8;
2421 hwaddr addr1;
84b7b8e7 2422
2c17449b 2423 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2424 false);
2425 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2426 /* I/O case */
5c8a00ce 2427 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2428#if defined(TARGET_WORDS_BIGENDIAN)
2429 if (endian == DEVICE_LITTLE_ENDIAN) {
2430 val = bswap64(val);
2431 }
2432#else
2433 if (endian == DEVICE_BIG_ENDIAN) {
2434 val = bswap64(val);
2435 }
84b7b8e7
FB
2436#endif
2437 } else {
2438 /* RAM case */
5c8a00ce 2439 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2440 & TARGET_PAGE_MASK)
149f54b5 2441 + addr1);
1e78bcc1
AG
2442 switch (endian) {
2443 case DEVICE_LITTLE_ENDIAN:
2444 val = ldq_le_p(ptr);
2445 break;
2446 case DEVICE_BIG_ENDIAN:
2447 val = ldq_be_p(ptr);
2448 break;
2449 default:
2450 val = ldq_p(ptr);
2451 break;
2452 }
84b7b8e7
FB
2453 }
2454 return val;
2455}
2456
2c17449b 2457uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2458{
2c17449b 2459 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2460}
2461
2c17449b 2462uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2463{
2c17449b 2464 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2465}
2466
2c17449b 2467uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2468{
2c17449b 2469 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2470}
2471
aab33094 2472/* XXX: optimize */
2c17449b 2473uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2474{
2475 uint8_t val;
2c17449b 2476 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2477 return val;
2478}
2479
733f0b02 2480/* warning: addr must be aligned */
41701aa4 2481static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2482 enum device_endian endian)
aab33094 2483{
733f0b02
MT
2484 uint8_t *ptr;
2485 uint64_t val;
5c8a00ce 2486 MemoryRegion *mr;
149f54b5
PB
2487 hwaddr l = 2;
2488 hwaddr addr1;
733f0b02 2489
41701aa4 2490 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2491 false);
2492 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2493 /* I/O case */
5c8a00ce 2494 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2495#if defined(TARGET_WORDS_BIGENDIAN)
2496 if (endian == DEVICE_LITTLE_ENDIAN) {
2497 val = bswap16(val);
2498 }
2499#else
2500 if (endian == DEVICE_BIG_ENDIAN) {
2501 val = bswap16(val);
2502 }
2503#endif
733f0b02
MT
2504 } else {
2505 /* RAM case */
5c8a00ce 2506 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2507 & TARGET_PAGE_MASK)
149f54b5 2508 + addr1);
1e78bcc1
AG
2509 switch (endian) {
2510 case DEVICE_LITTLE_ENDIAN:
2511 val = lduw_le_p(ptr);
2512 break;
2513 case DEVICE_BIG_ENDIAN:
2514 val = lduw_be_p(ptr);
2515 break;
2516 default:
2517 val = lduw_p(ptr);
2518 break;
2519 }
733f0b02
MT
2520 }
2521 return val;
aab33094
FB
2522}
2523
41701aa4 2524uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2525{
41701aa4 2526 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2527}
2528
41701aa4 2529uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2530{
41701aa4 2531 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2532}
2533
41701aa4 2534uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2535{
41701aa4 2536 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2537}
2538
8df1cd07
FB
 2539 /* warning: addr must be aligned. The RAM page is not marked as dirty
 2540 and the code inside is not invalidated. This is useful if the dirty
 2541 bits are used to track modified PTEs. */
2198a121 2542void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2543{
8df1cd07 2544 uint8_t *ptr;
5c8a00ce 2545 MemoryRegion *mr;
149f54b5
PB
2546 hwaddr l = 4;
2547 hwaddr addr1;
8df1cd07 2548
2198a121 2549 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2550 true);
2551 if (l < 4 || !memory_access_is_direct(mr, true)) {
2552 io_mem_write(mr, addr1, val, 4);
8df1cd07 2553 } else {
5c8a00ce 2554 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2555 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2556 stl_p(ptr, val);
74576198
AL
2557
2558 if (unlikely(in_migration)) {
a2cd8c85 2559 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2560 /* invalidate code */
2561 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2562 /* set dirty bit */
52159192
JQ
2563 cpu_physical_memory_set_dirty_flag(addr1,
2564 DIRTY_MEMORY_MIGRATION);
2565 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
74576198
AL
2566 }
2567 }
8df1cd07
FB
2568 }
2569}
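/* A sketch of the use case the comment above describes (the PTE layout is
 * invented for illustration).  Target MMU helpers that set accessed/dirty
 * bits in a guest PTE update guest RAM with stl_phys_notdirty() so the write
 * neither invalidates translated code on that page nor flags it for
 * migration/VGA dirty tracking; the dirty bitmap stays usable for tracking
 * modifications made by the guest itself. */
static void set_pte_accessed_sketch(AddressSpace *as, hwaddr pte_addr,
                                    uint32_t accessed_bit)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    if (!(pte & accessed_bit)) {
        stl_phys_notdirty(as, pte_addr, pte | accessed_bit);
    }
}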
2570
2571/* warning: addr must be aligned */
ab1da857
EI
2572static inline void stl_phys_internal(AddressSpace *as,
2573 hwaddr addr, uint32_t val,
1e78bcc1 2574 enum device_endian endian)
8df1cd07 2575{
8df1cd07 2576 uint8_t *ptr;
5c8a00ce 2577 MemoryRegion *mr;
149f54b5
PB
2578 hwaddr l = 4;
2579 hwaddr addr1;
8df1cd07 2580
ab1da857 2581 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2582 true);
2583 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2584#if defined(TARGET_WORDS_BIGENDIAN)
2585 if (endian == DEVICE_LITTLE_ENDIAN) {
2586 val = bswap32(val);
2587 }
2588#else
2589 if (endian == DEVICE_BIG_ENDIAN) {
2590 val = bswap32(val);
2591 }
2592#endif
5c8a00ce 2593 io_mem_write(mr, addr1, val, 4);
8df1cd07 2594 } else {
8df1cd07 2595 /* RAM case */
5c8a00ce 2596 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2597 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2598 switch (endian) {
2599 case DEVICE_LITTLE_ENDIAN:
2600 stl_le_p(ptr, val);
2601 break;
2602 case DEVICE_BIG_ENDIAN:
2603 stl_be_p(ptr, val);
2604 break;
2605 default:
2606 stl_p(ptr, val);
2607 break;
2608 }
51d7a9eb 2609 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2610 }
2611}
2612
ab1da857 2613void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2614{
ab1da857 2615 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2616}
2617
ab1da857 2618void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2619{
ab1da857 2620 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2621}
2622
ab1da857 2623void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2624{
ab1da857 2625 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2626}
2627
aab33094 2628/* XXX: optimize */
db3be60d 2629void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2630{
2631 uint8_t v = val;
db3be60d 2632 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2633}
2634
733f0b02 2635/* warning: addr must be aligned */
5ce5944d
EI
2636static inline void stw_phys_internal(AddressSpace *as,
2637 hwaddr addr, uint32_t val,
1e78bcc1 2638 enum device_endian endian)
aab33094 2639{
733f0b02 2640 uint8_t *ptr;
5c8a00ce 2641 MemoryRegion *mr;
149f54b5
PB
2642 hwaddr l = 2;
2643 hwaddr addr1;
733f0b02 2644
5ce5944d 2645 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2646 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2647#if defined(TARGET_WORDS_BIGENDIAN)
2648 if (endian == DEVICE_LITTLE_ENDIAN) {
2649 val = bswap16(val);
2650 }
2651#else
2652 if (endian == DEVICE_BIG_ENDIAN) {
2653 val = bswap16(val);
2654 }
2655#endif
5c8a00ce 2656 io_mem_write(mr, addr1, val, 2);
733f0b02 2657 } else {
733f0b02 2658 /* RAM case */
5c8a00ce 2659 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2660 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2661 switch (endian) {
2662 case DEVICE_LITTLE_ENDIAN:
2663 stw_le_p(ptr, val);
2664 break;
2665 case DEVICE_BIG_ENDIAN:
2666 stw_be_p(ptr, val);
2667 break;
2668 default:
2669 stw_p(ptr, val);
2670 break;
2671 }
51d7a9eb 2672 invalidate_and_set_dirty(addr1, 2);
733f0b02 2673 }
aab33094
FB
2674}
2675
5ce5944d 2676void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2677{
5ce5944d 2678 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2679}
2680
5ce5944d 2681void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2682{
5ce5944d 2683 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2684}
2685
5ce5944d 2686void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2687{
5ce5944d 2688 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2689}
2690
aab33094 2691/* XXX: optimize */
f606604f 2692void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2693{
2694 val = tswap64(val);
f606604f 2695 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2696}
2697
f606604f 2698void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2699{
2700 val = cpu_to_le64(val);
f606604f 2701 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2702}
2703
f606604f 2704void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2705{
2706 val = cpu_to_be64(val);
f606604f 2707 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2708}
2709
5e2972fd 2710 /* virtual memory access for debug (includes writing to ROM) */
f17ec444 2711int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2712 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2713{
2714 int l;
a8170e5e 2715 hwaddr phys_addr;
9b3c35e0 2716 target_ulong page;
13eb76e0
FB
2717
2718 while (len > 0) {
2719 page = addr & TARGET_PAGE_MASK;
f17ec444 2720 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2721 /* if no physical page mapped, return an error */
2722 if (phys_addr == -1)
2723 return -1;
2724 l = (page + TARGET_PAGE_SIZE) - addr;
2725 if (l > len)
2726 l = len;
5e2972fd 2727 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2728 if (is_write) {
2729 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2730 } else {
2731 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2732 }
13eb76e0
FB
2733 len -= l;
2734 buf += l;
2735 addr += l;
2736 }
2737 return 0;
2738}
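/* A sketch of the debugger-style access this helper exists for (the int3
 * opcode assumes an x86 guest; the caller is hypothetical).  Because debug
 * writes go through cpu_physical_memory_write_rom(), a software breakpoint
 * can be patched even into ROM-backed code. */
static int insert_sw_breakpoint_sketch(CPUState *cpu, target_ulong pc,
                                       uint8_t *saved_insn)
{
    static const uint8_t bp_insn[] = { 0xcc };   /* hypothetical: x86 int3 */

    if (cpu_memory_rw_debug(cpu, pc, saved_insn, sizeof(bp_insn), 0) ||
        cpu_memory_rw_debug(cpu, pc, (uint8_t *)bp_insn, sizeof(bp_insn), 1)) {
        return -1;
    }
    return 0;
}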
a68fe89c 2739#endif
13eb76e0 2740
8e4a424b
BS
2741#if !defined(CONFIG_USER_ONLY)
2742
 2743 /*
 2744 * A helper function for the _utterly broken_ virtio device model to find
 2745 * out if it's running on a big-endian machine. Don't do this at home, kids!
 2746 */
2747bool virtio_is_big_endian(void);
2748bool virtio_is_big_endian(void)
2749{
2750#if defined(TARGET_WORDS_BIGENDIAN)
2751 return true;
2752#else
2753 return false;
2754#endif
2755}
2756
2757#endif
2758
76f35538 2759#ifndef CONFIG_USER_ONLY
a8170e5e 2760bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2761{
5c8a00ce 2762 MemoryRegion*mr;
149f54b5 2763 hwaddr l = 1;
76f35538 2764
5c8a00ce
PB
2765 mr = address_space_translate(&address_space_memory,
2766 phys_addr, &phys_addr, &l, false);
76f35538 2767
5c8a00ce
PB
2768 return !(memory_region_is_ram(mr) ||
2769 memory_region_is_romd(mr));
76f35538 2770}
bd2fa51f
MH
2771
2772void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2773{
2774 RAMBlock *block;
2775
2776 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2777 func(block->host, block->offset, block->length, opaque);
2778 }
2779}
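/* A minimal example of the iterator above (the accumulator is illustrative):
 * summing the size of all RAM blocks by passing a RAMBlockIterFunc-compatible
 * callback plus an opaque pointer. */
static void count_ram_cb_sketch(void *host_addr, ram_addr_t offset,
                                ram_addr_t length, void *opaque)
{
    ram_addr_t *total = opaque;

    *total += length;
}

static ram_addr_t count_ram_sketch(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(count_ram_cb_sketch, &total);
    return total;
}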
ec3f8c99 2780#endif